diff --git a/Cargo.lock b/Cargo.lock index 6e6a6ee419..e0cff6b7c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,10 +79,17 @@ name = "actors-umbrella" version = "0.1.0" dependencies = [ "fendermint_actor_activity_tracker", + "fendermint_actor_adm", + "fendermint_actor_blob_reader", + "fendermint_actor_blobs", + "fendermint_actor_bucket", "fendermint_actor_chainmetadata", "fendermint_actor_eam", "fendermint_actor_f3_light_client", "fendermint_actor_gas_market_eip1559", + "fendermint_actor_machine", + "fendermint_actor_recall_config", + "fendermint_actor_timehub", ] [[package]] @@ -121,6 +128,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ + "bytes", "crypto-common", "generic-array 0.14.9", ] @@ -189,6 +197,141 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy-json-abi" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4584e3641181ff073e9d5bec5b3b8f78f9749d9fb108a1cfbc4399a4a139c72a" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777d58b30eb9a4db0e5f59bc30e8c2caef877fee7dc8734cf242a51a60f22e05" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "hashbrown 0.15.5", + "indexmap 2.11.4", + "itoa", + "k256 0.13.4", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash 2.1.1", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "arrayvec 0.7.6", + "bytes", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e68b32b6fa0d09bb74b4cefe35ccc8269d711c26629bc7cd98a47eeb12fe353f" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2afe6879ac373e58fd53581636f2cce843998ae0b058ebe1e4f649195e2bd23c" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.11.4", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.106", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ba01aee235a8c699d07e5be97ba215607564e71be72f433665329bec307d28" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.106", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c13fc168b97411e04465f03e632f31ef94cad1c7c8951bf799237fd7870d535" +dependencies = [ + "serde", + "winnow 0.7.13", +] + +[[package]] +name 
= "alloy-sol-types" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e960c4b52508ef2ae1e37cae5058e905e9ae099b107900067a503f8c454036f" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "ambassador" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e87ccf220415ad6a81b21e21780134c746463fdb821cc2530a001df2c3d13a36" +dependencies = [ + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "ambassador" version = "0.4.2" @@ -317,6 +460,195 @@ dependencies = [ "password-hash 0.5.0", ] +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + 
"num-traits", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -428,6 +760,19 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compat" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ba85bc55464dcbf728b56d97e119d673f4cf9062be330a9a26f3acf504a590" +dependencies = [ + "futures-core", + "futures-io", + "once_cell", + "pin-project-lite", + "tokio", +] + [[package]] name = "async-executor" version = "1.13.3" @@ -622,7 +967,7 @@ dependencies = [ "async-trait", "futures-io", "futures-util", - "hickory-resolver", + "hickory-resolver 0.24.4", "pin-utils", "socket2 0.5.10", ] @@ -678,7 +1023,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version", + "rustc_version 0.4.1", ] [[package]] @@ -707,6 +1052,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -724,6 +1078,18 @@ dependencies = [ "url", ] +[[package]] +name = "attohttpc" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" +dependencies = [ + "base64 0.22.1", + "http 1.3.1", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -804,6 +1170,17 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand 2.3.0", + "gloo-timers 0.3.0", + "tokio", +] + [[package]] name 
= "backtrace" version = "0.3.71" @@ -819,6 +1196,23 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bao-tree" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff16d65e48353db458be63ee395c03028f24564fd48668389bd65fd945f5ac36" +dependencies = [ + "blake3", + "bytes", + "futures-lite 2.6.1", + "genawaiter", + "iroh-io", + "positioned-io", + "range-collections", + "self_cell", + "smallvec", +] + [[package]] name = "base-x" version = "0.2.11" @@ -847,6 +1241,12 @@ dependencies = [ "match-lookup", ] +[[package]] +name = "base32" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "022dfe9eb35f19ebbcb51e0b40a5ab759f46ad60cadf7297e0bd085afb50e076" + [[package]] name = "base64" version = "0.13.1" @@ -930,6 +1330,12 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "binary-merge" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597bb81c80a54b6a4381b23faba8d7774b144c94cbd1d6fe3f1329bd776554ab" + [[package]] name = "bincode" version = "1.3.3" @@ -987,6 +1393,15 @@ dependencies = [ "bit-vec 0.6.3", ] +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec 0.8.0", +] + [[package]] name = "bit-vec" version = "0.4.4" @@ -999,6 +1414,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -1256,6 +1677,12 @@ dependencies = [ "serde_with 3.15.0", ] +[[package]] +name = "bounded-integer" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "102dbef1187b1893e6dfe05a774e79fd52265f49f214f6879c8ff49f52c8188b" + [[package]] name = "bs58" version = "0.5.1" @@ -1372,7 +1799,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 1.0.69", @@ -1386,7 +1813,7 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 2.0.17", @@ -1431,6 +1858,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -1734,6 +2167,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1848,6 +2291,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "cordyceps" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1858,6 +2311,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -2008,6 +2471,21 @@ dependencies = [ "target-lexicon", ] +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.5.0" @@ -2017,6 +2495,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam" version = "0.8.4" @@ -2124,6 +2608,38 @@ dependencies = [ "subtle", ] +[[package]] +name = "crypto_box" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16182b4f39a82ec8a6851155cc4c0cda3065bb1db33651726a29e1951de0f009" +dependencies = [ + "aead", + "chacha20", + "crypto_secretbox", + "curve25519-dalek", + "salsa20", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto_secretbox" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" +dependencies = [ + "aead", + "chacha20", + "cipher", + "generic-array 0.14.9", + "poly1305", + "salsa20", + "subtle", + "zeroize", +] + [[package]] name = "cs_serde_bytes" version = "0.12.2" @@ -2172,7 +2688,9 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rand_core 0.6.4", + "rustc_version 0.4.1", + "serde", "subtle", "zeroize", ] @@ -2292,6 +2810,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", + "der_derive", + "pem-rfc7468", "zeroize", ] @@ -2309,6 +2829,17 @@ dependencies = [ "rusticata-macros", ] +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "deranged" version = "0.5.4" @@ -2319,6 +2850,17 @@ dependencies = [ "serde_core", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_arbitrary" version = "1.4.2" @@ -2357,6 +2899,7 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.106", + "unicode-xid", ] [[package]] @@ -2372,6 
+2915,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -2463,12 +3012,32 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", +] + [[package]] name = "dlv-list" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + [[package]] name = "dtoa" version = "1.0.10" @@ -2548,6 +3117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8 0.10.2", + "serde", "signature 2.2.0", ] @@ -2572,12 +3142,25 @@ checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", + "rand_core 0.6.4", "serde", "sha2 0.10.9", "subtle", "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "either" version = "1.15.0" @@ -2683,6 +3266,46 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "enumflags2" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1027f7680c853e056ebcec683615fb6fbbc07dbaa13b4d5d9442b146ded4ecef" +dependencies = [ + "enumflags2_derive", +] + +[[package]] +name = "enumflags2_derive" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "env_home" version = "0.1.0" @@ -2933,7 +3556,7 @@ dependencies = [ "chrono", "ethers-core", "reqwest 0.11.27", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 1.0.69", @@ -3043,7 +3666,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver", + "semver 1.0.27", "serde", "serde_json", "solang-parser", @@ -3161,44 +3784,190 @@ dependencies = [ ] [[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" 
- -[[package]] -name = "fdlimit" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + +[[package]] +name = "fdlimit" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +dependencies = [ + "libc", + "thiserror 1.0.69", +] + +[[package]] +name = "fendermint_abci" +version = "0.1.0" +dependencies = [ + "async-stm", + "async-trait", + "futures", + "im", + "structopt", + "tendermint 0.31.1", + "tokio", + "tower 0.4.13", + "tower-abci", + "tracing", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "fendermint_actor_activity_tracker" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "log", + "multihash 0.18.1", + "num-derive 0.4.2", + "num-traits", + "serde", + "serde_tuple 0.5.0", +] + +[[package]] +name = "fendermint_actor_adm" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fendermint_actor_machine", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "integer-encoding 3.0.4", + "log", + "multihash 0.18.1", + "num-derive 0.4.2", + "num-traits", + "recall_actor_sdk", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_blob_reader" +version = "0.1.0" +dependencies = [ + "anyhow", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "log", + "num-derive 0.4.2", + "num-traits", + "recall_actor_sdk", + "recall_ipld", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_blobs" +version = "0.1.0" +dependencies = [ + "anyhow", + "bls-signatures 0.13.1", + "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fendermint_actor_recall_config_shared", + "fil_actors_evm_shared", + "fil_actors_runtime", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "log", + "num-traits", + "rand 0.8.5", + "recall_actor_sdk", + "recall_ipld", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_blobs_shared" +version = "0.1.0" dependencies = [ - "libc", - "thiserror 1.0.69", + "anyhow", + "blake3", + "data-encoding", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "num-derive 0.4.2", + 
"num-traits", + "recall_ipld", + "serde", ] [[package]] -name = "fendermint_abci" +name = "fendermint_actor_blobs_testing" version = "0.1.0" dependencies = [ - "async-stm", - "async-trait", - "futures", - "im", - "structopt", - "tendermint 0.31.1", - "tokio", - "tower 0.4.13", - "tower-abci", - "tracing", + "fendermint_actor_blobs_shared", + "fvm_shared", + "iroh-blobs", + "rand 0.8.5", "tracing-subscriber 0.3.20", ] [[package]] -name = "fendermint_actor_activity_tracker" +name = "fendermint_actor_bucket" version = "0.1.0" dependencies = [ "anyhow", + "blake3", "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fendermint_actor_machine", "fil_actors_evm_shared", "fil_actors_runtime", "frc42_dispatch 8.0.0", @@ -3206,12 +3975,14 @@ dependencies = [ "fvm_ipld_encoding 0.5.3", "fvm_shared", "hex-literal 0.4.1", - "log", - "multihash 0.18.1", "num-derive 0.4.2", "num-traits", + "quickcheck", + "quickcheck_macros", + "recall_actor_sdk", + "recall_ipld", + "recall_sol_facade", "serde", - "serde_tuple 0.5.0", ] [[package]] @@ -3296,6 +4067,79 @@ dependencies = [ "serde", ] +[[package]] +name = "fendermint_actor_machine" +version = "0.1.0" +dependencies = [ + "anyhow", + "fil_actor_adm", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "recall_actor_sdk", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_recall_config" +version = "0.1.0" +dependencies = [ + "anyhow", + "fendermint_actor_blobs_shared", + "fendermint_actor_recall_config_shared", + "fil_actors_evm_shared", + "fil_actors_runtime", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "num-traits", + "recall_actor_sdk", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_recall_config_shared" +version = "0.1.0" +dependencies = [ + "fendermint_actor_blobs_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "num-derive 0.4.2", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_timehub" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_machine", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_amt", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "multihash-codetable", + "num-derive 0.4.2", + "num-traits", + "recall_actor_sdk", + "recall_sol_facade", + "serde", + "tracing", +] + [[package]] name = "fendermint_app" version = "0.1.0" @@ -3312,6 +4156,8 @@ dependencies = [ "contracts-artifacts", "ethers", "fendermint_abci", + "fendermint_actor_blobs_shared", + "fendermint_actor_bucket", "fendermint_actor_f3_light_client", "fendermint_actor_gas_market_eip1559", "fendermint_app_options", @@ -3329,11 +4175,13 @@ dependencies = [ "fendermint_vm_event", "fendermint_vm_genesis", "fendermint_vm_interpreter", + "fendermint_vm_iroh_resolver", "fendermint_vm_message", "fendermint_vm_resolver", "fendermint_vm_snapshot", "fendermint_vm_topdown", "fs-err", + "futures-util", "fvm", "fvm_ipld_blockstore 0.3.1", "fvm_ipld_car 0.9.0", @@ -3345,12 +4193,16 @@ dependencies = [ "ipc-provider", "ipc_actors_abis", "ipc_ipld_resolver", + "iroh", + "iroh-blobs", + "iroh_manager", "k256 0.11.6", "lazy_static", "libipld", "libp2p", "libp2p-bitswap", "literally", + "mime_guess", "multiaddr", "num-traits", "openssl", @@ -3361,6 +4213,8 @@ dependencies = 
[ "quickcheck", "quickcheck_macros", "rand_chacha 0.3.1", + "recall_entangler", + "recall_entangler_storage", "serde", "serde_json", "serde_with 2.3.3", @@ -3369,6 +4223,7 @@ dependencies = [ "tendermint-config 0.33.2", "tendermint-proto 0.31.1", "tendermint-rpc", + "thiserror 1.0.69", "tokio", "tokio-util 0.7.16", "toml 0.8.23", @@ -3378,6 +4233,9 @@ dependencies = [ "tracing-appender", "tracing-subscriber 0.3.20", "url", + "urlencoding", + "uuid 1.18.1", + "warp", ] [[package]] @@ -3645,6 +4503,8 @@ dependencies = [ "cid 0.11.1", "clap 4.5.49", "ethers", + "fendermint_actor_blobs_shared", + "fendermint_actor_bucket", "fendermint_crypto", "fendermint_vm_actor_interface", "fendermint_vm_genesis", @@ -3831,10 +4691,16 @@ dependencies = [ "cid 0.11.1", "ethers", "fendermint_actor_activity_tracker", + "fendermint_actor_adm", + "fendermint_actor_blob_reader", + "fendermint_actor_blobs", + "fendermint_actor_blobs_shared", "fendermint_actor_chainmetadata", "fendermint_actor_eam", "fendermint_actor_f3_light_client", "fendermint_actor_gas_market_eip1559", + "fendermint_actor_recall_config", + "fendermint_actor_recall_config_shared", "fendermint_crypto", "fendermint_eth_deployer", "fendermint_eth_hardhat", @@ -3847,9 +4713,11 @@ dependencies = [ "fendermint_vm_event", "fendermint_vm_genesis", "fendermint_vm_interpreter", + "fendermint_vm_iroh_resolver", "fendermint_vm_message", "fendermint_vm_resolver", "fendermint_vm_topdown", + "fil_actor_adm", "fil_actor_eam", "fil_actor_evm", "futures-core", @@ -3863,6 +4731,8 @@ dependencies = [ "ipc-api", "ipc-observability", "ipc_actors_abis", + "iroh", + "iroh-blobs", "libipld", "merkle-tree-rs", "multihash 0.18.1", @@ -3873,6 +4743,8 @@ dependencies = [ "quickcheck", "quickcheck_macros", "rand 0.8.5", + "recall_executor", + "recall_kernel", "serde", "serde_json", "serde_with 2.3.3", @@ -3888,6 +4760,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "fendermint_vm_iroh_resolver" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-stm", + "fendermint_vm_topdown", + "hex", + "im", + "ipc-api", + "ipc-observability", + "ipc_ipld_resolver", + "iroh", + "iroh-blobs", + "libp2p", + "prometheus", + "rand 0.8.5", + "serde", + "tokio", + "tracing", +] + [[package]] name = "fendermint_vm_message" version = "0.1.0" @@ -3898,6 +4792,7 @@ dependencies = [ "cid 0.11.1", "ethers", "ethers-core", + "fendermint_actor_blobs_shared", "fendermint_crypto", "fendermint_testing", "fendermint_vm_actor_interface", @@ -3907,6 +4802,8 @@ dependencies = [ "fvm_shared", "hex", "ipc-api", + "iroh-base", + "iroh-blobs", "lazy_static", "multihash-codetable", "num-traits", @@ -4001,6 +4898,7 @@ dependencies = [ "ipc-provider", "ipc_actors_abis", "ipc_ipld_resolver", + "iroh-blobs", "libp2p", "num-traits", "prometheus", @@ -4042,6 +4940,13 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "fil_actor_adm" +version = "0.1.0" +dependencies = [ + "serde", +] + [[package]] name = "fil_actor_bundler" version = "6.1.0" @@ -4295,6 +5200,18 @@ dependencies = [ "spin 0.9.8", ] +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -4476,6 +5393,19 @@ dependencies = [ "futures-util", ] 
+[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin 0.10.0", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -4626,7 +5556,7 @@ name = "fvm" version = "4.7.4" source = "git+https://github.com/consensus-shipyard/ref-fvm.git?branch=master#8ab9b7e78a5b4d95dfe18985a2afdd0616da5654" dependencies = [ - "ambassador", + "ambassador 0.4.2", "anyhow", "arbitrary", "cid 0.11.1", @@ -4876,6 +5806,51 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "genawaiter" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c86bd0361bcbde39b13475e6e36cb24c329964aa2611be285289d1e4b751c1a0" +dependencies = [ + "futures-core", + "genawaiter-macro", + "genawaiter-proc-macro", + "proc-macro-hack", +] + +[[package]] +name = "genawaiter-macro" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b32dfe1fdfc0bbde1f22a5da25355514b5e450c33a6af6770884c8750aedfbc" + +[[package]] +name = "genawaiter-proc-macro" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784f84eebc366e15251c4a8c3acee82a6a6f427949776ecb88377362a9621738" +dependencies = [ + "proc-macro-error 0.4.12", + "proc-macro-hack", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "generator" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.61.3", +] + [[package]] name = "generic-array" version = "0.14.9" @@ -5058,6 +6033,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -5072,6 +6056,9 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.12", +] [[package]] name = "hashbrown" @@ -5100,6 +6087,15 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -5134,6 +6130,20 @@ dependencies = [ "http 0.2.12", ] +[[package]] +name = "heapless" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version 0.4.1", + "serde", + "spin 0.9.8", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.3.3" @@ -5214,31 +6224,77 @@ dependencies = [ "ipnet", "once_cell", "rand 0.8.5", - "socket2 0.5.10", + "socket2 0.5.10", + "thiserror 1.0.69", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-proto" +version = "0.25.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "ring 0.17.14", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.24.4", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "smallvec", "thiserror 1.0.69", - "tinyvec", "tokio", "tracing", - "url", ] [[package]] name = "hickory-resolver" -version = "0.24.4" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.25.2", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot", - "rand 0.8.5", + "rand 0.9.2", "resolv-conf", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5282,6 +6338,22 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "hmac-sha1" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b05da5b9e5d4720bfb691eebb2b9d42da3570745da71eac8a1f5bb7e59aab88" +dependencies = [ + "hmac 0.12.1", + "sha1", +] + +[[package]] +name = "hmac-sha256" +version = "1.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad6880c8d4a9ebf39c6e8b77007ce223f646a4d21ce29d99f70cb16420545425" + [[package]] name = "home" version = "0.5.11" @@ -5291,6 +6363,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "hostname-validator" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + [[package]] name = "http" version = "0.2.12" @@ -5409,6 +6487,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -5707,21 +6786,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ "async-io 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "fnv", "futures", "if-addrs", "ipnet", "log", "netlink-packet-core", - "netlink-packet-route", + "netlink-packet-route 0.17.1", "netlink-proto", "netlink-sys", "rtnetlink", "smol", "system-configuration 0.6.1", "tokio", - "windows", + "windows 0.53.0", ] [[package]] @@ -5731,7 +6810,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", - "attohttpc", + "attohttpc 0.24.1", "bytes", "futures", "http 0.2.12", @@ -5743,6 +6822,27 @@ dependencies = [ "xmltree", ] +[[package]] +name = "igd-next" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" +dependencies = [ + "async-trait", + "attohttpc 0.30.1", + 
"bytes", + "futures", + "http 1.3.1", + "http-body-util", + "hyper 1.7.0", + "hyper-util", + "log", + "rand 0.9.2", + "tokio", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.23" @@ -5875,6 +6975,15 @@ dependencies = [ "generic-array 0.14.9", ] +[[package]] +name = "inplace-vec-builder" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf64c2edc8226891a71f127587a2861b132d2b942310843814d5001d99a1d307" +dependencies = [ + "smallvec", +] + [[package]] name = "instant" version = "0.1.13" @@ -5882,6 +6991,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -6024,6 +7136,41 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ipc-decentralized-storage" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "blake2b_simd", + "bls-signatures 0.13.1", + "clap 4.5.49", + "fendermint_actor_blobs_shared", + "fendermint_crypto", + "fendermint_rpc", + "fendermint_vm_actor_interface", + "fendermint_vm_message", + "futures", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex", + "iroh", + "iroh-base", + "iroh-blobs", + "iroh_manager", + "rand 0.8.5", + "reqwest 0.11.27", + "serde", + "serde_json", + "tempfile", + "tendermint-rpc", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber 0.3.20", + "warp", +] + [[package]] name = "ipc-observability" version = "0.1.0" @@ -6147,109 +7294,403 @@ dependencies = [ "quickcheck_macros", "rand 0.8.5", "serde", - "serde_ipld_dagcbor 0.4.2", - "serde_json", + "serde_ipld_dagcbor 0.4.2", + "serde_json", + "tempfile", + "thiserror 1.0.69", + "tokio", + "xsalsa20poly1305", + "zeroize", +] + +[[package]] +name = "ipc_actors_abis" +version = "0.1.0" +dependencies = [ + "anyhow", + "build-rs-utils", + "color-eyre 0.6.5", + "const-hex", + "ethers", + "fs-err", + "fvm_shared", + "lazy_static", + "prettyplease", + "syn 2.0.106", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "ipc_ipld_resolver" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.21.7", + "blake2b_simd", + "bloom", + "bytes", + "cid 0.11.1", + "env_logger 0.10.2", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_ipld_hamt", + "fvm_shared", + "gcra", + "ipc-api", + "ipc-observability", + "ipc_ipld_resolver", + "iroh", + "iroh-blobs", + "iroh_manager", + "lazy_static", + "libipld", + "libp2p", + "libp2p-bitswap", + "libp2p-mplex", + "libsecp256k1", + "log", + "lru_time_cache", + "multihash 0.18.1", + "multihash-codetable", + "prometheus", + "quickcheck", + "quickcheck_macros", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipld-core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "104718b1cc124d92a6d01ca9c9258a7df311405debb3408c445a36452f9bf8db" +dependencies = [ + "cid 0.11.1", + "serde", + "serde_bytes", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "iroh" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ca758f4ce39ae3f07de922be6c73de6a48a07f39554e78b5745585652ce38f5" +dependencies = [ + "aead", + "anyhow", + "atomic-waker", + "backon", + "bytes", + "cfg_aliases", + "concurrent-queue", + "crypto_box", + "data-encoding", + "der 0.7.10", + "derive_more 1.0.0", + "ed25519-dalek", + "futures-buffered", + "futures-util", + "getrandom 0.3.4", + "hickory-resolver 0.25.2", + "http 1.3.1", + "igd-next 0.16.2", + "instant", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "iroh-quinn-udp", + "iroh-relay", + "n0-future", + "netdev", + "netwatch", + "pin-project", + "pkarr", + "portmapper", + "rand 0.8.5", + "rcgen 0.13.2", + "reqwest 0.12.24", + "ring 0.17.14", + "rustls 0.23.32", + "rustls-webpki 0.102.8", + "serde", + "smallvec", + "spki 0.7.3", + "strum", + "stun-rs", + "surge-ping", + "thiserror 2.0.17", + "time", + "tokio", + "tokio-stream", + "tokio-util 0.7.16", + "tracing", + "url", + "wasm-bindgen-futures", + "webpki-roots 0.26.11", + "x509-parser", + "z32", +] + +[[package]] +name = "iroh-base" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91ac4aaab68153d726c4e6b39c30f9f9253743f0e25664e52f4caeb46f48d11" +dependencies = [ + "curve25519-dalek", + "data-encoding", + "derive_more 1.0.0", + "ed25519-dalek", + "postcard", + "rand_core 0.6.4", + "serde", + "thiserror 2.0.17", + "url", +] + +[[package]] +name = "iroh-blobs" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "817b785193b73c34ef1f2dcb5ddf8729ecef9b72a8fc0e706ee6d7a9bf8766a6" +dependencies = [ + "anyhow", + "async-channel 2.5.0", + "bao-tree", + "blake3", + "bytes", + "chrono", + "data-encoding", + "derive_more 1.0.0", + "futures-buffered", + "futures-lite 2.6.1", + "futures-util", + "genawaiter", + "hashlink", + "hex", + "iroh", + "iroh-base", + "iroh-io", + "iroh-metrics", + "nested_enum_utils 0.1.0", + "num_cpus", + "oneshot", + "parking_lot", + "portable-atomic", + "postcard", + "quic-rpc", + "quic-rpc-derive", + "rand 0.8.5", + "range-collections", + "redb", + "reflink-copy", + "self_cell", + "serde", + "serde-error", + "smallvec", + "ssh-key", + "strum", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.17", "tokio", - "xsalsa20poly1305", - "zeroize", + "tokio-util 0.7.16", + "tracing", + "tracing-futures", + "tracing-test", + "walkdir", ] [[package]] -name = "ipc_actors_abis" -version = "0.1.0" +name = "iroh-io" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a5feb781017b983ff1b155cd1faf8174da2acafd807aa482876da2d7e6577a" dependencies = [ - "anyhow", - "build-rs-utils", - "color-eyre 0.6.5", - "const-hex", - "ethers", - "fs-err", - "fvm_shared", - "lazy_static", - "prettyplease", - "syn 2.0.106", - "thiserror 1.0.69", - "tracing", + "bytes", + "futures-lite 2.6.1", + "pin-project", + "smallvec", + "tokio", ] [[package]] -name = "ipc_ipld_resolver" -version = "0.1.0" +name = "iroh-metrics" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f70466f14caff7420a14373676947e25e2917af6a5b1bec45825beb2bf1eb6a7" dependencies = [ - "anyhow", - "async-trait", - "base64 0.21.7", - "blake2b_simd", - "bloom", - "cid 0.11.1", - "env_logger 0.10.2", - "fvm_ipld_blockstore 0.3.1", - "fvm_ipld_encoding 0.5.3", - "fvm_ipld_hamt", - "fvm_shared", - "gcra", - "ipc-api", - "ipc-observability", - "ipc_ipld_resolver", - "lazy_static", - "libipld", - "libp2p", - "libp2p-bitswap", - "libp2p-mplex", - "libsecp256k1", - "log", - "lru_time_cache", - "multihash 0.18.1", - "multihash-codetable", - "prometheus", - "quickcheck", - "quickcheck_macros", - "rand 0.8.5", + "iroh-metrics-derive", + "itoa", "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", + "snafu", + "tracing", ] [[package]] -name = "ipconfig" -version = "0.3.2" +name = "iroh-metrics-derive" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +checksum = "8d12f5c45c4ed2436302a4e03cad9a0ad34b2962ad0c5791e1019c0ee30eeb09" dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "iroh-quinn" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c6245c9ed906506ab9185e8d7f64857129aee4f935e899f398a3bd3b70338d" +dependencies = [ + "bytes", + "cfg_aliases", + "iroh-quinn-proto", + "iroh-quinn-udp", + "pin-project-lite", + "rustc-hash 2.1.1", + "rustls 0.23.32", "socket2 0.5.10", - "widestring", - "windows-sys 0.48.0", - "winreg", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", ] [[package]] -name = "ipld-core" -version = "0.4.2" +name = "iroh-quinn-proto" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104718b1cc124d92a6d01ca9c9258a7df311405debb3408c445a36452f9bf8db" +checksum = "929d5d8fa77d5c304d3ee7cae9aede31f13908bd049f9de8c7c0094ad6f7c535" dependencies = [ - "cid 0.11.1", - "serde", - "serde_bytes", + "bytes", + "getrandom 0.2.16", + "rand 0.8.5", + "ring 0.17.14", + "rustc-hash 2.1.1", + "rustls 0.23.32", + "rustls-pki-types", + "rustls-platform-verifier", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", ] [[package]] -name = "ipnet" -version = "2.11.0" +name = "iroh-quinn-udp" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "c53afaa1049f7c83ea1331f5ebb9e6ebc5fdd69c468b7a22dd598b02c9bcc973" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] [[package]] -name = "iri-string" -version = "0.7.8" +name = "iroh-relay" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +checksum = "c63f122cdfaa4b4e0e7d6d3921d2b878f42a0c6d3ee5a29456dc3f5ab5ec931f" dependencies = [ - "memchr", + "anyhow", + "bytes", + "cfg_aliases", + "data-encoding", + "derive_more 1.0.0", + "getrandom 0.3.4", + "hickory-resolver 0.25.2", + "http 1.3.1", + "http-body-util", + "hyper 1.7.0", + "hyper-util", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "lru 0.12.5", + "n0-future", + "num_enum", + "pin-project", + "pkarr", + "postcard", + "rand 0.8.5", + "reqwest 0.12.24", + "rustls 0.23.32", + "rustls-webpki 0.102.8", "serde", + "sha1", + "strum", + "stun-rs", + "thiserror 2.0.17", + "tokio", + "tokio-rustls 
0.26.4", + "tokio-util 0.7.16", + "tokio-websockets", + "tracing", + "url", + "webpki-roots 0.26.11", + "ws_stream_wasm", + "z32", +] + +[[package]] +name = "iroh_manager" +version = "0.1.0" +dependencies = [ + "anyhow", + "iroh", + "iroh-blobs", + "iroh-quinn", + "iroh-relay", + "n0-future", + "num-traits", + "quic-rpc", + "tempfile", + "tokio", + "tracing", + "tracing-subscriber 0.3.20", + "url", ] [[package]] @@ -6338,6 +7779,28 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -6443,6 +7906,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -6459,7 +7932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" dependencies = [ "ascii-canvas", - "bit-set", + "bit-set 0.5.3", "ena", "itertools 0.11.0", "lalrpop-util", @@ -6701,7 +8174,7 @@ dependencies = [ "async-std-resolver", "async-trait", "futures", - "hickory-resolver", + "hickory-resolver 0.24.4", "libp2p-core", "libp2p-identity", "parking_lot", @@ -6755,7 +8228,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", + "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -6826,7 +8299,7 @@ dependencies = [ "async-std", "data-encoding", "futures", - "hickory-proto", + "hickory-proto 0.24.4", "if-watch", "libp2p-core", "libp2p-identity", @@ -6997,7 +8470,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.5", "multistream-select", "once_cell", "rand 0.8.5", @@ -7047,7 +8520,7 @@ dependencies = [ "futures-rustls", "libp2p-core", "libp2p-identity", - "rcgen", + "rcgen 0.11.3", "ring 0.17.14", "rustls 0.23.32", "rustls-webpki 0.101.7", @@ -7064,7 +8537,7 @@ checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" dependencies = [ "futures", "futures-timer", - "igd-next", + "igd-next 0.14.3", "libp2p-core", "libp2p-swarm", "tokio", @@ -7209,6 +8682,12 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d2be3f5a0d4d5c983d1f8ecc2a87676a0875a14feb9eebf0675f7c3e2f3c35" +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + [[package]] name = "lock_api" version = "0.4.14" @@ -7227,6 +8706,19 @@ dependencies = [ "value-bag", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber 0.3.20", +] + [[package]] name = "lru" version = "0.12.5" @@ -7236,6 +8728,12 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" + [[package]] name = "lru-cache" version = "0.1.2" @@ -7276,6 +8774,17 @@ dependencies = [ "libc", ] +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "match-lookup" version = "0.1.1" @@ -7322,6 +8831,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + [[package]] name = "memchr" version = "2.7.6" @@ -7437,6 +8952,24 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "moka" +version = "0.12.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "uuid 1.18.1", +] + [[package]] name = "multer" version = "2.1.0" @@ -7544,7 +9077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro-error", + "proc-macro-error 1.0.4", "proc-macro2", "quote", "syn 1.0.109", @@ -7595,6 +9128,27 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "n0-future" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb0e5d99e681ab3c938842b96fcb41bf8a7bb4bfdb11ccbd653a7e83e06c794" +dependencies = [ + "cfg_aliases", + "derive_more 1.0.0", + "futures-buffered", + "futures-lite 2.6.1", + "futures-util", + "js-sys", + "pin-project", + "send_wrapper 0.6.0", + "tokio", + "tokio-util 0.7.16", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-time", +] + [[package]] name = "nalgebra" version = "0.33.2" @@ -7612,6 +9166,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.16", +] + [[package]] name = "native-tls" version = "0.2.14" @@ -7624,7 +9187,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "tempfile", ] @@ -7649,6 +9212,47 @@ dependencies = [ "trait-set", ] +[[package]] +name = "nested_enum_utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f256ef99e7ac37428ef98c89bef9d84b590172de4bbfbe81b68a4cd3abadb32" +dependencies = [ + "proc-macro-crate 3.4.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "nested_enum_utils" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d5475271bdd36a4a2769eac1ef88df0f99428ea43e52dfd8b0ee5cb674695f" +dependencies = [ + "proc-macro-crate 3.4.0", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "netdev" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d" +dependencies = [ + "dlopen2", + "ipnet", + "libc", + "netlink-packet-core", + "netlink-packet-route 0.17.1", + "netlink-sys", + "once_cell", + "system-configuration 0.6.1", + "windows-sys 0.52.0", +] + [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -7674,6 +9278,21 @@ dependencies = [ "netlink-packet-utils", ] +[[package]] +name = "netlink-packet-route" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0800eae8638a299eaa67476e1c6b6692922273e0f7939fd188fc861c837b9cd2" +dependencies = [ + "anyhow", + "bitflags 2.9.4", + "byteorder", + "libc", + "log", + "netlink-packet-core", + "netlink-packet-utils", +] + [[package]] name = "netlink-packet-utils" version = "0.5.2" @@ -7714,6 +9333,37 @@ dependencies = [ "tokio", ] +[[package]] +name = "netwatch" +version = "0.5.0" +dependencies = [ + "atomic-waker", + "bytes", + "cfg_aliases", + "derive_more 1.0.0", + "iroh-quinn-udp", + "js-sys", + "libc", + "n0-future", + "nested_enum_utils 0.2.3", + "netdev", + "netlink-packet-core", + "netlink-packet-route 0.23.0", + "netlink-proto", + "netlink-sys", + "serde", + "snafu", + "socket2 0.5.10", + "time", + "tokio", + "tokio-util 0.7.16", + "tracing", + "web-sys", + "windows 0.59.0", + "windows-result 0.3.4", + "wmi", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -7731,6 +9381,12 @@ dependencies = [ "libc", ] +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -7747,6 +9403,21 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntimestamp" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c50f94c405726d3e0095e89e72f75ce7f6587b94a8bd8dc8054b73f65c0fd68c" +dependencies = [ + "base32", + "document-features", + "getrandom 0.2.16", + "httpdate", + "js-sys", + "once_cell", + "serde", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -7783,6 +9454,22 @@ dependencies = [ "serde", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82c79c15c05d4bf82b6f5ef163104cc81a760d8e874d38ac50ab67c8877b647b" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.6" @@ -7930,6 +9617,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -7937,6 +9628,12 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oneshot" +version = "0.1.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -8050,6 +9747,44 @@ version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct 0.2.0", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "rand_core 0.6.4", + "sha2 0.10.9", +] + [[package]] name = "pairing" version = "0.22.0" @@ -8256,6 +9991,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -8322,7 +10066,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.1", ] [[package]] @@ -8410,6 +10154,48 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkarr" +version = "3.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eb1f2f4311bae1da11f930c804c724c9914cf55ae51a9ee0440fc98826984f7" +dependencies = [ + "async-compat", + "base32", + "bytes", + "cfg_aliases", + "document-features", + "dyn-clone", + "ed25519-dalek", + "futures-buffered", + "futures-lite 2.6.1", + "getrandom 0.2.16", + "log", + "lru 0.13.0", + "ntimestamp", + "reqwest 0.12.24", + "self_cell", + "serde", + "sha1_smol", + "simple-dns", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.10", + "pkcs8 0.10.2", + "spki 0.7.3", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -8436,6 +10222,48 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "pnet_base" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" +dependencies = [ + "no-std-net", +] + +[[package]] +name = "pnet_macros" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688b17499eee04a0408aca0aa5cba5fc86401d7216de8a63fdf7a4c227871804" +dependencies = [ + 
"proc-macro2", + "quote", + "regex", + "syn 2.0.106", +] + +[[package]] +name = "pnet_macros_support" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eea925b72f4bd37f8eab0f221bbe4c78b63498350c983ffa9dd4bcde7e030f56" +dependencies = [ + "pnet_base", +] + +[[package]] +name = "pnet_packet" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a005825396b7fe7a38a8e288dbc342d5034dac80c15212436424fef8ea90ba" +dependencies = [ + "glob", + "pnet_base", + "pnet_macros", + "pnet_macros_support", +] + [[package]] name = "polling" version = "2.8.0" @@ -8489,6 +10317,43 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portmapper" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d6db66007eac4a0ec8331d0d20c734bd64f6445d64bbaf0d0a27fea7a054e36" +dependencies = [ + "base64 0.22.1", + "bytes", + "derive_more 1.0.0", + "futures-lite 2.6.1", + "futures-util", + "hyper-util", + "igd-next 0.16.2", + "iroh-metrics", + "libc", + "nested_enum_utils 0.2.3", + "netwatch", + "num_enum", + "rand 0.8.5", + "serde", + "smallvec", + "snafu", + "socket2 0.5.10", + "time", + "tokio", + "tokio-util 0.7.16", + "tower-layer", + "tracing", + "url", +] + [[package]] name = "positioned-io" version = "0.3.5" @@ -8509,9 +10374,22 @@ dependencies = [ "cobs", "embedded-io 0.4.0", "embedded-io 0.6.1", + "heapless", + "postcard-derive", "serde", ] +[[package]] +name = "postcard-derive" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0232bd009a197ceec9cc881ba46f727fcd8060a2d8d6a9dde7a69030a6fe2bb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "potential_utf" version = "0.1.3" @@ -8536,6 +10414,40 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "precis-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c2e7b31f132e0c6f8682cfb7bf4a5340dbe925b7986618d0826a56dfe0c8e56" +dependencies = [ + "precis-tools", + "ucd-parse", + "unicode-normalization", +] + +[[package]] +name = "precis-profiles" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e2768890a47af73a032af9f0cedbddce3c9d06cf8de201d5b8f2436ded7674" +dependencies = [ + "lazy_static", + "precis-core", + "precis-tools", + "unicode-normalization", +] + +[[package]] +name = "precis-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cc1eb2d5887ac7bfd2c0b745764db89edb84b856e4214e204ef48ef96d10c4a" +dependencies = [ + "lazy_static", + "regex", + "ucd-parse", +] + [[package]] name = "precomputed-hash" version = "0.1.1" @@ -8572,6 +10484,15 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve 0.13.8", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -8605,16 +10526,42 @@ dependencies = [ "toml_edit 0.23.7", ] +[[package]] +name = "proc-macro-error" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7" +dependencies = [ + "proc-macro-error-attr 0.4.12", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + [[package]] name = "proc-macro-error" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ - "proc-macro-error-attr", + "proc-macro-error-attr 1.0.4", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de" +dependencies = [ "proc-macro2", "quote", "syn 1.0.109", + "syn-mid", "version_check", ] @@ -8629,6 +10576,34 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro2" version = "1.0.101" @@ -8721,6 +10696,8 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ + "bit-set 0.8.0", + "bit-vec 0.8.0", "bitflags 2.9.4", "lazy_static", "num-traits", @@ -8728,6 +10705,8 @@ dependencies = [ "rand_chacha 0.9.0", "rand_xorshift 0.4.0", "regex-syntax", + "rusty-fork", + "tempfile", "unarray", ] @@ -8842,6 +10821,52 @@ dependencies = [ "wasmtime-math", ] +[[package]] +name = "quic-rpc" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18bad98bd048264ceb1361ff9d77a031535d8c1e3fe8f12c6966ec825bf68eb7" +dependencies = [ + "anyhow", + "bytes", + "document-features", + "flume 0.11.1", + "futures-lite 2.6.1", + "futures-sink", + "futures-util", + "iroh-quinn", + "pin-project", + "postcard", + "rcgen 0.13.2", + "rustls 0.23.32", + "serde", + "slab", + "smallvec", + "time", + "tokio", + "tokio-serde", + "tokio-util 0.7.16", + "tracing", +] + +[[package]] +name = "quic-rpc-derive" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf13f1bced5f2f2642d9d89a29d75f2d81ab34c4acfcb434c209d6094b9b2b7" +dependencies = [ + "proc-macro2", + "quic-rpc", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -8976,6 +11001,16 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "quoted-string-parser" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0dc75379cdb451d001f1cb667a9f74e8b355e9df84cc5193513cbe62b96fc5e9" +dependencies = [ + "pest", + "pest_derive", +] + [[package]] name = "r-efi" version = "5.3.0" @@ -8997,6 +11032,7 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", + "serde", ] [[package]] @@ -9084,6 +11120,18 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "range-collections" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "861706ea9c4aded7584c5cd1d241cec2ea7f5f50999f236c22b65409a1f1a0d0" +dependencies = [ + "binary-merge", + "inplace-vec-builder", + "ref-cast", + "smallvec", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -9122,6 +11170,174 @@ dependencies = [ "yasna", ] +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem 3.0.6", + "ring 0.17.14", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "recall_actor_sdk" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actor_adm", + "fil_actors_runtime", + "fvm_ipld_encoding 0.5.3", + "fvm_sdk", + "fvm_shared", + "num-traits", + "recall_sol_facade", + "serde", +] + +[[package]] +name = "recall_entangler" +version = "0.1.0" +source = "git+https://github.com/recallnet/entanglement.git?rev=aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc#aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "cid 0.10.1", + "futures", + "iroh", + "iroh-blobs", + "recall_entangler_storage", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tokio-stream", +] + +[[package]] +name = "recall_entangler_storage" +version = "0.1.0" +source = "git+https://github.com/recallnet/entanglement.git?rev=aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc#aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "cid 0.10.1", + "futures", + "futures-lite 2.6.1", + "iroh", + "iroh-blobs", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "uuid 1.18.1", +] + +[[package]] +name = "recall_executor" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_vm_actor_interface", + "fvm", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "num-traits", + "replace_with", + "tracing", +] + +[[package]] +name = "recall_ipld" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actors_runtime", + "fvm_ipld_amt", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_ipld_hamt", + "fvm_sdk", + "fvm_shared", + "integer-encoding 3.0.4", + "serde", +] + +[[package]] +name = "recall_kernel" +version = "0.1.0" +dependencies = [ + "ambassador 0.3.7", + "anyhow", + "fvm", + "fvm_ipld_blockstore 0.3.1", + "fvm_shared", + "recall_kernel_ops", + "recall_syscalls", +] + +[[package]] +name = "recall_kernel_ops" +version = "0.1.0" +dependencies = [ + "fvm", +] + +[[package]] +name = "recall_sol_facade" +version = "0.1.2" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "anyhow", + "dunce", + "eyre", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "serde", + "serde_json", + "syn 2.0.106", + "thiserror 2.0.17", + "walkdir", +] + +[[package]] +name = "recall_syscalls" +version = "0.1.0" +dependencies = [ + "fvm", + "fvm_shared", + "iroh-blobs", + 
"iroh_manager", + "recall_kernel_ops", + "tokio", + "tracing", +] + +[[package]] +name = "redb" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" +dependencies = [ + "libc", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -9162,6 +11378,18 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "reflink-copy" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23bbed272e39c47a095a5242218a67412a220006842558b03fe2935e8f3d7b92" +dependencies = [ + "cfg-if", + "libc", + "rustix 1.1.2", + "windows 0.62.2", +] + [[package]] name = "regalloc2" version = "0.11.2" @@ -9199,6 +11427,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" + [[package]] name = "regex-syntax" version = "0.8.8" @@ -9431,6 +11665,27 @@ dependencies = [ "serde", ] +[[package]] +name = "rsa" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sha2 0.10.9", + "signature 2.2.0", + "spki 0.7.3", + "subtle", + "zeroize", +] + [[package]] name = "rtnetlink" version = "0.13.1" @@ -9441,7 +11696,7 @@ dependencies = [ "futures", "log", "netlink-packet-core", - "netlink-packet-route", + "netlink-packet-route 0.17.1", "netlink-packet-utils", "netlink-proto", "netlink-sys", @@ -9450,6 +11705,40 @@ dependencies = [ "tokio", ] +[[package]] +name = "ruint" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "ark-ff 0.5.0", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp 0.5.2", + "ruint-macro", + "serde_core", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust-embed" version = "6.8.1" @@ -9518,13 +11807,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.27", ] [[package]] @@ -9619,6 +11917,7 @@ version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ + "log", "once_cell", "ring 
0.17.14", "rustls-pki-types", @@ -9636,7 +11935,7 @@ dependencies = [ "openssl-probe", "rustls 0.19.1", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] @@ -9648,7 +11947,19 @@ dependencies = [ "openssl-probe", "rustls-pemfile", "schannel", - "security-framework", + "security-framework 2.11.1", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework 3.5.1", ] [[package]] @@ -9670,6 +11981,33 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.32", + "rustls-native-certs 0.8.2", + "rustls-platform-verifier-android", + "rustls-webpki 0.103.7", + "security-framework 3.5.1", + "security-framework-sys", + "webpki-root-certs 0.26.11", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -9680,6 +12018,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring 0.17.14", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustls-webpki" version = "0.103.7" @@ -9697,6 +12046,18 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" +[[package]] +name = "rusty-fork" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "rw-stream-sink" version = "0.4.0" @@ -9892,7 +12253,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.9.4", - "core-foundation", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -9908,6 +12282,21 @@ dependencies = [ "libc", ] +[[package]] +name = "self_cell" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c2f82143577edb4921b71ede051dac62ca3c16084e918bf7b40c96ae10eb33" + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" 
+dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.27" @@ -9918,6 +12307,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -9949,6 +12347,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde-error" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "342110fb7a5d801060c885da03bf91bfa7c7ca936deafcc64bb6706375605d47" +dependencies = [ + "serde", +] + [[package]] name = "serde_bytes" version = "0.11.19" @@ -10169,6 +12576,16 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct 0.2.0", + "serde", +] + [[package]] name = "serial_test" version = "3.2.0" @@ -10205,6 +12622,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.9.9" @@ -10263,6 +12686,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -10326,6 +12759,21 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "simple-dns" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee851d0e5e7af3721faea1843e8015e820a234f81fda3dea9247e15bac9a86a" +dependencies = [ + "bitflags 2.9.4", +] + [[package]] name = "simple_asn1" version = "0.6.3" @@ -10386,6 +12834,27 @@ dependencies = [ "futures-lite 2.6.1", ] +[[package]] +name = "snafu" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "snap" version = "1.1.1" @@ -10404,7 +12873,7 @@ dependencies = [ "curve25519-dalek", "rand_core 0.6.4", "ring 0.17.14", - "rustc_version", + "rustc_version 0.4.1", "sha2 0.10.9", "subtle", ] @@ -10468,6 +12937,12 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.6.0" @@ 
-10494,6 +12969,48 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" +[[package]] +name = "ssh-cipher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" +dependencies = [ + "cipher", + "ssh-encoding", +] + +[[package]] +name = "ssh-encoding" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9242b9ef4108a78e8cd1a2c98e193ef372437f8c22be363075233321dd4a15" +dependencies = [ + "base64ct", + "pem-rfc7468", + "sha2 0.10.9", +] + +[[package]] +name = "ssh-key" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" +dependencies = [ + "ed25519-dalek", + "p256", + "p384", + "p521", + "rand_core 0.6.4", + "rsa", + "sec1 0.7.3", + "sha2 0.10.9", + "signature 2.2.0", + "ssh-cipher", + "ssh-encoding", + "subtle", + "zeroize", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -10546,7 +13063,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "semver", + "semver 1.0.27", "serde", "serde_json", "sha2 0.10.9", @@ -10694,7 +13211,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", - "proc-macro-error", + "proc-macro-error 1.0.4", "proc-macro2", "quote", "syn 1.0.109", @@ -10722,6 +13239,30 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "stun-rs" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb921f10397d5669e1af6455e9e2d367bf1f9cebcd6b1dd1dc50e19f6a9ac2ac" +dependencies = [ + "base64 0.22.1", + "bounded-integer", + "byteorder", + "crc", + "enumflags2", + "fallible-iterator", + "hmac-sha1", + "hmac-sha256", + "hostname-validator", + "lazy_static", + "md5", + "paste", + "precis-core", + "precis-profiles", + "quoted-string-parser", + "rand 0.9.2", +] + [[package]] name = "substrate-bn" version = "0.6.0" @@ -10756,6 +13297,22 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +[[package]] +name = "surge-ping" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27ea7b4bfbd3d9980392cd9f90e4158212a5f775fa58e9b85216a0bf739067d" +dependencies = [ + "hex", + "parking_lot", + "pnet_packet", + "rand 0.9.2", + "socket2 0.6.1", + "thiserror 1.0.69", + "tokio", + "tracing", +] + [[package]] name = "svm-rs" version = "0.3.5" @@ -10767,7 +13324,7 @@ dependencies = [ "hex", "once_cell", "reqwest 0.11.27", - "semver", + "semver 1.0.27", "serde", "serde_json", "sha2 0.10.9", @@ -10798,6 +13355,29 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-mid" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea305d57546cc8cd04feb14b62ec84bf17f50e3f7b12560d7bfa9265f39d9ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "syn-solidity" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4e6eed052a117409a1a744c8bda9c3ea6934597cf7419f791cb7d590871c4c" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.106", +] + 
[[package]] name = "sync_wrapper" version = "0.1.2" @@ -10843,7 +13423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys 0.5.0", ] @@ -10854,7 +13434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.9.4", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys 0.6.0", ] @@ -10878,6 +13458,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -11062,7 +13648,7 @@ dependencies = [ "hyper-rustls 0.22.1", "peg", "pin-project", - "semver", + "semver 1.0.27", "serde", "serde_bytes", "serde_json", @@ -11180,6 +13766,7 @@ checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", + "js-sys", "num-conv", "powerfmt", "serde", @@ -11326,8 +13913,20 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.32", - "tokio", + "rustls 0.23.32", + "tokio", +] + +[[package]] +name = "tokio-serde" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf600e7036b17782571dd44fa0a5cea3c82f60db5137f774a325a76a0d6852b" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project", ] [[package]] @@ -11339,6 +13938,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.16", ] [[package]] @@ -11406,8 +14006,32 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", + "futures-util", "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "tokio-websockets" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-sink", + "getrandom 0.3.4", + "http 1.3.1", + "httparse", + "rand 0.9.2", + "ring 0.17.14", + "rustls-pki-types", + "simdutf8", "tokio", + "tokio-rustls 0.26.4", + "tokio-util 0.7.16", ] [[package]] @@ -11767,6 +14391,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber 0.3.20", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote", + "syn 2.0.106", +] + [[package]] name = "trait-set" version = "0.3.0" @@ -11866,6 +14511,15 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" +[[package]] +name = "ucd-parse" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "c06ff81122fcbf4df4c1660b15f7e3336058e7aec14437c9f85c6b31a0f279b9" +dependencies = [ + "regex-lite", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -11914,6 +14568,15 @@ version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" +[[package]] +name = "unicode-normalization" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -12090,6 +14753,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "waker-fn" version = "1.2.0" @@ -12281,7 +14953,7 @@ checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" dependencies = [ "bitflags 2.9.4", "indexmap 2.11.4", - "semver", + "semver 1.0.27", ] [[package]] @@ -12293,7 +14965,7 @@ dependencies = [ "bitflags 2.9.4", "hashbrown 0.15.5", "indexmap 2.11.4", - "semver", + "semver 1.0.27", "serde", ] @@ -12512,6 +15184,24 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "webpki-root-certs" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" +dependencies = [ + "webpki-root-certs 1.0.4", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webpki-roots" version = "0.21.1" @@ -12527,6 +15217,15 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.3", +] + [[package]] name = "webpki-roots" version = "1.0.3" @@ -12617,6 +15316,59 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f919aee0a93304be7f62e8e5027811bbba96bcb1de84d6618be56e43f8a32a1" +dependencies = [ + "windows-core 0.59.0", + "windows-targets 0.53.5", +] + +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections 0.2.0", + "windows-core 0.61.2", + "windows-future 0.2.1", + "windows-link 0.1.3", + "windows-numerics 0.2.0", +] + +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections 0.3.2", + "windows-core 0.62.2", + "windows-future 
0.3.2", + "windows-numerics 0.3.1", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", +] + +[[package]] +name = "windows-collections" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" +dependencies = [ + "windows-core 0.62.2", +] + [[package]] name = "windows-core" version = "0.53.0" @@ -12627,19 +15379,78 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce" +dependencies = [ + "windows-implement 0.59.0", + "windows-interface", + "windows-result 0.3.4", + "windows-strings 0.3.1", + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement 0.60.2", + "windows-interface", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + [[package]] name = "windows-core" version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ - "windows-implement", + "windows-implement 0.60.2", "windows-interface", "windows-link 0.2.1", "windows-result 0.4.1", "windows-strings 0.5.1", ] +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.3", + "windows-threading 0.1.0", +] + +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading 0.2.1", +] + +[[package]] +name = "windows-implement" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "windows-implement" version = "0.60.2" @@ -12674,6 +15485,26 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", +] + [[package]] name = "windows-registry" version = "0.5.3" @@ -12712,6 +15543,15 @@ dependencies = [ "windows-link 0.2.1", ] 
+[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link 0.1.3", +] + [[package]] name = "windows-strings" version = "0.4.2" @@ -12730,6 +15570,15 @@ dependencies = [ "windows-link 0.2.1", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -12775,6 +15624,21 @@ dependencies = [ "windows-link 0.2.1", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -12823,6 +15687,30 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -12841,6 +15729,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -12859,6 +15753,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -12889,6 +15789,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -12907,6 +15813,12 @@ version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -12925,6 +15837,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -12943,6 +15861,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -13001,6 +15925,21 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +[[package]] +name = "wmi" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7787dacdd8e71cbc104658aade4009300777f9b5fda6a75f19145fedb8a18e71" +dependencies = [ + "chrono", + "futures", + "log", + "serde", + "thiserror 2.0.17", + "windows 0.59.0", + "windows-core 0.59.0", +] + [[package]] name = "writeable" version = "0.6.1" @@ -13018,7 +15957,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.1", "send_wrapper 0.6.0", "thiserror 2.0.17", "wasm-bindgen", @@ -13168,7 +16107,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ca6c5a4d66c1a9ea261811cf4773c27343de7e5033e1b75ea3f297dc7db3c1a" dependencies = [ - "flume", + "flume 0.10.14", "scopeguard", ] @@ -13196,6 +16135,12 @@ dependencies = [ "synstructure 0.13.2", ] +[[package]] +name = "z32" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2164e798d9e3d84ee2c91139ace54638059a3b23e361f5c11781c2c6459bde0f" + [[package]] name = "zerocopy" version = "0.8.27" diff --git a/Cargo.toml b/Cargo.toml index 8a30f3afd3..44e7e58660 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "ipc/api", "ipc/types", "ipc/observability", + "ipc-decentralized-storage", # ipld "ipld/resolver", @@ -44,6 +45,30 @@ members = [ "fendermint/actors/eam", "fendermint/actors/f3-light-client", "fendermint/actors/gas_market/eip1559", + # recall actors + "fendermint/actors/adm_types", # fil_actor_adm - ADM types + "fendermint/actors/adm", # ADM actor + "fendermint/actors/machine", # Machine base trait + "fendermint/actors/blobs", + "fendermint/actors/blobs/shared", + "fendermint/actors/blobs/testing", + "fendermint/actors/blob_reader", + "fendermint/actors/bucket", # S3-like object storage + "fendermint/actors/timehub", # Timestamping service + "fendermint/actors/recall_config", + "fendermint/actors/recall_config/shared", + + # recall storage (netwatch patched for socket2 0.5 compatibility!) 
+ "recall/kernel", + "recall/kernel/ops", + "recall/syscalls", + "recall/executor", + "recall/iroh_manager", + "recall/ipld", + "recall/actor_sdk", + + # recall contracts (vendored locally, FVM 4.7 upgrade) + "recall-contracts/crates/facade", "build-rs-utils", "contracts-artifacts", @@ -70,6 +95,7 @@ axum = { version = "0.6", features = ["ws"] } base64 = "0.21" bollard = "0.15" blake2b_simd = "1.0" +blake3 = "1.5" bloom = "0.3" bytes = "1.4" clap = { version = "4.1", features = ["derive", "env", "string"] } @@ -77,6 +103,7 @@ color-eyre = "0.5.11" byteorder = "1.5.0" config = "0.13" const-hex = "1.14.0" +data-encoding = "2.3.3" dirs = "5.0" dircpy = "0.3.19" either = "1.10" @@ -96,6 +123,15 @@ hex-literal = "0.4.1" http = "0.2.12" im = "15.1.0" integer-encoding = { version = "3.0.3", default-features = false } +# Recall/Iroh dependencies +ambassador = "0.3.5" +iroh = "0.35" +iroh-base = "0.35" +iroh-blobs = { version = "0.35", features = ["rpc"] } +iroh-relay = "0.35" +iroh-quinn = { version = "0.13" } +n0-future = "0.1.2" +quic-rpc = { version = "0.20", features = ["quinn-transport"] } jsonrpc-v2 = { version = "0.11", default-features = false, features = [ "bytes-v10", ] } @@ -147,8 +183,19 @@ quickcheck_macros = "1" rand = "0.8" rand_chacha = "0.3" regex = "1" +replace_with = "0.1.7" statrs = "0.18.0" reqwest = { version = "0.11.13", features = ["json"] } +# Recall entanglement library +entangler = { package = "recall_entangler", git = "https://github.com/recallnet/entanglement.git", rev = "aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" } +entangler_storage = { package = "recall_entangler_storage", git = "https://github.com/recallnet/entanglement.git", rev = "aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" } +# Objects HTTP API dependencies +warp = "0.3" +uuid = { version = "1.0", features = ["v4"] } +mime_guess = "2.0" +urlencoding = "2.1" +# Recall Solidity facades (vendored locally, upgraded to FVM 4.7) +recall_sol_facade = { path = "recall-contracts/crates/facade" } sha2 = "0.10" serde = { version = "1.0.217", features = ["derive"] } serde_bytes = "0.11" @@ -223,6 +270,7 @@ fvm_ipld_amt = "0.7.4" # NOTE: Using master branch instead of v17.0.0 tag due to serde dependency fixes # Master is currently at commit 2f040c12 which fixes the serde::__private::PhantomData import issue fil_actors_evm_shared = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } +fil_actor_adm = { path = "fendermint/actors/adm_types" } fil_actor_eam = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } fil_actor_evm = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } fil_actors_runtime = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } @@ -231,6 +279,7 @@ cid = { version = "0.11", default-features = false, features = [ "serde-codec", "std", ] } +multihash-codetable = "0.1" frc42_dispatch = { path = "./ext/frc42_dispatch" } @@ -249,6 +298,11 @@ tendermint-proto = { version = "0.31" } [patch.crates-io] # Using latest FVM to match builtin-actors v17.0.0 requirements fvm = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } + +# Fix netwatch socket2 0.5 compatibility (macOS BSD sockets) +# Patched version with socket2 0.5+ API fixes +netwatch = { path = "patches/netwatch" } + fvm_shared = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } fvm_sdk = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } fvm_ipld_blockstore 
= { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" }
diff --git a/RECALL_BUCKET.md b/RECALL_BUCKET.md
new file mode 100644
index 0000000000..6572427082
--- /dev/null
+++ b/RECALL_BUCKET.md
@@ -0,0 +1,267 @@
+# Recall Bucket Storage Guide (Path-Based Access)
+
+## Configuration
+
+```bash
+# From RECALL_RUN.md
+export TENDERMINT_RPC=http://localhost:26657
+export OBJECTS_LISTEN_ADDR=http://localhost:8080
+export NODE_OPERATION_OBJECT_API=http://localhost:8081
+export ETH_RPC=http://localhost:8545
+export BLOBS_ACTOR=0x6d342defae60f6402aee1f804653bbae4e66ae46
+export ADM_ACTOR=0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28
+
+# Your credentials
+export USER_SK=
+export USER_ADDR=
+```
+
+## 0. Start the Node and Gateway
+
+```bash
+cargo build --release -p ipc-decentralized-storage --bin gateway --bin node
+
+# prepare to start the node
+export FM_NETWORK=test
+# validator bls key file in hex format
+export BLS_KEY_FILE=./test-network/bls_key.hex
+# fendermint secret key file
+export SECRET_KEY_FILE=./test-network/keys/alice.sk
+
+# register as a storage node operator
+./target/release/node register-operator --bls-key-file $BLS_KEY_FILE --secret-key-file $SECRET_KEY_FILE --operator-rpc-url $NODE_OPERATION_OBJECT_API
+
+# start the node
+./target/release/node run \
+  --secret-key-file ./test-network/bls_key.hex \
+  --iroh-path ./iroh_node \
+  --iroh-v4-addr 0.0.0.0:11204 \
+  --rpc-url http://localhost:26657 \
+  --batch-size 10 \
+  --poll-interval-secs 5 \
+  --max-concurrent-downloads 10 \
+  --rpc-bind-addr 127.0.0.1:8081
+
+# start the gateway
+./target/release/gateway --bls-key-file $BLS_KEY_FILE --secret-key-file $SECRET_KEY_FILE --iroh-path ./iroh_gateway --objects-listen-addr 0.0.0.0:8080
+```
+
+### Sanity Check: Download a Blob
+
+Verify the node is serving content by downloading a blob over the HTTP API (this assumes a `BLOB_HASH` from the RECALL_RUN.md upload flow):
+
+```bash
+# Download the blob
+curl $NODE_OPERATION_OBJECT_API/v1/blobs/${BLOB_HASH#0x}/content
+# You should see the original file
+```
+
+---
+
+## 1. Create a Bucket
+
+First, create a bucket via the ADM (Actor Deployment Manager):
+
+```bash
+# Buy 0.1 FIL worth of credits
+cast send $BLOBS_ACTOR "buyCredit()" \
+  --value 0.1ether \
+  --private-key $USER_SK \
+  --rpc-url $ETH_RPC
+
+# Create a new bucket (caller becomes owner)
+TX_RESULT=$(cast send $ADM_ACTOR "createBucket()" \
+  --private-key $USER_SK \
+  --rpc-url $ETH_RPC \
+  --json)
+
+echo $TX_RESULT | jq '.'
+
+# Extract bucket address from MachineInitialized event
+# Event signature: MachineInitialized(uint8 indexed kind, address machineAddress)
+# (the address is the last 20 bytes of the 32-byte data word, i.e. characters 27-66)
+BUCKET_ADDR=$(echo $TX_RESULT | jq -r '.logs[] | select(.topics[0] == "0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e") | .data' | cut -c27-66)
+BUCKET_ADDR="0x$BUCKET_ADDR"
+
+echo "Bucket created at: $BUCKET_ADDR"
+export BUCKET_ADDR
+```
+
+## 2. Upload and Register an Object
+
+### Step 2a: Upload file to Iroh (same as basic flow)
+
+```bash
+# Create a test file
+echo "Hello from bucket storage!" > myfile.txt
+
+# Get file size
+BLOB_SIZE=$(stat -f%z myfile.txt 2>/dev/null || stat -c%s myfile.txt)
+
+# Upload to Iroh via the gateway
+UPLOAD_RESPONSE=$(curl -s -X POST $OBJECTS_LISTEN_ADDR/v1/objects \
+  -F "size=${BLOB_SIZE}" \
+  -F "data=@myfile.txt")
+
+echo $UPLOAD_RESPONSE | jq '.'
+
+# Extract hashes
+BLOB_HASH_B32=$(echo $UPLOAD_RESPONSE | jq -r '.hash')
+METADATA_HASH_B32=$(echo $UPLOAD_RESPONSE | jq -r '.metadata_hash // .metadataHash')
+NODE_ID_BASE32=$(curl -s $OBJECTS_LISTEN_ADDR/v1/node | jq -r '.node_id')
+
+# Convert to hex (same as RECALL_RUN.md)
+export BLOB_HASH=$(python3 -c "
+import base64
+h = '$BLOB_HASH_B32'.upper()
+padding = (8 - len(h) % 8) % 8
+h = h + '=' * padding
+decoded = base64.b32decode(h)
+if len(decoded) > 32:
+    decoded = decoded[:32]
+elif len(decoded) < 32:
+    decoded = decoded + b'\x00' * (32 - len(decoded))
+print('0x' + decoded.hex())
+")
+
+export METADATA_HASH=$(python3 -c "
+import base64
+h = '$METADATA_HASH_B32'.upper()
+padding = (8 - len(h) % 8) % 8
+h = h + '=' * padding
+decoded = base64.b32decode(h)
+if len(decoded) > 32:
+    decoded = decoded[:32]
+elif len(decoded) < 32:
+    decoded = decoded + b'\x00' * (32 - len(decoded))
+print('0x' + decoded.hex())
+")
+
+export SOURCE_NODE="0x$NODE_ID_BASE32"
+
+echo "Blob Hash: $BLOB_HASH"
+echo "Metadata Hash: $METADATA_HASH"
+echo "Source Node: $SOURCE_NODE"
+```
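+The two Python snippets above are identical apart from their input. As a sketch, they can be folded into a single helper (the name `b32_to_hex32` is hypothetical; assumes `python3` is on `PATH`):
+
+```bash
+# Convert an unpadded base32 string to a 0x-prefixed 32-byte hex value,
+# using the same pad/truncate logic as the inline snippets above.
+b32_to_hex32() {
+  python3 - "$1" <<'PYEOF'
+import base64, sys
+h = sys.argv[1].upper()
+h += '=' * ((8 - len(h) % 8) % 8)
+d = base64.b32decode(h)
+d = d[:32] if len(d) > 32 else d + b'\x00' * (32 - len(d))
+print('0x' + d.hex())
+PYEOF
+}
+
+# Equivalent to the exports above:
+# export BLOB_HASH=$(b32_to_hex32 "$BLOB_HASH_B32")
+# export METADATA_HASH=$(b32_to_hex32 "$METADATA_HASH_B32")
+```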
+### Step 2b: Register object in bucket with a path
+
+```bash
+# Add object with a path-based key
+# Signature: addObject(bytes32 source, string key, bytes32 hash, bytes32 recoveryHash, uint64 size)
+# (the metadata hash is passed as the recovery hash)
+cast send $BUCKET_ADDR "addObject(bytes32,string,bytes32,bytes32,uint64)" \
+  $SOURCE_NODE \
+  "documents/myfile.txt" \
+  $BLOB_HASH \
+  $METADATA_HASH \
+  $BLOB_SIZE \
+  --private-key $USER_SK \
+  --rpc-url $ETH_RPC
+```
+
+## 3. Query Objects
+
+### Get a single object by path
+
+```bash
+# Get object by exact path
+# Returns: ObjectValue(bytes32 blobHash, bytes32 recoveryHash, uint64 size, uint64 expiry, (string,string)[] metadata)
+cast call $BUCKET_ADDR "getObject(string)((bytes32,bytes32,uint64,uint64,(string,string)[]))" "documents/myfile.txt" --rpc-url $ETH_RPC
+```
+
+### List all objects (no filter)
+
+```bash
+# List all objects in bucket
+cast call $BUCKET_ADDR "queryObjects()(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" \
+  --rpc-url $ETH_RPC
+```
+
+### List with prefix (folder-like)
+
+```bash
+# List everything under "documents/"
+cast call $BUCKET_ADDR "queryObjects(string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "documents/" --rpc-url $ETH_RPC
+```
+
+### List with delimiter (S3-style folder simulation)
+
+```bash
+# List top-level "folders" and files
+# Returns: Query((string,ObjectState)[] objects, string[] commonPrefixes, string nextKey)
+# Where ObjectState = (bytes32 blobHash, uint64 size, uint64 expiry, (string,string)[] metadata)
+cast call $BUCKET_ADDR "queryObjects(string,string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "" "/" \
+  --rpc-url $ETH_RPC
+
+# Example response:
+# ([], ["documents/", "images/"], "")
+#  ^objects at root   ^"folders"   ^nextKey (empty = no more pages)
+
+# Extract blob hash from first object:
+# BLOB_HASH=$(cast call ... | jq -r '.[0][0][1][0]')
+
+# List contents of "documents/" folder
+cast call $BUCKET_ADDR "queryObjects(string,string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "documents/" "/" \
+  --rpc-url $ETH_RPC
+```
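+A worked example of how prefix and delimiter interact (illustrative only; assumes the bucket holds `documents/a.txt`, `documents/b.txt`, and `images/logo.png`, and `ls_folder` is a hypothetical wrapper around the calls above):
+
+```bash
+# queryObjects("", "/")           -> ([], ["documents/", "images/"], "")
+# queryObjects("documents/", "/") -> (both documents/* objects, [], "")
+# queryObjects("documents/")      -> same objects, no folder grouping
+
+# A shallow "ls" of any folder is therefore one call:
+ls_folder() {
+  cast call $BUCKET_ADDR \
+    "queryObjects(string,string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" \
+    "$1" "/" --rpc-url $ETH_RPC
+}
+ls_folder ""            # root level
+ls_folder "documents/"  # one level down
+```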
+### Paginated queries
+
+```bash
+# Query with pagination
+# queryObjects(prefix, delimiter, startKey, limit)
+cast call $BUCKET_ADDR "queryObjects(string,string,string,uint64)" \
+  "documents/" \
+  "/" \
+  "" \
+  100 \
+  --rpc-url $ETH_RPC
+
+# If nextKey is returned, use it for the next page
+cast call $BUCKET_ADDR "queryObjects(string,string,string,uint64)" \
+  "documents/" \
+  "/" \
+  "documents/page2start.txt" \
+  100 \
+  --rpc-url $ETH_RPC
+```
+
+---
+
+## 4. Update Object Metadata
+
+```bash
+# Update metadata for an existing object
+# Set value to empty string to delete a metadata key
+cast send $BUCKET_ADDR "updateObjectMetadata(string,(string,string)[])" \
+  "documents/myfile.txt" \
+  '[("content-type","text/markdown"),("version","2")]' \
+  --private-key $USER_SK \
+  --rpc-url $ETH_RPC
+```
+
+---
+
+## 5. Delete an Object
+
+```bash
+# Delete object by path
+cast send $BUCKET_ADDR "deleteObject(string)" "documents/myfile.txt" \
+  --private-key $USER_SK \
+  --rpc-url $ETH_RPC
+```
+
+---
+
+## 6. Download Content
+
+Downloads still go through the Iroh/Objects API using the blob hash:
+
+```bash
+# First get the object to retrieve its blob hash
+OBJECT_INFO=$(cast call $BUCKET_ADDR "getObject(string)" "documents/myfile.txt" \
+  --rpc-url $ETH_RPC)
+
+# Extract the blob hash from the response and download
+# (the blob hash is the first bytes32 in the response; for the object
+# registered above it matches the $BLOB_HASH exported in Step 2a)
+curl $NODE_OPERATION_OBJECT_API/v1/blobs/${BLOB_HASH#0x}/content
+```
+
+---
\ No newline at end of file
diff --git a/builtin-actors/output/bundle.car b/builtin-actors/output/bundle.car
new file mode 100644
index 0000000000..293176c24a
Binary files /dev/null and b/builtin-actors/output/bundle.car differ
diff --git a/docs/ipc/recall-migration-guide.md b/docs/ipc/recall-migration-guide.md
new file mode 100644
index 0000000000..a2cc0021cb
--- /dev/null
+++ b/docs/ipc/recall-migration-guide.md
@@ -0,0 +1,1350 @@
+# Recall Storage Migration Guide: ipc-recall → main
+
+## Executive Summary
+
+This document outlines the requirements and steps needed to migrate the Recall storage implementation from the `ipc-recall` branch to the `main` branch.
+
+**Branch Status:**
+- `ipc-recall` is **959 commits behind** and **77 commits ahead** of `main`
+- Current commit on `ipc-recall`: `567108af` (fix: non-determinism from actor debug flag)
+- Current commit on `main`: `984fc4a4` (feat: add f3 cert actor)
+
+**Migration Complexity:** High - requires significant reconciliation of architectural changes
+
+---
+
+## Table of Contents
+
+1. [Critical Version Differences](#critical-version-differences)
+2. [Architectural Changes on Main](#architectural-changes-on-main)
+3. [Recall-Specific Components](#recall-specific-components)
+4. [Migration Strategy](#migration-strategy)
+5. [Step-by-Step Migration Plan](#step-by-step-migration-plan)
+6. [Testing Requirements](#testing-requirements)
+7. [Risk Assessment](#risk-assessment)
+8. [Rollback Plan](#rollback-plan)
+
+---
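+Before starting, the ahead/behind counts quoted in the Executive Summary can be recomputed, since they drift as `main` moves (a sketch, assuming the usual `origin` remote):
+
+```bash
+# Prints "<behind> <ahead>" of ipc-recall relative to main (e.g. 959 77):
+git fetch origin main ipc-recall
+git rev-list --left-right --count origin/main...origin/ipc-recall
+
+# Inspect the two boundary commits named above:
+git show --no-patch --oneline 567108af 984fc4a4
+```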
[Rollback Plan](#rollback-plan) + +--- + +## Critical Version Differences + +### FVM (Filecoin Virtual Machine) + +**Current State:** +- `ipc-recall`: FVM **4.3.0** +- `main`: FVM **4.7.4** (updated in #1459) + +**Impact:** HIGH +- FVM upgrade includes API changes, new features, and bug fixes +- Actor code may need updates for new FVM interfaces +- Syscalls and kernel interfaces may have changed + +**Action Required:** +1. Audit all FVM-dependent code in Recall components +2. Update `recall/kernel/`, `recall/syscalls/`, `recall/executor/` for FVM 4.7.4 compatibility +3. Test actor execution with new FVM version +4. Review FVM 4.4, 4.5, 4.6, 4.7 changelogs for breaking changes + +### Rust Toolchain + +**Current State:** +- `ipc-recall`: Rust 1.81.0 (approximately) +- `main`: Rust 1.83.0 (updated in #1385) + +**Impact:** MEDIUM +- New Rust features and lints available +- Dependency version conflicts possible +- Clippy rule changes + +**Action Required:** +1. Update `rust-toolchain.toml` +2. Run `cargo clippy` and fix new warnings +3. Update dependencies for Rust 1.83.0 compatibility + +### Builtin Actors + +**Current State:** +- Builtin actors versions likely diverged significantly + +**Impact:** HIGH +- Core actor interfaces may have changed +- Gateway, Subnet, and Registry contracts updated on main + +**Action Required:** +1. Review builtin actors submodule version on main +2. Test compatibility with Recall actors +3. Update actor interfaces if needed + +### Iroh (P2P Storage Layer) + +**Current State:** +- `ipc-recall`: iroh 0.34.x (updated in #565) +- `main`: Unknown (may be older or removed) + +**Impact:** CRITICAL +- Iroh is fundamental to Recall storage +- API changes between versions can be breaking + +**Action Required:** +1. Verify iroh version compatibility requirements +2. Test iroh_manager with target version +3. Update iroh_blobs API calls if needed + +--- + +## Architectural Changes on Main + +### 1. Workspace Reorganization + +**Changes:** +```diff +- contract-bindings/ (root level) ++ contracts/binding/ (moved under contracts/) + +- build-rs-utils/ (removed) +- contracts-artifacts/ (removed) +``` + +**Impact:** MEDIUM +- Build scripts need updating +- Import paths may need changes +- Cargo workspace configuration different + +**Migration Required:** +- Update `Cargo.toml` workspace members list +- Fix contract binding imports throughout Recall code +- Update build scripts in recall actors + +### 2. Contract Bindings Refactoring (#1290) + +**Changes:** +- Contract bindings moved to `contracts/binding/` +- Build process standardized +- Error parsing improvements + +**Impact:** MEDIUM +- Any Recall code importing contract bindings needs path updates +- Blobs actor Solidity facade may need updates + +**Migration Required:** +- Update import statements in: + - `fendermint/actors/blobs/src/sol_facade/` + - `fendermint/actors/bucket/src/sol_facade.rs` + - `fendermint/actors/recall_config/src/sol_facade.rs` + +### 3. Actors Builder Refactoring (#1300) + +**Changes:** +- New actor building and bundling system +- Custom actors bundle generation updated + +**Impact:** HIGH +- Recall actors need to integrate with new build system +- Custom actor manifest may need updates + +**Migration Required:** +- Update `fendermint/actors/src/manifest.rs` to include Recall actors +- Ensure Recall actors are included in `custom_actors_bundle.car` +- Test actor loading and initialization + +### 4. 
F3 Cert Actor Addition (#1438) + +**Changes:** +- New F3 (Fast Finality) certificate actor added +- Genesis and actor initialization updated + +**Impact:** LOW +- Doesn't directly affect Recall, but changes genesis flow + +**Migration Required:** +- Ensure Recall actors initialize properly with F3 actor present +- Test genesis with all actors + +### 5. Observability Refinements (#1085, #1207) + +**Changes:** +- Metrics scheme migrated +- Logging levels refactored +- Tracing improvements + +**Impact:** MEDIUM +- Recall observability code may need updates + +**Migration Required:** +- Update metrics in `fendermint/vm/iroh_resolver/src/observe.rs` +- Update blobs actor metrics +- Verify logging works with new scheme + +### 6. IPC CLI UI (#1401) + +**Changes:** +- New CLI interface and commands +- Node management commands added + +**Impact:** LOW (unless Recall adds CLI commands) + +**Migration Required:** +- Consider adding Recall-specific CLI commands for: + - Blob management + - Storage statistics + - Node diagnostics + +--- + +## Recall-Specific Components + +### Core Recall Modules (in `recall/`) + +#### 1. `recall/kernel/` +**Purpose:** Custom FVM kernel with Recall-specific operations + +**Files:** +- `src/lib.rs` - RecallKernel implementation +- `ops/src/lib.rs` - RecallOps trait + +**Dependencies:** +- `fvm` 4.3.0 → needs upgrade to 4.7.4 +- `fvm_shared`, `fvm_ipld_blockstore` + +**Migration Concerns:** +- Kernel API changes in FVM 4.7.4 +- Syscall linker interface updates +- Block operations compatibility + +#### 2. `recall/syscalls/` +**Purpose:** Syscall implementations for blob operations + +**Files:** +- `src/lib.rs` - delete_blob syscall + +**Dependencies:** +- `iroh_blobs` - RPC client for blob deletion +- `iroh_manager` - connection management + +**Migration Concerns:** +- Syscall signature changes in new FVM +- Iroh RPC client compatibility + +#### 3. `recall/executor/` +**Purpose:** Custom executor with gas allowances for storage + +**Files:** +- `src/lib.rs` - RecallExecutor implementation +- `outputs.rs` - Gas calculation logic + +**Dependencies:** +- `fvm`, `fvm_shared` - needs FVM upgrade +- `fendermint_actor_blobs_shared` - gas allowance types + +**Migration Concerns:** +- Executor interface changes in FVM 4.7.4 +- Gas calculation compatibility +- Actor method invocation updates + +#### 4. `recall/iroh_manager/` +**Purpose:** Iroh node management and blob operations + +**Files:** +- `src/lib.rs` - Helper functions for hash sequences +- `src/manager.rs` - IrohManager with RPC server +- `src/node.rs` - IrohNode wrapper + +**Dependencies:** +- `iroh` 0.34.x - P2P networking +- `iroh_blobs` - blob storage protocol +- `quic_rpc` - RPC transport + +**Migration Concerns:** +- Iroh version compatibility (critical) +- RPC protocol changes +- Endpoint and relay configuration + +#### 5. `recall/ipld/` +**Purpose:** Custom IPLD data structures (AMT, HAMT) + +**Files:** +- `src/amt/` - Array Mapped Trie +- `src/hamt/` - Hash Array Mapped Trie + +**Dependencies:** +- `fvm_ipld_blockstore`, `fvm_ipld_encoding` +- `fvm_shared` - actor error types + +**Migration Concerns:** +- IPLD encoding compatibility +- Blockstore interface changes + +#### 6. 
`recall/actor_sdk/` +**Purpose:** SDK for actors using Recall storage + +**Files:** +- `src/lib.rs` - Public exports +- `src/caller.rs` - Actor caller utilities +- `src/evm.rs` - EVM integration +- `src/storage.rs` - Storage syscall wrapper +- `src/util.rs` - Helper functions + +**Dependencies:** +- `fvm_sdk` - needs FVM upgrade + +**Migration Concerns:** +- SDK API changes in new FVM +- Actor calling conventions + +### Fendermint Actors (in `fendermint/actors/`) + +#### 7. `fendermint/actors/blobs/` +**Purpose:** Main Blobs actor for storage management + +**Structure:** +``` +blobs/ +├── Cargo.toml +├── shared/ # Shared types and traits +├── src/ +│ ├── actor/ # Actor methods (user, admin, system) +│ ├── caller.rs # Caller authentication +│ ├── state/ # State management +│ └── sol_facade/ # Solidity interface +└── testing/ # Test utilities +``` + +**Key Features:** +- Blob subscription management +- Credit and gas allowance system +- TTL and expiry tracking +- Status tracking (Added, Pending, Resolved, Failed) + +**Migration Concerns:** +- Contract binding imports (sol_facade) +- Actor interface registration +- State serialization compatibility +- Integration with FVM executor + +#### 8. `fendermint/actors/blob_reader/` +**Purpose:** Read-only access to blob data + +**Migration Concerns:** +- Actor method registration +- Query interface compatibility + +#### 9. `fendermint/actors/bucket/` +**Purpose:** S3-like bucket abstraction over blobs + +**Migration Concerns:** +- Object key management +- Blob ownership model +- Solidity facade updates + +#### 10. `fendermint/actors/recall_config/` +**Purpose:** Network-wide Recall configuration + +**Migration Concerns:** +- Configuration parameter compatibility +- Governance integration + +### VM Components + +#### 11. `fendermint/vm/iroh_resolver/` +**Purpose:** Blob resolution and vote tallying + +**Structure:** +``` +iroh_resolver/ +├── src/ +│ ├── iroh.rs # Resolution logic +│ ├── pool.rs # Task pool management +│ ├── observe.rs # Metrics and events +│ └── lib.rs +``` + +**Key Features:** +- Async blob download from Iroh nodes +- Vote casting for resolution +- Retry logic for failed downloads +- Read request handling + +**Migration Concerns:** +- Vote tally integration (uses fendermint_vm_topdown) +- Metrics registration +- Task scheduling +- Iroh client compatibility + +#### 12. `fendermint/vm/interpreter/` (modifications) +**Purpose:** Integration of blob resolution into chain execution + +**Key Changes:** +- Blob pool management +- BlobPending and BlobFinalized message handling +- Proposal validation with vote quorum +- State transitions for blobs + +**Migration Concerns:** +- ChainMessage enum additions +- Interpreter state transaction handling +- Block proposal validation +- Integration with CheckInterpreter + +--- + +## Migration Strategy + +### Approach: Incremental Integration + +We recommend an **incremental integration** approach rather than a full merge: + +1. **Create clean feature branch** from latest main +2. **Port components incrementally** in dependency order +3. **Test each component** before proceeding +4. **Fix compatibility issues** as they arise +5. **Validate integration** with full system tests + +### Why Not Direct Merge? 
+
+❌ **Direct merge would fail because:**
+- 959 commits of divergence = massive conflicts
+- Workspace structure completely reorganized
+- FVM version incompatibility
+- Build system changes throughout
+- Many files moved/renamed/deleted
+
+✅ **Incremental port advantages:**
+- Control over what changes are adopted
+- Easier to test each component
+- Can adapt Recall code to new patterns
+- Clear audit trail of changes
+- Reduced risk of breaking main
+
+---
+
+## Step-by-Step Migration Plan
+
+### Phase 0: Preparation (1-2 days)
+
+**Goal:** Set up environment and understand scope
+
+- [ ] **0.1** Create tracking branch from main: `git checkout -b recall-migration origin/main`
+- [ ] **0.2** Document current test coverage on ipc-recall
+- [ ] **0.3** Review FVM 4.4 → 4.7.4 changelogs
+- [ ] **0.4** Review Iroh 0.34.x requirements and compatibility
+- [ ] **0.5** Set up comparison testing environment
+- [ ] **0.6** Create migration test plan document
+
+### Phase 1: Core Dependencies (2-3 days)
+
+**Goal:** Update low-level dependencies and utilities
+
+#### Step 1.1: Update Recall IPLD Structures
+```bash
+# Port recall/ipld/ to the new workspace (run from the ipc-recall checkout)
+cp -r recall/ipld/ <path-to-main-checkout>/recall/ipld/
+```
+
+**Tasks:**
+- [ ] Update `Cargo.toml` with FVM 4.7.4 dependencies
+- [ ] Fix any IPLD API changes
+- [ ] Run tests: `cargo test -p recall_ipld`
+- [ ] Fix compilation errors
+- [ ] Validate HAMT/AMT functionality (see the sketch below)
+
+**Potential Issues:**
+- `fvm_ipld_encoding` API changes
+- `ActorError` type changes
+- Blockstore interface updates
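+
+For the HAMT validation task above, a round-trip smoke test is usually enough to surface encoding or blockstore API drift early. A minimal sketch, assuming `fvm_ipld_hamt`, `fvm_ipld_blockstore`, and `anyhow` at the versions pinned by the FVM 4.7.4 workspace (exact method signatures may differ between releases):
+
+```rust
+use fvm_ipld_blockstore::MemoryBlockstore;
+use fvm_ipld_hamt::{BytesKey, Hamt};
+
+fn main() -> anyhow::Result<()> {
+    let store = MemoryBlockstore::default();
+
+    // Write a value and flush to obtain a root CID.
+    let mut hamt: Hamt<_, u64> = Hamt::new(&store);
+    hamt.set(BytesKey(b"blob-count".to_vec()), 42)?;
+    let root = hamt.flush()?;
+
+    // Reload from the root CID and confirm the value round-trips.
+    let reloaded: Hamt<_, u64> = Hamt::load(&root, &store)?;
+    assert_eq!(reloaded.get(&BytesKey(b"blob-count".to_vec()))?, Some(&42));
+    Ok(())
+}
+```
+
+The same pattern (write, flush, reload, assert) applies to the AMT; if either step fails to compile after the dependency bump, that pinpoints the changed interface.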
+
+#### Step 1.2: Update Recall Kernel
+```bash
+cp -r recall/kernel/ <path-to-main-checkout>/recall/kernel/
+```
+
+**Tasks:**
+- [ ] Update FVM dependencies to 4.7.4
+- [ ] Update `RecallKernel` trait implementations
+- [ ] Update syscall linker for new FVM
+- [ ] Fix `block_add` operation if API changed
+- [ ] Test kernel operations
+
+**Potential Issues:**
+- Kernel trait signature changes
+- CallManager interface updates
+- Gas charging changes
+
+#### Step 1.3: Update Recall Syscalls
+```bash
+cp -r recall/syscalls/ <path-to-main-checkout>/recall/syscalls/
+```
+
+**Tasks:**
+- [ ] Update FVM SDK to 4.7.4
+- [ ] Verify syscall signature compatibility
+- [ ] Update `delete_blob` implementation
+- [ ] Test syscall registration
+
+**Watch out for:**
+- Syscall context parameter changes
+- Memory access API updates
+
+#### Step 1.4: Update Recall Actor SDK
+```bash
+cp -r recall/actor_sdk/ <path-to-main-checkout>/recall/actor_sdk/
+```
+
+**Tasks:**
+- [ ] Update `fvm_sdk` to 4.7.4
+- [ ] Fix actor calling conventions
+- [ ] Update EVM integration if needed
+- [ ] Test storage syscall wrapper
+
+### Phase 2: Iroh Integration (2-3 days)
+
+**Goal:** Ensure Iroh P2P layer works with target environment
+
+#### Step 2.1: Verify Iroh Version
+```bash
+# Check if main has iroh
+cd <path-to-main-checkout>
+grep -r "iroh" Cargo.toml
+```
+
+**Tasks:**
+- [ ] Determine if Iroh exists on main
+- [ ] If not, add `iroh` and `iroh_blobs` dependencies to workspace
+- [ ] Verify version compatibility (prefer 0.34.x or document upgrade needs)
+- [ ] Test basic Iroh node creation
+
+**Decision Point:**
+- If main has no Iroh: Add it as new dependency
+- If main has old Iroh: Determine upgrade path
+- If main has newer Iroh: Update recall code
+
+#### Step 2.2: Port Iroh Manager
+```bash
+cp -r recall/iroh_manager/ <path-to-main-checkout>/recall/iroh_manager/
+```
+
+**Tasks:**
+- [ ] Update `Cargo.toml` dependencies
+- [ ] Fix Iroh API compatibility issues
+- [ ] Update relay configuration
+- [ ] Test node creation and RPC server
+- [ ] Validate blob upload/download
+
+**Critical Tests:**
+- [ ] Create persistent Iroh node
+- [ ] Upload test blob
+- [ ] Download blob from node ID
+- [ ] RPC client connection
+- [ ] Hash sequence operations
+
+### Phase 3: Recall Executor (3-4 days)
+
+**Goal:** Integrate custom executor with gas allowances
+
+#### Step 3.1: Port Executor Code
+```bash
+cp -r recall/executor/ <path-to-main-checkout>/recall/executor/
+```
+
+**Tasks:**
+- [ ] Update FVM dependencies
+- [ ] Update `RecallExecutor` for FVM 4.7.4 API
+- [ ] Fix `execute_message` signature changes
+- [ ] Update gas calculation logic
+- [ ] Fix `preflight_message` compatibility
+- [ ] Test gas allowance system
+
+**Key Integration Points:**
+- [ ] Verify actor method invocation works
+- [ ] Test gas charging with allowances
+- [ ] Validate sponsor gas mechanics
+- [ ] Ensure BLOBS_ACTOR integration
+
+#### Step 3.2: Update Fendermint App Integration
+
+**Tasks:**
+- [ ] Update `fendermint/app/src/app.rs` to use RecallExecutor
+- [ ] Pass IrohManager to app initialization
+- [ ] Configure executor with engine pool
+- [ ] Test message execution end-to-end
+
+**Files to modify:**
+- `fendermint/app/src/app.rs`
+- `fendermint/app/src/cmd/run.rs`
+
+### Phase 4: Actors (5-7 days)
+
+**Goal:** Port and integrate all Recall actors
+
+#### Step 4.1: Port Blobs Actor (Shared)
+```bash
+cp -r fendermint/actors/blobs/shared/ <path-to-main-checkout>/fendermint/actors/blobs/shared/
+```
+
+**Tasks:**
+- [ ] Update `Cargo.toml`
+- [ ] Fix dependency imports
+- [ ] Compile shared types
+- [ ] Ensure no test failures in shared
+
+#### Step 4.2: Port Blobs Actor (Main)
+```bash
+cp -r fendermint/actors/blobs/src/ <path-to-main-checkout>/fendermint/actors/blobs/src/
+```
+
+**Tasks:**
+- [ ] Update contract binding imports (sol_facade)
+  - Fix path from `ipc_actors_abis` to new location
+- [ ] Update actor registration in manifest
+- [ ] Fix state serialization if needed
+- [ ] Compile all actor methods
+- [ ] Run actor unit tests
+
+**Critical Files:**
+- `src/actor.rs` - Main actor dispatcher
+- `src/state.rs` - State management
+- `src/sol_facade/blobs.rs` - Solidity interface
+
+**Solidity Contract Updates:**
+- [ ] Verify Solidity contracts exist in contracts/
+- [ ] Update ABI paths if contracts moved
+- [ ] Regenerate bindings if needed
+
+#### Step 4.3: Port Bucket Actor
+```bash
+cp -r fendermint/actors/bucket/ <path-to-main-checkout>/fendermint/actors/bucket/
+```
+
+**Tasks:**
+- [ ] Update imports
+- [ ] Fix Solidity facade
+- [ ] Test bucket operations
+
+#### Step 4.4: Port Blob Reader Actor
+```bash
+cp -r fendermint/actors/blob_reader/ <path-to-main-checkout>/fendermint/actors/blob_reader/
+```
+
+**Tasks:**
+- [ ] Update imports
+- [ ] Fix query interfaces
+- [ ] Test read operations
+
+#### Step 4.5: Port Recall Config Actor
+```bash
+cp -r fendermint/actors/recall_config/ <path-to-main-checkout>/fendermint/actors/recall_config/
+```
+
+**Tasks:**
+- [ ] Update imports
+- [ ] Fix Solidity facade
+- [ ] Test config read/write
+
+#### Step 4.6: Update Actor Manifest
+**File:** `fendermint/actors/src/manifest.rs`
+
+**Tasks:**
+- [ ] Add Recall actors to manifest
+- [ ] Set correct actor codes (CIDs)
+- [ ] Register in builtin actors list
+- [ ] Update genesis initialization
+
+**Example:**
+```rust
+pub const BLOBS_ACTOR_NAME: &str = "blobs";
+pub const BUCKET_ACTOR_NAME: &str = "bucket";
+pub const BLOB_READER_ACTOR_NAME: &str = "blob_reader";
+pub const RECALL_CONFIG_ACTOR_NAME: &str = "recall_config";
+```
+
+#### Step 4.7: Update Actor Bundle Build
+**File:** `fendermint/actors/build.rs`
+
+**Tasks:**
+- [ ] Ensure Recall actors included in bundle
+- [ ] Test bundle generation
+- [ ] Verify bundle.car contains Recall actors
+- [ ] Test actor loading from bundle
+
+### Phase 5: VM Integration (4-5 days)
+
+**Goal:** Integrate blob resolution and vote tallying
+
+#### Step 5.1: Port Iroh Resolver
+```bash
+cp -r fendermint/vm/iroh_resolver/ <path-to-main-checkout>/fendermint/vm/iroh_resolver/
+```
+
+**Tasks:**
+- [ ] Update `Cargo.toml` workspace registration
+- [ ] Fix import paths
+- [ ] Update metrics registration (new observability scheme)
+- [ ] Fix vote tally integration
+- [ ] Update Iroh client usage
+- [ ] Test resolution logic
+
+**Files to update:**
+- `src/iroh.rs` - Core resolution
+- `src/pool.rs` - Task pool
+- `src/observe.rs` - Metrics (update to new scheme)
+
+#### Step 5.2: Update Vote Tally (if needed)
+**File:** `fendermint/vm/topdown/src/voting.rs`
+
+**Check:**
+- [ ] Verify blob voting methods exist
+- [ ] Ensure `VoteTally` has blob_votes field
+- [ ] Test vote tallying logic
+
+**If missing:**
+- [ ] Port blob voting code from ipc-recall
+- [ ] Add `add_blob_vote` and `find_blob_quorum`
+- [ ] Update vote gossip protocol
+
+#### Step 5.3: Update Chain Interpreter
+**File:** `fendermint/vm/interpreter/src/chain.rs`
+
+**Tasks:**
+- [ ] Add blob pool fields to ChainEnv
+- [ ] Import BlobPoolItem, PendingBlob, FinalizedBlob
+- [ ] Add blob message handling in `propose()`
+- [ ] Add blob message validation in `check()`
+- [ ] Add blob finalization in `deliver()`
+- [ ] Integrate with vote tally
+
+**Key Sections:**
+```rust
+// In propose():
+//   - Fetch added blobs from state
+//   - Create BlobPending messages
+//   - Fetch finalized blobs from pool
+//   - Create BlobFinalized messages
+
+// In check():
+//   - Validate BlobFinalized has quorum
+//   - Check blob not already finalized
+
+// In deliver():
+//   - Call blobs actor to finalize
+//   - Remove from pool
+```
+
+#### Step 5.4: Update Message Types
+**File:** `fendermint/vm/message/src/chain.rs`
+
+**Tasks:**
+- [ ] Add `ChainMessage::Ipc(IpcMessage::BlobPending(...))`
+- [ ] Add `ChainMessage::Ipc(IpcMessage::BlobFinalized(...))`
+- [ ] Update message serialization
+- [ ] Test message encoding/decoding
+
+**File:** `fendermint/vm/message/src/ipc.rs`
+
+**Tasks:**
+- [ ] Add `IpcMessage::BlobPending` variant
+- [ ] Add `IpcMessage::BlobFinalized` variant
+- [ ] Implement message type methods
+
+#### Step 5.5: Update State Queries
+**File:** `fendermint/vm/interpreter/src/fvm/state/query.rs`
+
+**Tasks:**
+- [ ] Add `get_added_blobs()` function
+- [ ] Add `get_pending_blobs()` function
+- [ ] Add `is_blob_finalized()` function
+- [ ] Query blobs actor state correctly
+
+### Phase 6: Genesis Integration (2-3 days)
+
+**Goal:** Initialize Recall actors at genesis
+
+#### Step 6.1: Update Genesis Configuration
+**File:** `fendermint/vm/genesis/src/lib.rs`
+
+**Tasks:**
+- [ ] Add Recall actor initialization
+- [ ] Set BLOBS_ACTOR_ID
+- [ ] Configure initial credits
+- [ ] Set storage capacity
+
+#### Step 6.2: Test Genesis Creation
+**Tasks:**
+- [ ] Create test genesis with Recall
+- [ ] Verify all actors initialized
+- [ ] Check actor addresses assigned correctly
+- [ ] Validate initial state
+
+### Phase 7: Application Layer (2-3 days)
+
+**Goal:** Integrate with fendermint application
+
+#### Step 7.1: Update App Settings
+**File:** `fendermint/app/settings/src/lib.rs`
+
+**Tasks:**
+- [ ] Add Recall configuration section
+- [ ] Add blob concurrency settings
+- [ ] Add Iroh node configuration
+- [ ] Add resolver settings
+
+#### Step 7.2: Update App Initialization
+**File:** `fendermint/app/src/app.rs`
+
+**Tasks:**
+- [ ] Initialize IrohManager
+- [ ] Start iroh resolver
+- [ ] Configure blob pools
+- [ ] Set up vote tally
+
+#### Step 7.3: Add Objects API (Optional)
+**File:** `fendermint/app/src/cmd/objects.rs`
+
+**Tasks:**
+- [ ] Port upload/download handlers
+- [ ] Port entangler integration
+- [ ] Add HTTP endpoints
+- [ ] Test API functionality
+
+### Phase 8: Contracts Integration (3-4 days)
+
+**Goal:** Deploy and integrate Solidity contracts
+
+#### Step 8.1: Port Solidity Contracts
+**Directory:** `contracts/contracts/`
+
+**Tasks:**
+- [ ] Add Blobs.sol interface/facade
+- [ ] Add Bucket.sol interface
+- [ ] Add RecallConfig.sol interface
+- [ ] Update contract compilation
+- [ ] Generate ABI files
+
+#### Step 8.2: Update Contract Bindings
+**Directory:** `contracts/binding/`
+
+**Tasks:**
+- [ ] Update build.rs to include Recall contracts
+- [ ] Generate Rust bindings
+- [ ] Test binding imports in actors
+- [ ] Verify error parsing
+
+#### Step 8.3: Update Deployment Scripts
+**Directory:** `contracts/tasks/`
+
+**Tasks:**
+- [ ] Add Recall actor deployment scripts (if needed)
+- [ ] Update genesis task
+- [ ] Test contract deployment
+- [ ] Document deployment process
+
+### Phase 9: Testing (5-7 days)
+
+**Goal:** Comprehensive testing of integration
+
+#### Step 9.1: Unit Tests
+**Tasks:**
+- [ ] Run all recall unit tests: `cargo test -p recall_*`
+- [ ] Run actor tests: `cargo test -p fendermint_actor_blobs`
+- [ ] Fix any failing tests
+- [ ] Add new tests for integrations
+
+#### Step 9.2: Integration Tests
+**Tasks:**
+- [ ] Create integration test for full upload flow
+- [ ] Test blob resolution with vote tally
+- [ ] Test blob finalization
+- [ ] Test bucket operations
+- [ ] Test credit system
+
+**Test Scenarios:**
+```rust
+#[tokio::test]
+async fn test_blob_upload_and_resolution() {
+    // 1. Initialize network with Recall actors
+    // 2. Upload blob to client's Iroh node
+    // 3. Register blob with Blobs actor
+    // 4. Validators fetch and vote
+    // 5. Verify quorum reached
+    // 6. Verify blob finalized on-chain
+    // 7. Download blob from validator
+}
+```
+
+#### Step 9.3: End-to-End Tests
+**Tasks:**
+- [ ] Deploy test subnet with Recall
+- [ ] Upload real files
+- [ ] Verify replication
+- [ ] Test TTL expiry
+- [ ] Test failure scenarios
+- [ ] Test network partition recovery
+
+#### Step 9.4: Performance Testing
+**Tasks:**
+- [ ] Benchmark upload throughput
+- [ ] Test concurrent uploads
+- [ ] Measure resolution latency
+- [ ] Check memory usage
+- [ ] Monitor gas consumption
+
+### Phase 10: Documentation (2-3 days)
+
+**Goal:** Document changes and usage
+
+**Tasks:**
+- [ ] Update main README with Recall features
+- [ ] Document Recall actor APIs
+- [ ] Create deployment guide
+- [ ] Update CLI documentation (if added)
+- [ ] Document configuration options
+- [ ] Create troubleshooting guide
+- [ ] Update architecture diagrams
+
+---
+
+## Testing Requirements
+
+### Unit Test Coverage
+
+**Minimum Requirements:**
+- [ ] 80%+ code coverage for recall/ modules
+- [ ] 90%+ coverage for critical paths (vote tally, state transitions)
+- [ ] All actor methods have unit tests
+- [ ] Edge cases tested (TTL expiry, vote equivocation, etc.)
+
+### Integration Test Suites
+
+#### 1. Blob Lifecycle Tests
+```rust
+- test_blob_add_and_subscribe()
+- test_blob_resolution_success()
+- test_blob_resolution_failure()
+- test_blob_expiry()
+- test_blob_overwrite()
+```
+
+#### 2. Vote Tally Tests
+```rust
+- test_vote_recording()
+- test_quorum_calculation()
+- test_equivocation_prevention()
+- test_power_table_update()
+```
+
+#### 3. 
Credit System Tests +```rust +- test_gas_allowance_creation() +- test_gas_allowance_consumption() +- test_sponsored_transactions() +- test_allowance_expiry() +``` + +#### 4. Iroh Integration Tests +```rust +- test_iroh_node_initialization() +- test_blob_upload() +- test_blob_download() +- test_node_discovery() +- test_relay_connection() +``` + +### Regression Tests + +**Must not break existing functionality:** +- [ ] IPC cross-net messaging still works +- [ ] Subnet creation/join unaffected +- [ ] Checkpoint submission works +- [ ] Gateway operations work +- [ ] All existing integration tests pass + +### Performance Benchmarks + +**Baseline Metrics to Maintain:** +- [ ] Block time: < 2s +- [ ] Transaction throughput: > 100 tx/s +- [ ] Memory usage: < 2GB per validator +- [ ] Sync time: < 30 min for 10k blocks + +**New Recall Metrics:** +- [ ] Blob upload time: < 30s for 10MB +- [ ] Resolution time: < 60s for 10MB blob +- [ ] Vote propagation: < 5s +- [ ] Finalization latency: < 1 block after quorum + +--- + +## Risk Assessment + +### Critical Risks + +#### 1. FVM API Incompatibility +**Risk Level:** 🔴 **HIGH** + +**Impact:** Recall kernel/executor may not compile or work correctly + +**Mitigation:** +- Thorough review of FVM 4.4→4.7 changelogs +- Create compatibility layer if needed +- Extensive testing of actor execution +- Have FVM experts review changes + +**Contingency:** +- May need to stay on FVM 4.3 temporarily +- Create isolated branch for FVM upgrade +- Parallel track with stability fixes + +#### 2. Iroh Version Mismatch +**Risk Level:** 🔴 **HIGH** + +**Impact:** P2P blob transfer may fail completely + +**Mitigation:** +- Test Iroh compatibility early (Phase 2) +- Have fallback plan for Iroh upgrade +- Maintain version compatibility matrix +- Test with real network conditions + +**Contingency:** +- Bundle specific Iroh version +- Vendor Iroh dependencies if needed +- Consider alternative P2P layer + +#### 3. State Serialization Breaking Changes +**Risk Level:** 🟡 **MEDIUM** + +**Impact:** Cannot deserialize existing Recall state + +**Mitigation:** +- Test state migrations explicitly +- Create state version detection +- Implement migration logic if needed +- Backup/restore testing + +**Contingency:** +- Fresh genesis for Recall launch +- State migration scripts +- Parallel chain for testing + +#### 4. Vote Tally Integration Issues +**Risk Level:** 🟡 **MEDIUM** + +**Impact:** Blobs never reach quorum, network stalls + +**Mitigation:** +- Extensive vote tally testing +- Simulate various validator scenarios +- Test network partition recovery +- Monitor vote metrics + +**Contingency:** +- Temporary lower quorum for testing +- Manual intervention mechanisms +- Enhanced diagnostics + +#### 5. 
Contract Binding Path Changes +**Risk Level:** 🟢 **LOW** + +**Impact:** Compilation errors in Solidity facades + +**Mitigation:** +- Update imports systematically +- Regenerate bindings +- Test contract interactions + +**Contingency:** +- Simple find/replace for paths +- Straightforward to fix + +### Migration Risks by Phase + +| Phase | Risk Level | Key Concerns | +|-------|-----------|--------------| +| Phase 1: Core Dependencies | 🔴 HIGH | FVM compatibility | +| Phase 2: Iroh Integration | 🔴 HIGH | P2P functionality | +| Phase 3: Executor | 🟡 MEDIUM | Gas mechanics | +| Phase 4: Actors | 🟡 MEDIUM | State compatibility | +| Phase 5: VM Integration | 🟡 MEDIUM | Message handling | +| Phase 6: Genesis | 🟢 LOW | Initialization | +| Phase 7: Application | 🟢 LOW | Configuration | +| Phase 8: Contracts | 🟢 LOW | Path updates | +| Phase 9: Testing | 🟡 MEDIUM | Coverage gaps | +| Phase 10: Documentation | 🟢 LOW | Completeness | + +--- + +## Rollback Plan + +### Immediate Rollback (Day 1-7) +**Scenario:** Critical blocker discovered early + +**Action:** +1. Abandon migration branch +2. Return to ipc-recall for continued development +3. Document blockers +4. Plan remediation + +**Cost:** Minimal - early in migration + +### Mid-Migration Rollback (Day 7-21) +**Scenario:** Unexpected complexity, delayed beyond timeline + +**Action:** +1. Create snapshot of partial migration +2. Tag branch: `recall-migration-paused-YYYY-MM-DD` +3. Document completed phases +4. Return to ipc-recall temporarily +5. Plan revised approach + +**Cost:** Moderate - partial work done + +### Late Rollback (Day 21+) +**Scenario:** Integration issues found during final testing + +**Action:** +1. Keep feature-flag disabled on main +2. Fix issues in migration branch +3. Retest thoroughly +4. Merge when ready + +**Cost:** Higher - significant work invested + +### Post-Merge Rollback +**Scenario:** Production issues after merge to main + +**Action:** +1. **Immediate:** Disable Recall features via config +2. **Short-term:** Revert merge commit if critical +3. **Long-term:** Fix issues and re-enable + +**Protection Mechanisms:** +- [ ] Feature flags for Recall components +- [ ] Configuration to disable Recall actors +- [ ] Separate test vs. 
production deployments +- [ ] Canary deployments + +--- + +## Success Criteria + +### Phase Completion Criteria + +Each phase must meet these before proceeding: + +✅ **All code compiles without warnings** +✅ **All unit tests pass** +✅ **No regressions in existing functionality** +✅ **Code reviewed and approved** +✅ **Documentation updated** + +### Final Migration Acceptance + +Migration is complete when: + +- [ ] All Recall components integrated and working +- [ ] Full test suite passes (unit + integration + e2e) +- [ ] Performance benchmarks met +- [ ] Documentation complete +- [ ] Code reviewed by 2+ team members +- [ ] Production deployment plan approved +- [ ] Rollback procedures tested +- [ ] Monitoring and alerting configured + +--- + +## Resource Requirements + +### Team Composition + +**Recommended Team:** +- 2-3 Senior Rust/FVM developers +- 1 Solidity developer (contracts) +- 1 DevOps engineer (deployment) +- 1 QA engineer (testing) + +**Availability:** +- Full-time for 4-6 weeks +- Or part-time for 8-12 weeks + +### Infrastructure + +**Development:** +- [ ] Development testnet with 4-5 validators +- [ ] CI/CD pipeline for Recall branch +- [ ] Performance testing environment +- [ ] Staging environment + +**Monitoring:** +- [ ] Metrics collection (Prometheus) +- [ ] Log aggregation (Loki/ELK) +- [ ] Distributed tracing +- [ ] Alerting (Alertmanager) + +--- + +## Timeline Estimate + +### Optimistic (Expert Team, No Blockers) +**4-5 weeks** + +``` +Week 1: Phases 0-2 (Prep, Core, Iroh) +Week 2: Phases 3-4 (Executor, Actors) +Week 3: Phases 5-6 (VM, Genesis) +Week 4: Phases 7-8 (App, Contracts) +Week 5: Phases 9-10 (Testing, Docs) +``` + +### Realistic (Experienced Team, Minor Issues) +**6-8 weeks** + +``` +Weeks 1-2: Phases 0-3 +Weeks 3-4: Phases 4-5 +Weeks 5-6: Phases 6-8 +Weeks 7-8: Phases 9-10 + Buffer +``` + +### Conservative (Learning Required, Major Issues) +**10-12 weeks** + +``` +Weeks 1-3: Phases 0-3 + FVM learning +Weeks 4-6: Phases 4-5 + Issue resolution +Weeks 7-9: Phases 6-8 +Weeks 10-12: Phases 9-10 + Hardening +``` + +--- + +## Next Steps + +### Immediate Actions (This Week) + +1. **Decision:** Approve migration approach +2. **Staffing:** Assign team members +3. **Setup:** Create migration branch from main +4. **Kickoff:** Phase 0 preparation tasks +5. **Communication:** Notify stakeholders + +### Before Starting Phase 1 + +- [ ] Review this document with full team +- [ ] Set up project tracking (Jira/GitHub Projects) +- [ ] Create test environment +- [ ] Schedule daily standups +- [ ] Establish code review process +- [ ] Define success metrics +- [ ] Create risk register + +### Key Decisions Needed + +1. **FVM Strategy:** Stay on 4.3 temporarily or upgrade immediately? +2. **Iroh Version:** Which version to target? +3. **Genesis Approach:** Fresh genesis or state migration? +4. **Deployment:** Testnet first or devnet? +5. **Timeline:** Which estimate (optimistic/realistic/conservative)? + +--- + +## Appendix + +### A. Key Files Changed on Main (Sample) + +``` +High Impact: +- Cargo.toml (workspace reorganization) +- fendermint/actors/src/manifest.rs (actor registration) +- fendermint/app/src/app.rs (app initialization) +- fendermint/vm/interpreter/src/chain.rs (message handling) + +Medium Impact: +- fendermint/vm/genesis/src/lib.rs (genesis flow) +- contracts/binding/build.rs (contract bindings) +- fendermint/actors/build.rs (actor bundle) + +Low Impact: +- Various Cargo.toml version bumps +- CI/CD configuration +- Documentation files +``` + +### B. 
Recall Dependencies + +```toml +# Core Dependencies +fvm = "4.3.0" → "4.7.4" +fvm_shared = "4.3.0" → "4.7.4" +fvm_sdk = "4.3.0" → "4.7.4" +fvm_ipld_* = "0.2" → Check main version + +# Iroh Dependencies +iroh = "0.34.x" +iroh_blobs = "0.34.x" +quic_rpc = "0.14" + +# Async Runtime +tokio = "1.x" +async-trait = "0.1" +futures = "0.3" + +# Serialization +serde = "1.0" +fvm_ipld_encoding = "0.4" +``` + +### C. Useful Commands + +```bash +# Check diff between branches +git diff main..ipc-recall --stat + +# Find all Recall-specific files +find . -name "*blob*" -o -name "*recall*" -o -name "*iroh*" + +# Count lines of Recall code +cloc recall/ fendermint/actors/blob* fendermint/vm/iroh_resolver/ + +# Test specific component +cargo test -p recall_kernel -- --nocapture + +# Check for FVM API usage +rg "fvm::" --type rust | wc -l + +# Find all actor registrations +rg "register_actor|ACTOR_ID" fendermint/actors/ +``` + +### D. Contact Points + +**For Questions:** +- FVM compatibility: Review FVM repo issues/discussions +- Iroh integration: Check Iroh documentation +- Actor patterns: Reference other actors in fendermint/actors/ +- Vote tally: See fendermint/vm/topdown/src/voting.rs + +--- + +## Conclusion + +The migration of Recall storage from ipc-recall to main is a **significant undertaking** requiring 4-12 weeks depending on team experience and issues encountered. The incremental approach outlined here minimizes risk while providing clear checkpoints. + +**Key Success Factors:** +1. Strong Rust/FVM expertise on the team +2. Thorough testing at each phase +3. Early identification of blockers (FVM, Iroh) +4. Clear communication and decision-making +5. Realistic timeline expectations + +**Go/No-Go Decision Points:** +- ✋ **After Phase 2:** If Iroh integration blocked, pause and reassess +- ✋ **After Phase 3:** If FVM executor broken, may need FVM expert consultation +- ✋ **After Phase 5:** If VM integration issues, consider architectural changes + +With proper planning and execution, Recall storage can be successfully integrated into main, bringing decentralized storage capabilities to the IPC network. + +--- + +**Document Version:** 1.0 +**Last Updated:** 2024-11-04 +**Status:** Draft for Review +**Next Review:** After Phase 0 completion + diff --git a/docs/ipc/recall-migration-status.md b/docs/ipc/recall-migration-status.md new file mode 100644 index 0000000000..d06d14b78b --- /dev/null +++ b/docs/ipc/recall-migration-status.md @@ -0,0 +1,201 @@ +# Recall Migration Status + +## Current Progress + +### ✅ Phase 0: Preparation - COMPLETED +- [x] Created `recall-migration` branch from latest main (commit: 984fc4a4) +- [x] Copied `recall/` directory from ipc-recall branch +- [x] Added recall modules to workspace Cargo.toml +- [x] Added missing workspace dependencies: + - `ambassador = "0.3.5"` + - `iroh = "0.35"` + - `iroh-base = "0.35"` + - `iroh-blobs = "0.35"` + - `iroh-relay = "0.35"` + - `iroh-quinn = "0.13"` + - `n0-future = "0.1.2"` + - `quic-rpc = "0.20"` + - `replace_with = "0.1.7"` + - `entangler` (git dependency) + - `entangler_storage` (git dependency) + +### 🔄 Phase 1: Core Dependencies - IN PROGRESS + +**Current Status:** Setting up recall modules + +**Blockers Identified:** +1. `recall/executor` depends on `fendermint_actor_blobs_shared` which doesn't exist on main yet +2. `recall_sol_facade` workspace dependency reference found but source unknown +3. Need to port Recall actors before executor can compile + +**Next Steps:** +1. 
Copy Recall actor components from ipc-recall: + - `fendermint/actors/blobs/` (full directory with shared/) + - `fendermint/actors/bucket/` + - `fendermint/actors/blob_reader/` + - `fendermint/actors/recall_config/` +2. Update workspace to include these actors +3. Try compiling recall/ipld, recall/kernel first (no actor dependencies) +4. Then move to recall/syscalls, recall/executor + +## Branch Information + +**Branch Name:** `recall-migration` +**Based On:** `main` @ commit `984fc4a4` (feat: add f3 cert actor) +**Original Branch:** `ipc-recall` @ commit `567108af` (fix: non-determinism from actor debug flag) +**Gap:** 959 commits behind, 77 commits ahead + +## Components Ported So Far + +### ✅ Ported +- `recall/` directory structure (7 modules) +- Workspace dependencies added +- Documentation: + - `docs/ipc/recall-vote-tally.md` + - `docs/ipc/recall-migration-guide.md` + +### ⏳ Pending +- Recall actors (blobs, bucket, blob_reader, recall_config, timehub) +- VM integration (iroh_resolver) +- Application layer integration +- Contract updates +- Tests + +## Build Status + +**Current Error:** +``` +error: failed to load manifest for workspace member `/Users/philip/github/ipc/recall/executor` + +Caused by: + failed to parse manifest at `/Users/philip/github/ipc/recall/executor/Cargo.toml` + +Caused by: + cannot find `fendermint_actor_blobs_shared` in workspace +``` + +**Resolution:** Need to port actors first + +## Recommended Next Actions + +### Immediate (Today) +1. **Copy Recall actors from ipc-recall branch:** + ```bash + git checkout ipc-recall -- fendermint/actors/blobs/ + git checkout ipc-recall -- fendermint/actors/bucket/ + git checkout ipc-recall -- fendermint/actors/blob_reader/ + git checkout ipc-recall -- fendermint/actors/recall_config/ + ``` + +2. **Add actors to workspace Cargo.toml** + +3. **Test basic compilation:** + ```bash + cargo check -p recall_ipld + cargo check -p recall_kernel + cargo check -p fendermint_actor_blobs_shared + ``` + +### Short-term (This Week) +1. Fix FVM API compatibility issues in recall modules +2. Update contract binding imports in actor sol_facades +3. Port iroh_resolver VM component +4. Update chain interpreter for blob messages + +### Medium-term (Next Week) +1. Integration testing of uploaded → resolution → finalization flow +2. Genesis integration +3. Application layer (app.rs) updates +4. End-to-end testing + +## Risks & Mitigations + +### High Risk Items +1. **FVM 4.3 → 4.7.4 upgrade** + - **Risk:** API incompatibilities in kernel/executor + - **Mitigation:** Incremental testing, FVM changelog review + +2. **Iroh 0.35 compatibility** + - **Risk:** P2P layer might not work + - **Mitigation:** Test early, have fallback plan + +3. **Actor dependencies** + - **Risk:** Circular dependencies, complex build order + - **Mitigation:** Port in dependency order + +### Medium Risk Items +1. **Contract binding paths changed** + - **Mitigation:** Straightforward find/replace + +2. **Vote tally integration** + - **Mitigation:** Existing code in topdown/voting.rs + +## Key Decisions Made + +1. **Use incremental migration approach** rather than direct merge +2. **Start with recall/ modules** before fendermint components +3. **Use Iroh 0.35** (one version ahead of what recall branch had) +4. 
**Keep entanglement as external git dependency**
+
+## Timeline Estimate
+
+- **Phase 0 (Prep):** ✅ Complete (1 day)
+- **Phase 1 (Core):** 🔄 In Progress (2-3 days remaining)
+- **Phase 2 (Iroh):** ⏳ Not Started (2-3 days)
+- **Phase 3 (Executor):** ⏳ Not Started (3-4 days)
+- **Phase 4 (Actors):** ⏳ Not Started (5-7 days)
+- **Phase 5+ (Integration):** ⏳ Not Started (8-10 days)
+
+**Total Estimated:** 6-8 weeks (realistic scenario)
+
+## Files Modified
+
+```
+Modified:
+  Cargo.toml (workspace configuration)
+
+Added:
+  recall/ (entire directory)
+  docs/ipc/recall-vote-tally.md (documentation)
+  docs/ipc/recall-migration-guide.md (documentation)
+  docs/ipc/recall-migration-status.md (this file)
+```
+
+## Useful Commands
+
+```bash
+# Check status
+git status
+
+# See what's in recall/ on ipc-recall
+git show ipc-recall:recall/
+
+# See what actors exist on ipc-recall
+git show ipc-recall:fendermint/actors/
+
+# Test compilation
+cargo check -p recall_kernel
+
+# See dependency tree
+cargo tree -p recall_kernel
+
+# Check for FVM usage
+rg "fvm::" recall/
+
+# View migration guide
+code docs/ipc/recall-migration-guide.md
+```
+
+## Notes
+
+- All recall code uses FVM workspace dependencies, so it will pick up FVM 4.7.4
+- Iroh bumped to 0.35 (was 0.34 in the recall branch guide)
+- Entanglement library hosted at github.com/recallnet/entanglement
+- Some components will need iterative fixes as dependencies are resolved
+
+---
+
+**Last Updated:** 2024-11-04
+**Status:** Phase 1 in progress
+**Next Milestone:** Complete recall module compilation
+
diff --git a/docs/ipc/recall-vote-tally.md b/docs/ipc/recall-vote-tally.md
new file mode 100644
index 0000000000..61c3c49a80
--- /dev/null
+++ b/docs/ipc/recall-vote-tally.md
@@ -0,0 +1,610 @@
+# Recall Storage: Vote Tally Mechanism
+
+## Overview
+
+The Recall storage layer (Basin network) uses a **weighted vote tally system** to achieve Byzantine Fault Tolerant (BFT) consensus on blob storage across the validator network. This document explains how validators vote on blob resolution and how the system determines when a blob has been successfully stored.
+
+## Table of Contents
+
+- [Core Concepts](#core-concepts)
+- [Vote Tally Architecture](#vote-tally-architecture)
+- [Voting Process](#voting-process)
+- [Quorum Calculation](#quorum-calculation)
+- [Vote Tallying Algorithm](#vote-tallying-algorithm)
+- [Finalization Process](#finalization-process)
+- [Security Guarantees](#security-guarantees)
+
+---
+
+## Core Concepts
+
+### Validator Power
+
+Each validator in the network has a **voting weight** (also called "power") that corresponds to their stake in the network. Validators with higher stakes have proportionally more voting power when determining consensus.
+
+```rust
+pub type Weight = u64;
+
+/// Current validator weights. These are the ones who will vote on the blocks,
+/// so these are the weights that need to form a quorum.
+power_table: TVar<im::HashMap<K, Weight>>,
+```
+
+### Vote Types
+
+When a validator attempts to download and verify a blob, it casts one of two vote types:
+
+- **Success Vote (`true`)**: The validator successfully downloaded and verified the blob from the source node
+- **Failure Vote (`false`)**: The validator failed to download or verify the blob
+
+### Quorum Threshold
+
+The system requires a **supermajority** to finalize any decision. The quorum threshold is calculated as:
+
+```
+quorum_threshold = (total_voting_weight × 2 / 3) + 1
+```
+
+This matches CometBFT's Byzantine Fault Tolerant consensus model and ensures the system can tolerate up to 1/3 of validators being malicious or offline.
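+
+The integer division matters here, so the arithmetic is worth pinning down. A standalone sketch of the formula (not the fendermint implementation itself):
+
+```rust
+type Weight = u64;
+
+/// The 2/3 + 1 supermajority threshold from the formula above.
+fn quorum_threshold(total_weight: Weight) -> Weight {
+    total_weight * 2 / 3 + 1
+}
+
+fn main() {
+    assert_eq!(quorum_threshold(100), 67); // example used throughout this doc
+    assert_eq!(quorum_threshold(4), 3);    // 4 equal validators: any 3 suffice
+    assert_eq!(quorum_threshold(3), 3);    // 3 equal validators: all must agree
+}
+```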
+
+---
+
+## Vote Tally Architecture
+
+The `VoteTally` structure maintains the state needed for consensus:
+
+```rust
+pub struct VoteTally<K, O> {
+    /// Current validator weights for voting
+    power_table: TVar<im::HashMap<K, Weight>>,
+
+    /// Index votes received by blob
+    /// Maps: Blob -> Validator -> Vote (true=resolved, false=failed)
+    blob_votes: TVar<im::HashMap<O, im::HashMap<K, bool>>>,
+
+    /// Pause flag to prevent vote additions during quorum calculation
+    pause_blob_votes: TVar<bool>,
+}
+```
+
+### Key Features
+
+1. **Weighted Voting**: Each validator's vote is weighted by their stake
+2. **Equivocation Prevention**: Validators cannot change a "resolved" vote to "failed"
+3. **Concurrent Tallying**: Uses Software Transactional Memory (STM) for thread-safe operations
+4. **Efficient Lookup**: Indexed by blob hash for fast quorum checks
+
+---
+
+## Voting Process
+
+### 1. Blob Resolution Attempt
+
+When a validator picks up a blob from the "added" or "pending" queue, it attempts to download it from the specified source node:
+
+```rust
+match client.resolve_iroh(task.hash(), size, source.id.into()).await {
+    Ok(Ok(())) => {
+        // Successfully downloaded and verified
+        tracing::debug!(hash = %task.hash(), "iroh blob resolved");
+        atomically(|| task.set_resolved()).await;
+
+        // Cast success vote
+        if add_own_vote(
+            task.hash(),
+            client,
+            vote_tally,
+            key,
+            subnet_id,
+            true, // resolved = true
+            to_vote,
+        ).await {
+            emit(BlobsFinalityVotingSuccess {
+                blob_hash: Some(task.hash().to_string()),
+            });
+        }
+    }
+    Err(e) | Ok(Err(e)) => {
+        // Failed to download or verify
+        // Retry or cast failure vote after exhausting attempts
+    }
+}
+```
+
+### 2. Vote Recording
+
+Each validator's vote is recorded with validation checks:
+
+```rust
+pub fn add_blob_vote(
+    &self,
+    validator_key: K,
+    blob: O,
+    resolved: bool,
+) -> StmResult<bool, Error<K>> {
+    // Check if voting is paused during quorum calculation
+    if *self.pause_blob_votes.read()? {
+        retry()?;
+    }
+
+    // Verify validator has voting power
+    if !self.has_power(&validator_key)? {
+        return abort(Error::UnpoweredValidator(validator_key));
+    }
+
+    let mut votes = self.blob_votes.read_clone()?;
+    let votes_for_blob = votes.entry(blob).or_default();
+
+    // Prevent equivocation: can't change "resolved" to "failed"
+    if let Some(existing_vote) = votes_for_blob.get(&validator_key) {
+        if *existing_vote {
+            return Ok(false); // Ignore later votes
+        }
+    }
+
+    votes_for_blob.insert(validator_key, resolved);
+    self.blob_votes.write(votes)?;
+
+    Ok(true)
+}
+```
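+
+The equivocation rule is easiest to see in isolation. The sketch below mimics the vote-map behavior with a plain `HashMap` (the real code uses STM transactions and `im` maps; the names here are illustrative):
+
+```rust
+use std::collections::HashMap;
+
+/// Record a vote, ignoring any attempt to change an existing "resolved"
+/// vote, mirroring add_blob_vote above. Returns whether the vote was recorded.
+fn record_vote(votes: &mut HashMap<String, bool>, validator: &str, resolved: bool) -> bool {
+    if votes.get(validator) == Some(&true) {
+        return false; // already voted "resolved"; later votes are ignored
+    }
+    votes.insert(validator.to_string(), resolved);
+    true
+}
+
+fn main() {
+    let mut votes = HashMap::new();
+    assert!(record_vote(&mut votes, "val-a", false));  // failure vote recorded
+    assert!(record_vote(&mut votes, "val-a", true));   // upgrading to "resolved" is allowed
+    assert!(!record_vote(&mut votes, "val-a", false)); // downgrading is ignored
+    assert_eq!(votes["val-a"], true);
+}
+```
+
+Note the asymmetry: a "failed" vote can later be upgraded to "resolved" (a retry succeeded), but a "resolved" vote is final.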
+
+### 3. Vote Propagation
+
+After recording their own vote, validators gossip it to peers via the P2P network:
+
+```rust
+let vote = to_vote(vote_hash, resolved);
+match VoteRecord::signed(&key, subnet_id, vote) {
+    Ok(vote) => {
+        let validator_key = ValidatorKey::from(key.public());
+
+        // Add to local tally
+        atomically_or_err(|| {
+            vote_tally.add_blob_vote(
+                validator_key.clone(),
+                vote_hash.as_bytes().to_vec(),
+                resolved,
+            )
+        }).await;
+
+        // Broadcast to peers
+        if let Err(e) = client.publish_vote(vote) {
+            tracing::error!(error = e.to_string(), "failed to publish vote");
+            return false;
+        }
+    }
+}
+```
+
+---
+
+## Quorum Calculation
+
+### Standard Quorum (With Power Table)
+
+For subnets with a parent chain that provides validator power information:
+
+```rust
+pub fn quorum_threshold(&self) -> Stm<Weight> {
+    let total_weight: Weight = self.power_table.read().map(|pt| pt.values().sum())?;
+
+    // Require 2/3 + 1 of total voting power
+    Ok(total_weight * 2 / 3 + 1)
+}
+```
+
+**Example:**
+- Total validator power: 100
+- Quorum threshold: (100 × 2 / 3) + 1 = 67
+
+This means at least 67 units of voting power must agree for consensus.
+
+### Development Mode (Empty Power Table)
+
+For standalone/testing subnets without a parent chain:
+
+```rust
+let quorum_threshold = if power_table.is_empty() {
+    1 as Weight // At least one vote required
+} else {
+    self.quorum_threshold()?
+};
+```
+
+---
+
+## Vote Tallying Algorithm
+
+The system separately tallies votes for "resolved" and "failed" outcomes:
+
+```rust
+pub fn find_blob_quorum(&self, blob: &O) -> Stm<(bool, bool)> {
+    self.pause_blob_votes.write(false)?;
+
+    let votes = self.blob_votes.read()?;
+    let power_table = self.power_table.read()?;
+    let quorum_threshold = if power_table.is_empty() {
+        1 as Weight
+    } else {
+        self.quorum_threshold()?
+    };
+
+    let mut resolved_weight = 0;
+    let mut failed_weight = 0;
+    let mut voters = im::HashSet::new();
+
+    let Some(votes_for_blob) = votes.get(blob) else {
+        return Ok((false, false)); // No votes yet
+    };
+
+    // Sum weighted votes
+    for (validator_key, resolved) in votes_for_blob {
+        if voters.insert(validator_key.clone()).is_none() {
+            // Get validator's current power (may be 0 if removed)
+            let power = if power_table.is_empty() {
+                1
+            } else {
+                power_table.get(validator_key).cloned().unwrap_or_default()
+            };
+
+            tracing::debug!("voter; key={}, power={}", validator_key.to_string(), power);
+
+            if *resolved {
+                resolved_weight += power;
+            } else {
+                failed_weight += power;
+            }
+        }
+    }
+
+    tracing::debug!(
+        resolved_weight,
+        failed_weight,
+        quorum_threshold,
+        "blob quorum; votes={}",
+        votes_for_blob.len()
+    );
+
+    // Check if either outcome reached quorum
+    if resolved_weight >= quorum_threshold {
+        Ok((true, true)) // Quorum reached: RESOLVED
+    } else if failed_weight >= quorum_threshold {
+        Ok((true, false)) // Quorum reached: FAILED
+    } else {
+        Ok((false, false)) // No quorum yet
+    }
+}
+```
+
+### Return Values
+
+The function returns a tuple `(bool, bool)`:
+
+| Return Value | Meaning |
+|--------------|---------|
+| `(true, true)` | Quorum reached, blob **successfully stored** |
+| `(true, false)` | Quorum reached, blob **failed to store** |
+| `(false, false)` | No quorum reached yet, **keep waiting** |
+
+---
+
+## Finalization Process
+
+### Proposing Finalization
+
+When a validator believes a blob has reached quorum, they can propose finalization in a block:
+
+```rust
+ChainMessage::Ipc(IpcMessage::BlobFinalized(blob)) => {
+    // 1. 
Check if already finalized on-chain + let (is_blob_finalized, status) = + with_state_transaction(&mut state, |state| { + is_blob_finalized(state, blob.subscriber, blob.hash, blob.id.clone()) + })?; + + if is_blob_finalized { + tracing::warn!(hash = %blob.hash, "blob already finalized (status={:?})", status); + } + + // 2. Verify global quorum exists + let (is_globally_finalized, succeeded) = atomically(|| { + chain_env + .parent_finality_votes + .find_blob_quorum(&blob.hash.as_bytes().to_vec()) + }).await; + + if !is_globally_finalized { + tracing::warn!(hash = %blob.hash, "not globally finalized; rejecting"); + return Ok(false); + } + + // 3. Verify outcome matches proposal + if blob.succeeded != succeeded { + tracing::warn!( + hash = %blob.hash, + quorum = ?succeeded, + message = ?blob.succeeded, + "finalization mismatch; rejecting" + ); + return Ok(false); + } + + // 4. Accept proposal for inclusion in block + // ... +} +``` + +### On-Chain State Update + +Once finalized, the blob's status is updated in the Blobs Actor: + +- **If succeeded**: Status changes to `BlobStatus::Resolved` +- **If failed**: Status changes to `BlobStatus::Failed` + +The blob is then removed from the pending queues and recorded in the permanent state. + +--- + +## Security Guarantees + +### Byzantine Fault Tolerance + +The 2/3+1 quorum threshold provides BFT guarantees: + +- **Safety**: Can tolerate up to 1/3 Byzantine (malicious or faulty) validators +- **Liveness**: Can make progress as long as 2/3+ validators are online and honest + +### Equivocation Prevention + +The vote recording logic prevents validators from equivocating: + +```rust +if let Some(existing_vote) = votes_for_blob.get(&validator_key) { + if *existing_vote { + // A vote for "resolved" was already made, ignore later votes + return Ok(false); + } +} +``` + +Once a validator votes "resolved", they cannot later vote "failed" for the same blob. + +### Sybil Resistance + +Votes are weighted by stake, preventing Sybil attacks where an attacker creates many low-power validators. An attacker would need to control 1/3+ of the total stake to disrupt consensus. + +### Network Partition Tolerance + +If the network partitions: +- No partition can finalize blobs without 2/3+ of total voting power +- Once the partition heals, validators with the minority view will accept the majority chain + +--- + +## Vote Tally Flow Diagram + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 1. Blob Added to Network │ +│ - Client uploads to their Iroh node │ +│ - Registers with Blobs Actor (on-chain) │ +│ - Blob enters "added" queue │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 2. Validators Pick Up Blob │ +│ - Fetch from "added" queue │ +│ - Move to "pending" status │ +│ - Begin download attempt from source node │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 3. Each Validator Casts Weighted Vote │ +│ ┌─────────────────┐ ┌─────────────────┐ │ +│ │ Download Success│ OR │ Download Failed │ │ +│ │ Vote: true │ │ Vote: false │ │ +│ │ Weight: stake │ │ Weight: stake │ │ +│ └─────────────────┘ └─────────────────┘ │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 4. 
Votes Gossiped to Peers │ +│ - P2P network propagates signed votes │ +│ - Each validator updates their local tally │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 5. Vote Tally Accumulation │ +│ resolved_weight = Σ(power of validators voting success) │ +│ failed_weight = Σ(power of validators voting failed) │ +│ quorum_threshold = (total_power × 2/3) + 1 │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 6. Quorum Check │ +│ ┌──────────────────────┐ │ +│ │ resolved_weight │─ YES ──> Blob RESOLVED ✓ │ +│ │ >= quorum_threshold? │ │ +│ └──────────────────────┘ │ +│ ┌──────────────────────┐ │ +│ │ failed_weight │─ YES ──> Blob FAILED ✗ │ +│ │ >= quorum_threshold? │ │ +│ └──────────────────────┘ │ +│ │ │ +│ NO ──> Keep waiting for more votes │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 7. Finalization Proposal │ +│ - Validator proposes BlobFinalized message │ +│ - Other validators verify quorum exists │ +│ - If consensus, include in block │ +└────────────────────┬────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 8. On-Chain State Update │ +│ - Blob status updated in Blobs Actor │ +│ - Removed from pending queue │ +│ - Subscription confirmed for subscriber │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## Example Scenario + +### Network Setup + +``` +Validator A: Power = 40 +Validator B: Power = 35 +Validator C: Power = 25 +───────────────────────── +Total Power = 100 +Quorum Threshold = (100 × 2/3) + 1 = 67 +``` + +### Vote Progression for Blob `0xABCD...` + +**Time T1:** +``` +Validator A: ✓ resolved (weight: 40) +───────────────────────── +resolved_weight = 40 +failed_weight = 0 +Status: No quorum yet (40 < 67) +``` + +**Time T2:** +``` +Validator A: ✓ resolved (weight: 40) +Validator B: ✓ resolved (weight: 35) +───────────────────────── +resolved_weight = 75 +failed_weight = 0 +Status: QUORUM REACHED - RESOLVED ✓ +``` + +At T2, the blob can be finalized as successfully stored since `resolved_weight (75) >= quorum_threshold (67)`. + +### Alternative: Failure Scenario + +**Time T1:** +``` +Validator A: ✗ failed (weight: 40) +Validator C: ✗ failed (weight: 25) +───────────────────────── +resolved_weight = 0 +failed_weight = 65 +Status: No quorum yet (65 < 67) +``` + +**Time T2:** +``` +Validator A: ✗ failed (weight: 40) +Validator B: ✓ resolved (weight: 35) +Validator C: ✗ failed (weight: 25) +───────────────────────── +resolved_weight = 35 +failed_weight = 65 +Status: No quorum yet (neither reached 67) +``` + +In this scenario, no quorum is reached and the system waits for more validators to vote. 
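+
+Both scenarios can be reproduced mechanically. A self-contained sketch of the weighted tally, using the same decision rule as `find_blob_quorum` above but with plain `HashMap`s instead of the STM machinery:
+
+```rust
+use std::collections::HashMap;
+
+/// Returns (quorum_reached, resolved), as find_blob_quorum does.
+fn tally(votes: &HashMap<&str, bool>, power: &HashMap<&str, u64>) -> (bool, bool) {
+    let total: u64 = power.values().sum();
+    let threshold = total * 2 / 3 + 1;
+    let (mut resolved_w, mut failed_w) = (0u64, 0u64);
+    for (validator, resolved) in votes {
+        // A validator removed from the power table contributes weight 0.
+        let p = power.get(validator).copied().unwrap_or_default();
+        if *resolved { resolved_w += p } else { failed_w += p }
+    }
+    if resolved_w >= threshold {
+        (true, true)
+    } else if failed_w >= threshold {
+        (true, false)
+    } else {
+        (false, false)
+    }
+}
+
+fn main() {
+    // Validators A/B/C with powers 40/35/25; threshold = 100 * 2/3 + 1 = 67.
+    let power = HashMap::from([("a", 40), ("b", 35), ("c", 25)]);
+
+    // Failure scenario at T2: split vote, neither side reaches 67.
+    let votes = HashMap::from([("a", false), ("b", true), ("c", false)]);
+    assert_eq!(tally(&votes, &power), (false, false));
+
+    // Success scenario at T2: A and B vote resolved -> 75 >= 67.
+    let votes = HashMap::from([("a", true), ("b", true)]);
+    assert_eq!(tally(&votes, &power), (true, true));
+}
+```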
+ +--- + +## Implementation Notes + +### Concurrency Control + +The system uses Software Transactional Memory (STM) for thread-safe operations: + +```rust +// Atomic vote addition +let res = atomically_or_err(|| { + vote_tally.add_blob_vote( + validator_key.clone(), + vote_hash.as_bytes().to_vec(), + resolved, + ) +}).await; +``` + +### Pause Mechanism + +During quorum calculation, vote additions can be paused to prevent race conditions: + +```rust +pub fn pause_blob_votes_until_find_quorum(&self) -> Stm<()> { + self.pause_blob_votes.write(true) +} +``` + +The `find_blob_quorum` function automatically re-enables voting when complete. + +### Vote Cleanup + +Once a blob is finalized on-chain, votes are cleared to free memory: + +```rust +pub fn clear_blob(&self, blob: O) -> Stm<()> { + self.blob_votes.update_mut(|votes| { + votes.remove(&blob); + })?; + Ok(()) +} +``` + +--- + +## Metrics and Observability + +The system emits metrics for monitoring vote tally behavior: + +```rust +// Vote success/failure counters +BLOBS_FINALITY_VOTING_SUCCESS + .with_label_values(&[blob_hash]) + .inc(); + +BLOBS_FINALITY_VOTING_FAILURE + .with_label_values(&[blob_hash]) + .inc(); + +// Pending blob gauges +BLOBS_FINALITY_PENDING_BLOBS.set(pending_count as i64); +BLOBS_FINALITY_PENDING_BYTES.set(pending_bytes as i64); +``` + +These metrics help operators monitor: +- Vote distribution across blobs +- Time to reach quorum +- Failed vs. successful resolutions +- Queue sizes and backlogs + +--- + +## Related Documentation + +- [CometBFT Consensus](https://github.com/cometbft/cometbft) - The underlying BFT consensus algorithm +- [Iroh P2P Network](https://iroh.computer/) - The peer-to-peer blob transfer layer +- IPC Subnet Architecture - Parent-child chain relationship and validator power propagation +- Recall Storage Architecture - Overall system design + +--- + +## Conclusion + +The vote tally mechanism provides a robust, Byzantine Fault Tolerant method for achieving consensus on blob storage across the Recall network. By combining weighted voting, supermajority quorums, and equivocation prevention, the system ensures that blobs are only marked as "stored" when a sufficient majority of validators (by stake) have successfully downloaded and verified them. + +This design tolerates network partitions, validator failures, and up to 1/3 malicious actors while maintaining safety and liveness properties essential for a decentralized storage network. + diff --git a/docs/ipc/upgrade-strategy.md b/docs/ipc/upgrade-strategy.md new file mode 100644 index 0000000000..5b12f2bb1e --- /dev/null +++ b/docs/ipc/upgrade-strategy.md @@ -0,0 +1,1245 @@ +# IPC Upgrade Strategy: From Manual to Automated + +**Version:** 1.0 +**Date:** November 3, 2025 +**Status:** Planning + +## Executive Summary + +This document outlines a phased approach to evolve IPC's upgrade mechanism from manual coordination to fully automated, network-driven upgrades. The strategy addresses immediate needs (next 2 weeks) while building toward a production-grade, zero-coordination upgrade system over the next few months. + +### Key Requirements + +1. **Short-term (2 weeks):** Minimal downtime upgrades for IPC team-operated networks +2. **Medium-term (2-3 months):** Automated upgrades with "restart node and it upgrades" UX +3. **Long-term vision:** Network self-coordinates upgrades based on validator readiness +4. **Constraint:** No backward compatibility required; breaking changes acceptable with upgrade path +5. 
**Environment support:** Must work across testnet, mainnet, and private deployments + +--- + +## Current State Analysis + +### Two Independent Upgrade Systems + +#### 1. Smart Contract Upgrades (On-Chain Actors) + +**Components:** +- Gateway Diamond (singleton in every subnet) +- Subnet Actor Diamond (per-subnet logic in parent) +- Subnet Registry Diamond (factory contract) + +**Current Process:** +```bash +# Manual steps required: +1. Edit contract code in contracts/src/ +2. Convert subnet ID to ETH address via external tool (Beryx) +3. Set RPC_URL and PRIVATE_KEY environment variables +4. Run: make upgrade-sa-diamond SUBNET_ACTOR_ADDRESS=0x... NETWORK=calibrationnet +``` + +**Pain Points:** +- Requires private key holder to execute +- No coordination mechanism +- Manual address conversion +- No verification of success + +#### 2. Fendermint Binary Upgrades (Validator Nodes) + +**Current Mechanisms:** + +**A. UpgradeScheduler (State Migrations)** +- Hardcoded migrations compiled into binary +- Executed at predetermined block heights +- **Limitation:** Migrations must be known at compile time + +**B. halt_height (Binary Switching)** +```toml +# .fendermint/config/default.toml +halt_height = 10000 # Node exits with code 2 at this height +``` + +**Current Process:** +``` +1. Team discusses halt_height via Discord/Slack +2. Each operator manually edits config file +3. Each operator restarts Fendermint to load config +4. Wait for network to reach halt_height +5. All nodes halt simultaneously +6. Each operator manually: + - Stops process (if auto-restart enabled) + - Replaces binary + - Updates halt_height to 0 + - Restarts Fendermint +7. Network resumes +``` + +**Pain Points:** +- Requires out-of-band coordination (chat, email) +- Manual config editing on every node +- Requires process restarts before upgrade +- Simultaneous downtime for all nodes +- No verification all nodes upgraded +- No rollback mechanism +- High risk of human error +- If operator misses halt_height update, node becomes stuck + +--- + +## Phased Upgrade Strategy + +### Phase 1: Improved Manual Process (2 weeks) +**Goal:** Reduce downtime and coordination overhead for IPC team operations + +### Phase 2: Semi-Automated Coordination (2-3 months) +**Goal:** "Restart node with new binary, network handles the rest" UX + +### Phase 3: Network-Driven Upgrades (Future) +**Goal:** Network automatically schedules upgrades when quorum of nodes ready + +--- + +## Phase 1: Improved Manual Process + +**Timeline:** 2 weeks +**Target Users:** IPC team internal operations +**Downtime Goal:** < 30 seconds + +### 1.1 Upgrade Coordinator CLI Tool + +**New tool:** `ipc-cli upgrade` subcommands + +```bash +# Propose an upgrade (creates on-chain upgrade proposal) +ipc-cli upgrade propose \ + --height 15000 \ + --binary-url https://github.com/ipc/releases/v0.2.0/fendermint \ + --binary-hash sha256:abc123... \ + --contracts gateway,subnet-actor \ + --network calibration + +# Check upgrade status +ipc-cli upgrade status --network calibration + +# Signal node readiness (operator confirms binary downloaded) +ipc-cli upgrade ready --validator-address 0x... 
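+
+# ('ready' is normally sent automatically by the Fendermint upgrade monitor
+# described in 1.3; the manual form above is a fallback for operators)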
+ +# Execute upgrade (updates contracts if specified) +ipc-cli upgrade execute --network calibration +``` + +**Benefits:** +- Single source of truth for upgrade plan +- Automated address conversion +- Built-in verification +- Coordination visible on-chain + +### 1.2 Upgrade Registry Smart Contract + +**New contract:** `UpgradeRegistry.sol` + +```solidity +struct UpgradeProposal { + uint64 id; + uint64 targetHeight; + bytes32 binaryHash; + string binaryUrl; + address proposer; + uint64 proposedAt; + bool executed; + mapping(address => bool) validatorReady; + uint64 readyCount; +} + +function proposeUpgrade( + uint64 targetHeight, + bytes32 binaryHash, + string calldata binaryUrl +) external returns (uint64 proposalId); + +function signalReady(uint64 proposalId) external; + +function getUpgradeStatus(uint64 proposalId) + external view returns (UpgradeProposal memory); +``` + +**Deployment:** +- One registry per subnet +- Gateway holds reference to current registry +- Can be upgraded via diamond pattern + +### 1.3 Fendermint Upgrade Monitor + +**New module:** `fendermint/app/src/upgrade_monitor.rs` + +```rust +pub struct UpgradeMonitor { + registry_contract: Address, + tendermint_client: TendermintClient, + current_proposal: Option, +} + +impl UpgradeMonitor { + // Query registry every N blocks + async fn check_for_upgrades(&self, current_height: BlockHeight); + + // Download and verify binary + async fn prepare_upgrade(&self, proposal: &UpgradeProposal) -> Result; + + // Update halt_height automatically + async fn set_halt_height(&self, height: BlockHeight) -> Result<()>; + + // Signal readiness after successful preparation + async fn signal_ready(&self, proposal_id: u64) -> Result<()>; +} +``` + +**Integration:** +- Runs as background task in Fendermint +- Queries registry every 100 blocks +- Auto-updates `halt_height` in memory (no config file edit needed) +- Logs all upgrade activities + +### 1.4 Process Flow (Phase 1) + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Step 1: Propose Upgrade (IPC Team Lead) │ +├─────────────────────────────────────────────────────────────┤ +│ $ ipc-cli upgrade propose --height 15000 --binary-url ... │ +│ ✓ Upgrade proposal #7 created │ +│ ✓ Target height: 15000 │ +│ ✓ Binary: v0.2.0 (sha256:abc123...) │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Step 2: Fendermint Auto-Detects (All Validator Nodes) │ +├─────────────────────────────────────────────────────────────┤ +│ [INFO] Upgrade proposal #7 detected │ +│ [INFO] Downloading binary from IPFS... │ +│ [INFO] Verifying hash... ✓ │ +│ [INFO] Setting halt_height=15000 │ +│ [INFO] Signaling ready to registry │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Step 3: Monitor Readiness (Anyone) │ +├─────────────────────────────────────────────────────────────┤ +│ $ ipc-cli upgrade status │ +│ Upgrade #7 (target height: 15000) │ +│ Ready: 4/4 validators (100%) │ +│ Current height: 14850 │ +│ ETA: ~2 minutes │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Step 4: Automatic Halt (Block 15000) │ +├─────────────────────────────────────────────────────────────┤ +│ [INFO] Block 15000 reached │ +│ [INFO] Halting due to upgrade #7 │ +│ [INFO] Executing pre-upgrade tasks... 
│ +│ [INFO] Exiting with code 2 (upgrade halt) │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Step 5: Binary Swap (Orchestrator or Manual) │ +├─────────────────────────────────────────────────────────────┤ +│ Option A: Manual (systemd, docker-compose, etc.) │ +│ - Operator updates binary in deployment config │ +│ - Restarts service │ +│ │ +│ Option B: Upgrade Orchestrator (planned Phase 2) │ +│ - Detects exit code 2 │ +│ - Swaps binary automatically │ +│ - Restarts Fendermint │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Step 6: Resume (All Nodes) │ +├─────────────────────────────────────────────────────────────┤ +│ [INFO] Starting Fendermint v0.2.0 │ +│ [INFO] Detecting upgrade #7 completed │ +│ [INFO] Executing upgrade scheduler migrations... │ +│ [INFO] State migration completed │ +│ [INFO] Resuming consensus at height 15001 │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 1.5 Implementation Tasks (Phase 1) + +1. **Create UpgradeRegistry contract** (2 days) + - Define schema + - Implement proposal/ready signaling + - Write tests + - Deploy to test networks + +2. **Add upgrade monitor to Fendermint** (3 days) + - Query registry contract + - Download/verify binaries + - Auto-update halt_height + - Signal readiness + +3. **Extend ipc-cli with upgrade commands** (2 days) + - `upgrade propose` + - `upgrade status` + - `upgrade ready` (manual signal if needed) + +4. **Integration testing** (2 days) + - 4-validator test network + - Simulate upgrade flow end-to-end + - Test failure scenarios + +5. **Documentation** (1 day) + - Operator guide + - Architecture docs + - Runbook for troubleshooting + +**Total:** ~10 days (2 weeks with buffer) + +--- + +## Phase 2: Semi-Automated Coordination + +**Timeline:** 2-3 months +**Target Users:** External subnet operators +**UX Goal:** Operator updates binary and restarts; network handles upgrade + +### 2.1 Upgrade Orchestrator (Cosmovisor-Style) + +**New binary:** `ipc-orchestrator` + +Wraps Fendermint process and manages lifecycle: + +```yaml +# orchestrator-config.yaml +fendermint: + binary_path: /usr/local/bin/fendermint + data_dir: ~/.fendermint + auto_download: true + binary_registry: ipfs://... + +upgrade: + auto_apply: true + backup_enabled: true + rollback_on_failure: true + max_downtime: 60s +``` + +**Features:** + +1. **Binary Management** + - Maintains directory of version binaries + - Downloads from IPFS/GitHub based on registry + - Verifies signatures and hashes + +2. **Automatic Upgrade Application** + - Monitors Fendermint exit codes + - Code 0: Normal exit + - Code 1: Error (don't restart) + - Code 2: Upgrade halt (apply upgrade) + +3. **Rollback Protection** + - Creates state backup before upgrade + - Sets timeout for new version (5 minutes) + - Reverts if new version fails to start + +4. **Health Monitoring** + - Checks if node is keeping up with consensus + - Alerts if node falls behind after upgrade + - Can trigger automatic rollback + +### 2.2 Enhanced Upgrade Proposals with Governance + +**Extended UpgradeRegistry:** + +```solidity +struct UpgradeProposal { + // ... existing fields ... 
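+    // (Phase 1 fields carry over: id, targetHeight, binaryHash, binaryUrl,
+    // proposer, proposedAt, executed, validatorReady, readyCount)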
+
+    // Governance fields
+    uint64 votingPeriod;
+    uint64 votingDeadline;
+    mapping(address => bool) votes;
+    uint64 yesVotes;
+    uint64 noVotes;
+    uint64 totalVotingPower;
+
+    // Execution fields
+    uint64 executionWindow; // Blocks after targetHeight to complete
+    bytes migrationData;    // Optional state migration params
+
+    // Rollback
+    bool rolledBack;
+    string rollbackReason;
+}
+
+function vote(uint64 proposalId, bool support) external;
+function executeUpgrade(uint64 proposalId) external;
+function rollbackUpgrade(uint64 proposalId, string calldata reason) external;
+```
+
+**Voting Mechanism:**
+- Validators vote with voting power proportional to stake
+- A proposal passes with a 2/3+ supermajority
+- Typical voting period: 7 days
+- After passing, `targetHeight` is set automatically
+
+### 2.3 Dynamic Upgrade Scheduling
+
+**Problem:** Hardcoded migrations in the UpgradeScheduler aren't flexible.
+
+**Solution:** Runtime-loadable upgrade handlers
+
+```rust
+// fendermint/vm/interpreter/src/fvm/upgrades.rs
+
+pub enum UpgradeHandler {
+    // Existing: compiled-in function
+    Compiled(MigrationFunc),
+
+    // New: WASM-based migration
+    Wasm {
+        code: Vec<u8>,
+        entry_point: String,
+    },
+
+    // New: Standard operations (no custom code)
+    Standard(StandardUpgrade),
+}
+
+pub enum StandardUpgrade {
+    // Deploy new contract at address
+    DeployContract {
+        bytecode: Vec<u8>,
+        constructor_args: Vec<u8>,
+    },
+
+    // Upgrade existing contract
+    UpgradeContract {
+        address: Address,
+        new_code: Vec<u8>,
+    },
+
+    // Patch state (key-value updates)
+    PatchState {
+        updates: Vec<(Address, Vec<u8>, Vec<u8>)>, // (actor, key, value)
+    },
+
+    // No-op (binary upgrade only)
+    NoOp,
+}
+```
+
+**Loading from UpgradeRegistry:**
+
+```rust
+impl UpgradeMonitor {
+    async fn load_upgrade_handler(
+        &self,
+        proposal: &UpgradeProposal,
+    ) -> Result<UpgradeHandler> {
+        // Fetch the migration kind from the proposal
+        match proposal.migration_data.migration_type {
+            MigrationType::Compiled => {
+                // Look up in built-in registry
+                get_compiled_migration(proposal.id)
+            }
+            MigrationType::Wasm => {
+                // Download WASM from IPFS
+                let wasm_code = ipfs_get(&proposal.migration_data.wasm_cid).await?;
+                Ok(UpgradeHandler::Wasm {
+                    code: wasm_code,
+                    entry_point: "migrate".to_string(),
+                })
+            }
+            MigrationType::Standard => {
+                // Parse standard operations
+                let ops = decode_standard_ops(&proposal.migration_data.ops)?;
+                Ok(UpgradeHandler::Standard(ops))
+            }
+            MigrationType::NoOp => Ok(UpgradeHandler::Standard(StandardUpgrade::NoOp)),
+        }
+    }
+}
+```
+
+### 2.4 Operator Experience (Phase 2)
+
+**Before Upgrade (Operator):**
+
+```bash
+# 1. Upgrade is proposed on-chain (by governance or admin)
+# 2. Operator receives notification (email, Slack bot, etc.)
+# 3. Operator reviews proposal
+
+$ ipc-orchestrator status
+Current version: v0.1.5
+Pending upgrade: v0.2.0
+  - Target height: 25000 (in ~5 days)
+  - Status: Approved by governance
+  - Required: Update binary before height 25000
+  - Migration: Standard (deploy new contract)
+
+# 4. Operator updates config to auto-upgrade
+$ ipc-orchestrator config set upgrade.auto_apply=true
+
+# That's it! Orchestrator handles the rest.
+```
+
+**During Upgrade (Automatic):**
+
+```
+[Height 24900] Orchestrator: Preparing for upgrade #12
+[Height 24900] Orchestrator: Downloading binary v0.2.0...
+[Height 24900] Orchestrator: Binary verified (sha256:xyz789...)
+[Height 24900] Orchestrator: Creating state backup...
+[Height 24900] Orchestrator: Backup saved to ~/.fendermint/backups/upgrade-12 +[Height 24900] Orchestrator: Ready for upgrade +[Height 25000] Fendermint: Halting for upgrade #12 +[Height 25000] Fendermint: Exit code 2 +[Height 25000] Orchestrator: Detected upgrade halt +[Height 25000] Orchestrator: Swapping binary v0.1.5 → v0.2.0 +[Height 25000] Orchestrator: Starting Fendermint v0.2.0... +[Height 25001] Fendermint v0.2.0: Starting upgrade migration +[Height 25001] Fendermint v0.2.0: Deploying contract at 0xabc... +[Height 25001] Fendermint v0.2.0: Migration complete +[Height 25001] Fendermint v0.2.0: Resuming consensus +[Height 25002] Orchestrator: Health check passed +[Height 25002] Orchestrator: Upgrade #12 successful +``` + +**If Upgrade Fails:** + +``` +[Height 25001] Fendermint v0.2.0: Migration failed: contract deployment error +[Height 25001] Fendermint v0.2.0: Exit code 1 +[Height 25001] Orchestrator: ⚠️ New version failed to start +[Height 25001] Orchestrator: Initiating rollback... +[Height 25001] Orchestrator: Restoring state from backup +[Height 25001] Orchestrator: Swapping binary v0.2.0 → v0.1.5 +[Height 25001] Orchestrator: Starting Fendermint v0.1.5... +[Height 25002] Fendermint v0.1.5: Resuming consensus +[Height 25002] Orchestrator: ⚠️ Upgrade #12 rolled back +[Height 25002] Orchestrator: Signaling rollback to network... +``` + +### 2.5 Implementation Tasks (Phase 2) + +1. **Upgrade Orchestrator** (3 weeks) + - Process wrapper with lifecycle management + - Binary download/verification + - Backup/restore functionality + - Exit code monitoring + - Rollback logic + - Health checks + +2. **Enhanced UpgradeRegistry with Governance** (2 weeks) + - Voting mechanism + - Proposal lifecycle management + - Migration data storage + - Events for monitoring + +3. **Dynamic Upgrade Handlers** (2 weeks) + - WASM runtime integration + - Standard operation types + - Handler loading from registry + - Security sandboxing + +4. **Integration with Orchestrator** (1 week) + - Registry querying + - Automatic scheduling + - Readiness signaling + - Failure reporting + +5. **Testing & Validation** (2 weeks) + - Multi-node testnet upgrades + - Failure scenario testing + - Rollback testing + - Performance benchmarking + +6. **Documentation & Tooling** (1 week) + - Operator guide + - Upgrade proposal template + - Monitoring dashboards + - Alerting setup guide + +**Total:** ~11 weeks (~2.5 months) + +--- + +## Phase 3: Network-Driven Upgrades + +**Timeline:** Future (post-Phase 2) +**Goal:** Network self-coordinates based on validator readiness + +### 3.1 Readiness-Based Scheduling + +**Concept:** Don't set `targetHeight` in advance. Instead, network automatically schedules upgrade when enough validators signal readiness. + +```solidity +struct UpgradeProposal { + // ... existing fields ... + + // Readiness-based scheduling + uint64 readinessThreshold; // e.g., 67% (2/3 validators) + uint64 readinessDeadline; // If not ready by this height, cancel + uint64 schedulingWindow; // Hours between ready threshold and execution + + bool autoScheduled; + uint64 autoScheduledAt; + uint64 autoScheduledHeight; +} + +function checkAndSchedule(uint64 proposalId) external { + UpgradeProposal storage p = proposals[proposalId]; + + uint64 readyPower = calculateReadyVotingPower(proposalId); + uint64 totalPower = getTotalVotingPower(); + + if (readyPower * 100 / totalPower >= p.readinessThreshold) { + // Quorum reached! 
Schedule upgrade + uint64 currentHeight = block.number; + p.targetHeight = currentHeight + blocksInHours(p.schedulingWindow); + p.autoScheduled = true; + p.autoScheduledAt = block.timestamp; + p.autoScheduledHeight = currentHeight; + + emit UpgradeAutoScheduled(proposalId, p.targetHeight); + } +} +``` + +**Flow:** + +1. Upgrade proposed with `readinessThreshold=67%`, `schedulingWindow=24h` +2. Validators update binaries at their convenience +3. Each validator signals ready after successful binary download +4. When 67% ready, network automatically schedules upgrade in 24 hours +5. Remaining 33% have 24 hours to update or fall out of consensus + +### 3.2 Graceful Degradation for Late Upgraders + +**Problem:** What if validators miss the upgrade window? + +**Solution:** Extended compatibility window + +```rust +pub struct CompatibilityWindow { + /// Block height where upgrade executed + upgrade_height: BlockHeight, + + /// Blocks to allow old version to sync (grace period) + grace_period: u64, + + /// Old version can sync blocks but not validate + old_version_read_only: bool, +} + +impl Fendermint { + fn check_version_compatibility(&self, height: BlockHeight) -> Result { + if height < upgrade_height { + // Pre-upgrade blocks + Ok(VersionMode::Normal) + } else if height < upgrade_height + grace_period { + // Grace period: old version can sync but not validate + if self.version < required_version { + Ok(VersionMode::ReadOnly) + } else { + Ok(VersionMode::Normal) + } + } else { + // After grace period: must upgrade + if self.version < required_version { + Err(anyhow!("Version too old. Please upgrade to continue.")) + } else { + Ok(VersionMode::Normal) + } + } + } +} +``` + +**Validator Experience:** + +``` +Validator on old version after upgrade: + +[Height 30001] ⚠️ Network upgraded to v0.3.0 +[Height 30001] ⚠️ You are running v0.2.0 +[Height 30001] ⚠️ Entering read-only mode +[Height 30001] ℹ️ You can sync blocks but cannot validate +[Height 30001] ℹ️ Grace period: 1000 blocks (~8 hours) +[Height 30001] ℹ️ Upgrade before height 31001 to resume validation + +[Height 30500] ⚠️ Grace period remaining: 500 blocks (~4 hours) +[Height 30900] ⚠️ Grace period remaining: 100 blocks (~48 minutes) +[Height 30990] 🚨 Grace period remaining: 10 blocks (~5 minutes) + +[Height 31001] 🚨 Grace period expired +[Height 31001] 🚨 Shutting down. Please upgrade to v0.3.0. +``` + +### 3.3 Version Advertisement + +**Validators advertise version in consensus messages:** + +```rust +pub struct ValidatorInfo { + address: Address, + voting_power: u64, + binary_version: String, // e.g., "v0.3.0" + protocol_version: u64, // e.g., 3 +} + +// In CometBFT validator set +impl Validator { + fn to_tendermint_validator(&self) -> tendermint::Validator { + tendermint::Validator { + // ... standard fields ... + + // Custom field for version + extra: serde_json::to_vec(&ValidatorInfo { + address: self.address, + voting_power: self.power, + binary_version: env!("CARGO_PKG_VERSION").to_string(), + protocol_version: PROTOCOL_VERSION, + }).unwrap(), + } + } +} +``` + +**Network Dashboard:** + +``` +Subnet Validator Status + +Upgrade #15 (v0.3.0) - Auto-scheduling enabled +Ready: 8/12 validators (67%) ← Threshold: 67% +Status: ⚠️ Ready to schedule + +Ready Validators (8): + ✓ validator-1 v0.3.0 [Ready for 2 hours] + ✓ validator-2 v0.3.0 [Ready for 1 hour] + ✓ validator-3 v0.3.0 [Ready for 30 minutes] + ... + +Pending Validators (4): + ⏳ validator-9 v0.2.0 [Last seen: 2 mins ago] + ⏳ validator-10 v0.2.0 [Last seen: 5 mins ago] + ... 
+ +⚡ Upgrade will auto-schedule in ~10 minutes if no more validators ready +📅 Estimated execution: 24 hours after scheduling +``` + +### 3.4 Implementation Tasks (Phase 3) + +This is a future phase, but high-level tasks: + +1. **Readiness-based scheduling logic** (2 weeks) +2. **Version advertisement in consensus** (2 weeks) +3. **Grace period & read-only mode** (2 weeks) +4. **Network monitoring dashboard** (1 week) +5. **Testing across scenarios** (2 weeks) + +**Total:** ~9 weeks + +--- + +## Smart Contract Upgrade Strategy + +Smart contract upgrades (Gateway, Subnet Actor, Registry) work differently from binary upgrades since they're on-chain state changes. + +### Current vs. Improved Flow + +**Current (Manual):** +```bash +1. Developer edits contracts/src/gateway/GatewayFacet.sol +2. Developer runs: make upgrade-gw-diamond NETWORK=calibration +3. Transaction sent from developer's wallet +4. Upgrade happens immediately (no coordination) +``` + +**Improved (Coordinated):** + +```bash +1. Developer edits contracts/src/gateway/GatewayFacet.sol +2. Developer proposes upgrade via registry: + $ ipc-cli upgrade propose-contract \ + --contract gateway \ + --facets GatewayFacet,CheckpointingFacet \ + --network calibration + +3. Registry emits event: ContractUpgradeProposed +4. Validators review bytecode diff (on-chain or via IPFS) +5. Validators vote (on-chain transaction) +6. If approved, scheduled for execution +7. Anyone can trigger execution after approval +``` + +### Coordinating Binary + Contract Upgrades + +Often both need to upgrade together. The upgraded Fendermint binary may depend on new contract interfaces. + +**Solution: Linked Upgrade Proposals** + +```solidity +struct UpgradeProposal { + // ... existing fields ... + + // Contract upgrades included in this proposal + address[] contractsToUpgrade; + bytes[] contractUpgradeData; + + // Execution order + bool upgradeContractsFirst; // true = contracts before halt +} +``` + +**Execution Flow:** + +``` +Proposal: Upgrade to v0.3.0 + new Gateway contract + +1. Proposal approved by governance +2. Ready threshold reached (67% validators) +3. 
Upgrade auto-scheduled for height 40000 + +[Height 39990] Pre-upgrade contract changes +[Height 39990] Execute contract upgrades (if upgradeContractsFirst=true) +[Height 39990] Gateway upgraded to v2 +[Height 39990] Subnet Actor upgraded to v2 + +[Height 40000] Binary upgrade halt +[Height 40000] Validators swap to Fendermint v0.3.0 +[Height 40000] Fendermint v0.3.0 starts +[Height 40000] Fendermint reads new contract interfaces ✓ +[Height 40001] Network resumes with both upgrades complete +``` + +--- + +## Migration Path from Current to Phase 1 + +### Week 1: Core Infrastructure + +**Day 1-2: UpgradeRegistry Contract** +``` +File: contracts/contracts/upgrade/UpgradeRegistry.sol +- Define proposal struct +- Implement propose/vote/signal ready +- Add query methods +- Write unit tests +``` + +**Day 3-4: Fendermint Upgrade Monitor** +``` +File: fendermint/app/src/upgrade/monitor.rs +- Query registry contract periodically +- Parse upgrade proposals +- Download/verify binaries +- Update halt_height dynamically +``` + +**Day 5: CLI Commands** +``` +File: ipc/cli/src/commands/upgrade/ +- upgrade propose +- upgrade status +- upgrade ready +``` + +### Week 2: Integration & Testing + +**Day 6-7: Integration Testing** +``` +- Deploy registry to test network +- 4-validator upgrade scenario +- Test failure cases +- Verify monitoring/alerting +``` + +**Day 8-9: Documentation** +``` +- docs/ipc/upgrade-guide.md +- docs/ipc/upgrade-operator-runbook.md +- Update README with upgrade info +``` + +**Day 10: Production Deployment** +``` +- Deploy UpgradeRegistry to Calibration testnet +- Update Fendermint binaries with monitor +- Announce new upgrade process +``` + +--- + +## Testing Strategy + +### Phase 1 Testing + +**Local 4-Validator Network:** +```bash +# scripts/test-upgrade.sh + +1. Start 4-validator testnet +2. Propose upgrade via CLI +3. Verify all nodes detect proposal +4. Verify all nodes download binary +5. Verify all nodes signal ready +6. Wait for halt_height +7. Verify all nodes halt with exit code 2 +8. Manually replace binaries +9. Verify all nodes resume +10. 
Verify state consistency +``` + +**Failure Scenarios:** +- One validator fails to download binary +- One validator halts early +- One validator doesn't halt +- Binary verification fails +- Network splits during upgrade + +### Phase 2 Testing + +**Automated Upgrade:** +- Orchestrator handles full upgrade cycle +- Test rollback on migration failure +- Test rollback on health check failure +- Test upgrade with contract changes + +**Governance:** +- Vote on upgrade proposal +- Vote rejection +- Vote timeout +- Emergency upgrade + +### Phase 3 Testing + +**Readiness-Based:** +- Auto-schedule when threshold reached +- Validators join after scheduling +- Validators miss upgrade window +- Grace period expiration + +--- + +## Monitoring & Observability + +### Metrics to Track + +**Upgrade Coordination:** +- `ipc_upgrade_proposal_count` - Total proposals created +- `ipc_upgrade_validators_ready` - Validators ready for upgrade +- `ipc_upgrade_time_to_ready` - Time from proposal to ready threshold +- `ipc_upgrade_completion_time` - Downtime duration + +**Binary Management:** +- `ipc_binary_download_duration` - Time to download binary +- `ipc_binary_verification_success` - Verification success rate +- `ipc_orchestrator_restarts` - Number of orchestrator restarts +- `ipc_upgrade_rollbacks` - Number of rollbacks + +**Consensus Health:** +- `ipc_consensus_lag` - Blocks behind after upgrade +- `ipc_validator_version_distribution` - Version distribution +- `ipc_upgrade_failures` - Failed upgrades + +### Alerting Rules + +```yaml +# alerts/upgrade.yml + +- alert: UpgradeProposalCreated + expr: increase(ipc_upgrade_proposal_count[5m]) > 0 + for: 1m + annotations: + summary: "New upgrade proposal #{{ $labels.proposal_id }}" + +- alert: ValidatorNotReady + expr: ipc_upgrade_validators_ready / ipc_total_validators < 0.67 + for: 1h + annotations: + summary: "Only {{ $value }}% validators ready for upgrade" + +- alert: UpgradeHaltImminent + expr: (ipc_upgrade_target_height - ipc_current_height) < 100 + for: 1m + annotations: + summary: "Upgrade halt in ~{{ $value }} blocks" + +- alert: UpgradeRollback + expr: increase(ipc_upgrade_rollbacks[5m]) > 0 + for: 1m + annotations: + summary: "⚠️ Upgrade rolled back on validator {{ $labels.validator }}" +``` + +--- + +## Security Considerations + +### Binary Verification + +**Problem:** Validators download binaries from IPFS/GitHub. How to prevent malicious binaries? + +**Solutions:** + +1. **Multi-signature Verification** + ``` + Binary must be signed by M of N core developers + Validators verify signatures before accepting + ``` + +2. **Reproducible Builds** + ``` + Build process documented + Validators can rebuild from source + Compare hash with distributed binary + ``` + +3. **Staged Rollout** + ``` + Deploy to testnet first + Monitor for 48 hours + Then deploy to mainnet + ``` + +### Migration Security + +**Problem:** WASM migrations in Phase 2/3 could be exploited + +**Solutions:** + +1. **Sandboxing** + ```rust + - Limit gas for migration execution + - Restrict syscalls (no network, limited file I/O) + - Read-only access to most state + - Explicit permissions for state modifications + ``` + +2. **Formal Verification** + ``` + Critical migrations reviewed by security auditor + Automated tests for common exploits + Require supermajority for WASM migrations (75% vs 67%) + ``` + +3. 
**Emergency Stop** + ```solidity + function emergencyHalt(uint64 proposalId, string reason) + external + onlyEmergencyMultisig + { + // Immediately cancel upgrade + // Broadcast halt to all validators + // Requires 3-of-5 emergency multisig + } + ``` + +--- + +## Cost-Benefit Analysis + +### Phase 1 Benefits +- ✅ Single source of truth for upgrades +- ✅ Eliminate manual config editing +- ✅ Reduce downtime from ~5 minutes to ~30 seconds +- ✅ Reduce operator errors +- ✅ Auditability (all upgrades on-chain) + +### Phase 1 Costs +- 🔨 2 weeks development +- 🔨 Additional on-chain storage (~1KB per proposal) +- 🔨 Network queries every 100 blocks (~negligible gas) + +### Phase 2 Benefits +- ✅ "Set and forget" operator experience +- ✅ Automatic rollback on failure +- ✅ Governance-driven upgrades +- ✅ Dynamic migrations (no recompilation) +- ✅ Supports external operators + +### Phase 2 Costs +- 🔨 2-3 months development +- 🔨 Additional operational complexity (orchestrator binary) +- 🔨 WASM runtime overhead (~5-10% during migration) +- 🔨 Increased on-chain data for migrations + +### Phase 3 Benefits +- ✅ Zero coordination overhead +- ✅ Self-healing network +- ✅ Gradual upgrades (late adopters have time) +- ✅ Production-grade UX + +### Phase 3 Costs +- 🔨 Additional 2-3 months development +- 🔨 More complex consensus logic +- 🔨 Grace period may delay finality for stragglers + +--- + +## Open Questions & Future Considerations + +### 1. Cross-Subnet Upgrade Coordination + +**Question:** If a parent subnet upgrades, should child subnets also upgrade? + +**Options:** +- A) Independent (children can run old version if compatible) +- B) Forced (parent upgrade triggers child upgrades) +- C) Coordinated (parent signals intent, children have window to upgrade) + +**Recommendation:** Option C with compatibility window + +### 2. Emergency Rollback Across Network + +**Question:** If 10% of validators fail to upgrade, should network roll back? + +**Options:** +- A) Continue with 90% (forking risk) +- B) Automatic rollback if <95% success +- C) Emergency governance vote to decide + +**Recommendation:** Option B with monitoring, Option C as override + +### 3. Multi-Version Consensus (Advanced) + +**Question:** Can network run multiple versions simultaneously? + +This is Phase 4+ territory, requires: +- Version-aware state transitions +- Backward-compatible consensus messages +- Complex testing matrix + +**Recommendation:** Defer until Phase 3 is proven in production + +### 4. Upgrade Scheduling Across Time Zones + +**Question:** Global validator set may prefer different upgrade windows + +**Solution:** Readiness-based scheduling (Phase 3) naturally handles this +- Validators in Europe ready first (morning) +- Validators in US ready next (their morning) +- Network schedules when threshold reached globally + +--- + +## Success Metrics + +### Phase 1 Success Criteria +- ✓ 100% of test upgrades succeed on testnet +- ✓ Average downtime < 60 seconds +- ✓ Zero manual config edits required +- ✓ All validators signal ready before halt + +### Phase 2 Success Criteria +- ✓ 95%+ of validators successfully auto-upgrade +- ✓ Rollback mechanism tested and working +- ✓ External subnet operators adopt new process +- ✓ Average downtime < 30 seconds + +### Phase 3 Success Criteria +- ✓ Network self-coordinates 90%+ of upgrades +- ✓ Late validators successfully sync during grace period +- ✓ No manual coordination needed +- ✓ Community operates upgrades without core team + +--- + +## Appendix A: Alternative Approaches Considered + +### A1. 
Hot Swapping (Rejected) + +**Idea:** Swap binary without halting node + +**Why Rejected:** +- Extremely complex (process isolation, state transfer) +- High risk of state corruption +- Not worth benefit for ~30 second downtime + +### A2. Blue-Green Validator Sets (Rejected) + +**Idea:** Two validator sets, upgrade one at a time + +**Why Rejected:** +- Requires 2x validators (expensive) +- Complex handoff logic +- Only eliminates downtime, not coordination problem + +### A3. Docker-Based Upgrades (Considered) + +**Idea:** Orchestrator pulls new Docker images + +**Why Considered:** +- Clean isolation +- Standard deployment pattern +- Easy rollback + +**Trade-offs:** +- Requires Docker (not all deployments use it) +- Slightly slower startup +- Additional dependency + +**Decision:** Support both Docker and binary-based in orchestrator + +--- + +## Appendix B: Glossary + +**halt_height:** Block height where Fendermint exits for upgrade + +**UpgradeScheduler:** Rust module that executes migrations at block heights + +**UpgradeRegistry:** Smart contract tracking upgrade proposals + +**Orchestrator:** Wrapper process managing Fendermint lifecycle + +**Migration:** State transformation executed during upgrade + +**Readiness threshold:** Percentage of validators needed to auto-schedule + +**Grace period:** Blocks where old version can sync but not validate + +**Diamond pattern:** EIP-2535 upgradable contract architecture + +**Binary hash:** Cryptographic hash verifying binary authenticity + +--- + +## Next Steps + +### Immediate Actions (This Week) + +1. **Review & Approve** this document with IPC team +2. **Create GitHub Issues** for Phase 1 tasks +3. **Set up test infrastructure** (4-validator testnet) +4. **Assign developers** to Phase 1 implementation + +### Week 1 Kickoff + +1. **Design review** for UpgradeRegistry contract +2. **Begin contract implementation** +3. **Set up monitoring/logging** for upgrade events +4. 
**Draft operator communications** for new process + +### Ongoing + +- Weekly sync on progress +- Update this doc as implementation reveals new requirements +- Gather operator feedback during Phase 1 +- Begin Phase 2 design during Phase 1 implementation + +--- + +**Document Maintainer:** IPC Core Team +**Last Updated:** November 3, 2025 +**Next Review:** After Phase 1 completion + diff --git a/fendermint/actors/Cargo.toml b/fendermint/actors/Cargo.toml index 153d52e9c3..d3cf7a5869 100644 --- a/fendermint/actors/Cargo.toml +++ b/fendermint/actors/Cargo.toml @@ -17,3 +17,12 @@ fendermint_actor_chainmetadata = { path = "chainmetadata", features = ["fil-acto fendermint_actor_f3_light_client = { path = "f3-light-client", features = ["fil-actor"] } fendermint_actor_gas_market_eip1559 = { path = "gas_market/eip1559", features = ["fil-actor"] } fendermint_actor_eam = { path = "eam", features = ["fil-actor"] } +fendermint_actor_init = { path = "init", features = ["fil-actor"] } +# Recall actors +fendermint_actor_adm = { path = "adm", features = ["fil-actor"] } +fendermint_actor_blobs = { path = "blobs", features = ["fil-actor"] } +fendermint_actor_blob_reader = { path = "blob_reader", features = ["fil-actor"] } +fendermint_actor_bucket = { path = "bucket", features = ["fil-actor"] } +fendermint_actor_machine = { path = "machine", features = ["fil-actor"] } +fendermint_actor_recall_config = { path = "recall_config", features = ["fil-actor"] } +fendermint_actor_timehub = { path = "timehub", features = ["fil-actor"] } diff --git a/fendermint/actors/adm/Cargo.toml b/fendermint/actors/adm/Cargo.toml new file mode 100644 index 0000000000..5e8e726230 --- /dev/null +++ b/fendermint/actors/adm/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "fendermint_actor_adm" +description = "ADM (Autonomous Data Management) actor for machine lifecycle management" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true, default-features = false } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +hex-literal = { workspace = true } +integer-encoding = { workspace = true } +log = { workspace = true } +multihash = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +recall_sol_facade = { workspace = true, features = ["machine"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_machine = { path = "../machine" } +recall_actor_sdk = { path = "../../../recall/actor_sdk" } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] + diff --git a/fendermint/actors/adm/src/ext.rs b/fendermint/actors/adm/src/ext.rs new file mode 100644 index 0000000000..03418ab8bf --- /dev/null +++ b/fendermint/actors/adm/src/ext.rs @@ -0,0 +1,56 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; + +pub mod init { + use super::*; + use cid::Cid; + use fvm_ipld_encoding::RawBytes; + use fvm_shared::address::Address; + + pub const EXEC_METHOD: u64 = 2; + + /// Init actor Exec Params. 
+    #[derive(Serialize_tuple, Deserialize_tuple)]
+    pub struct ExecParams {
+        pub code_cid: Cid,
+        pub constructor_params: RawBytes,
+    }
+
+    /// Init actor Exec Return value.
+    #[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+    pub struct ExecReturn {
+        /// ID based address for created actor.
+        pub id_address: Address,
+        /// Reorg safe address for actor.
+        pub robust_address: Address,
+    }
+}
+
+pub mod account {
+    pub const PUBKEY_ADDRESS_METHOD: u64 = 2;
+}
+
+pub mod machine {
+    use super::*;
+    use fvm_shared::address::Address;
+    use std::collections::HashMap;
+
+    pub const INIT_METHOD: u64 = 2;
+
+    #[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+    pub struct ConstructorParams {
+        /// The machine owner ID address.
+        pub owner: Address,
+        /// User-defined metadata.
+        pub metadata: HashMap<String, String>,
+    }
+
+    #[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+    pub struct InitParams {
+        /// The machine ID address.
+        pub address: Address,
+    }
+}
diff --git a/fendermint/actors/adm/src/lib.rs b/fendermint/actors/adm/src/lib.rs
new file mode 100644
index 0000000000..fe6805a595
--- /dev/null
+++ b/fendermint/actors/adm/src/lib.rs
@@ -0,0 +1,303 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+use std::iter;
+
+use cid::Cid;
+use ext::init::{ExecParams, ExecReturn};
+use fil_actors_runtime::{
+    actor_dispatch_unrestricted, actor_error, deserialize_block, extract_send_result,
+    runtime::{builtins::Type, ActorCode, Runtime},
+    ActorDowncast, ActorError, INIT_ACTOR_ADDR, SYSTEM_ACTOR_ADDR,
+};
+use fvm_ipld_encoding::{ipld_block::IpldBlock, tuple::*, RawBytes};
+use fvm_shared::{address::Address, error::ExitCode, ActorID, METHOD_CONSTRUCTOR};
+use num_derive::FromPrimitive;
+use recall_sol_facade::machine::Calls;
+
+// ADM actor ID as defined in fendermint/vm/actor_interface/src/adm.rs
+pub const ADM_ACTOR_ID: ActorID = 17;
+
+use crate::sol_facade as sol;
+use crate::sol_facade::{AbiCall, AbiCallRuntime, InputData};
+use crate::state::PermissionMode;
+pub use crate::state::{Kind, Metadata, PermissionModeParams, State};
+
+pub mod ext;
+mod sol_facade;
+mod state;
+
+#[cfg(feature = "fil-actor")]
+fil_actors_runtime::wasm_trampoline!(AdmActor);
+
+#[derive(FromPrimitive)]
+#[repr(u64)]
+pub enum Method {
+    Constructor = METHOD_CONSTRUCTOR,
+    // Exported calls (computed via `frc42_dispatch::method_hash!` & hardcoded to avoid dependency issues)
+    CreateExternal = 1214262202,
+    UpdateDeployers = 1768606754,
+    ListMetadata = 2283215593,
+    GetMachineCode = 2892692559, //= frc42_dispatch::method_hash!("GetMachineCode");
+    InvokeContract = 3844450837, //= frc42_dispatch::method_hash!("InvokeEVM")
+}
+
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ConstructorParams {
+    pub machine_codes: HashMap<Kind, Cid>,
+    pub permission_mode: PermissionModeParams,
+}
+
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct CreateExternalParams {
+    pub owner: Address,
+    pub kind: Kind,
+    pub metadata: HashMap<String, String>,
+}
+
+#[derive(Serialize_tuple, Deserialize_tuple, Debug, PartialEq, Eq)]
+pub struct CreateExternalReturn {
+    pub actor_id: ActorID,
+    pub robust_address: Option<Address>,
+}
+
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ListMetadataParams {
+    pub owner: Address,
+}
+
+fn create_machine(
+    rt: &impl Runtime,
+    owner: Address,
+    code_cid: Cid,
+    metadata: HashMap<String, String>,
+) -> Result<CreateExternalReturn, ActorError> {
+    let constructor_params =
+        RawBytes::serialize(ext::machine::ConstructorParams { owner, metadata })?;
+    let ret: ExecReturn = deserialize_block(extract_send_result(rt.send_simple(
+        &INIT_ACTOR_ADDR,
+        ext::init::EXEC_METHOD,
+        IpldBlock::serialize_cbor(&ExecParams {
+            code_cid,
+            constructor_params,
+        })?,
+        rt.message().value_received(),
+    ))?)?;
+
+    // Initialize the machine with its address
+    let actor_id = ret.id_address.id().unwrap();
+    let address = Address::new_id(actor_id);
+    extract_send_result(rt.send_simple(
+        &ret.id_address,
+        ext::machine::INIT_METHOD,
+        IpldBlock::serialize_cbor(&ext::machine::InitParams { address })?,
+        rt.message().value_received(),
+    ))?;
+
+    Ok(CreateExternalReturn {
+        actor_id,
+        robust_address: Some(ret.robust_address),
+    })
+}
+
+fn ensure_deployer_allowed(rt: &impl Runtime) -> Result<(), ActorError> {
+    // The caller is guaranteed to be an ID address.
+    let caller_id = rt.message().caller().id().unwrap();
+
+    // Check if the caller is a contract. If it is, and we're in permissioned mode,
+    // then the contract was either there in genesis or has been deployed by a whitelisted
+    // account; in both cases it's been known up front whether it creates other contracts,
+    // and if that was undesirable it would not have been deployed as it is.
+    let code_cid = rt.get_actor_code_cid(&caller_id).expect("caller has code");
+    if rt.resolve_builtin_actor_type(&code_cid) == Some(Type::EVM) {
+        return Ok(());
+    }
+
+    // Check if the caller is whitelisted.
+    let state: State = rt.state()?;
+    if !state.can_deploy(rt, caller_id)? {
+        return Err(ActorError::forbidden(String::from(
+            "sender not allowed to deploy contracts",
+        )));
+    }
+
+    Ok(())
+}
+
+pub struct AdmActor;
+
+impl AdmActor {
+    pub fn constructor(rt: &impl Runtime, args: ConstructorParams) -> Result<(), ActorError> {
+        let actor_id = rt.resolve_address(&rt.message().receiver()).unwrap();
+        if actor_id != ADM_ACTOR_ID {
+            return Err(ActorError::forbidden(format!(
+                "The ADM must be deployed at {ADM_ACTOR_ID}, was deployed at {actor_id}"
+            )));
+        }
+        rt.validate_immediate_caller_is(iter::once(&SYSTEM_ACTOR_ADDR))?;
+
+        let st = State::new(rt.store(), args.machine_codes, args.permission_mode)?;
+        rt.create(&st)
+    }
+
+    fn update_deployers(rt: &impl Runtime, deployers: Vec<Address>) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+
+        // Reject update if we're unrestricted.
+        let state: State = rt.state()?;
+        if !matches!(state.permission_mode, PermissionMode::AllowList(_)) {
+            return Err(ActorError::forbidden(String::from(
+                "deployers can only be updated in allowlist mode",
+            )));
+        };
+
+        // Check that the caller is in the allowlist.
+        let caller_id = rt.message().caller().id().unwrap();
+        if !state.can_deploy(rt, caller_id)? {
+            return Err(ActorError::forbidden(String::from(
+                "sender not allowed to update deployers",
+            )));
+        }
+
+        // Perform the update.
+        rt.transaction(|st: &mut State, rt| {
+            st.set_deployers(rt.store(), deployers).map_err(|e| {
+                e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "failed to set deployers")
+            })
+        })?;
+
+        Ok(())
+    }
+
+    /// Create a new machine from off-chain.
+    pub fn create_external(
+        rt: &impl Runtime,
+        params: CreateExternalParams,
+    ) -> Result<CreateExternalReturn, ActorError> {
+        ensure_deployer_allowed(rt)?;
+        rt.validate_immediate_caller_accept_any()?;
+
+        let owner_id = rt
+            .resolve_address(&params.owner)
+            .ok_or(ActorError::illegal_argument(format!(
+                "failed to resolve actor for address {}",
+                params.owner
+            )))?;
+        let owner = Address::new_id(owner_id);
+        let machine_code = Self::retrieve_machine_code(rt, params.kind)?;
+        let ret = create_machine(rt, owner, machine_code, params.metadata.clone())?;
+        let address = Address::new_id(ret.actor_id);
+
+        // Save machine metadata.
+        rt.transaction(|st: &mut State, rt| {
+            st.set_metadata(rt.store(), owner, address, params.kind, params.metadata)
+                .map_err(|e| {
+                    e.downcast_default(
+                        ExitCode::USR_ILLEGAL_ARGUMENT,
+                        "failed to set machine metadata",
+                    )
+                })
+        })?;
+
+        Ok(ret)
+    }
+
+    /// Returns a list of machine metadata by owner.
+    ///
+    /// Metadata includes machine kind and address.
+    pub fn list_metadata(
+        rt: &impl Runtime,
+        params: ListMetadataParams,
+    ) -> Result<Vec<Metadata>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+
+        let owner_id = rt
+            .resolve_address(&params.owner)
+            .ok_or(ActorError::illegal_argument(format!(
+                "failed to resolve actor for address {}",
+                params.owner
+            )))?;
+        let owner_address = Address::new_id(owner_id);
+
+        let st: State = rt.state()?;
+        let metadata = st.get_metadata(rt.store(), owner_address).map_err(|e| {
+            e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "failed to get metadata")
+        })?;
+        Ok(metadata)
+    }
+
+    fn invoke_contract(
+        rt: &impl Runtime,
+        params: sol::InvokeContractParams,
+    ) -> Result<sol::InvokeContractReturn, ActorError> {
+        let input_data: InputData = params.try_into()?;
+        if sol::can_handle(&input_data) {
+            let output_data = match sol::parse_input(&input_data)? {
+                Calls::createBucket_0(call) => {
+                    // function createBucket() external;
+                    let params = call.params(rt);
+                    let create_external_return = Self::create_external(rt, params)?;
+                    call.returns(create_external_return)
+                }
+                Calls::createBucket_1(call) => {
+                    // function createBucket(address owner, KeyValue[] memory metadata) external;
+                    let params = call.params();
+                    let create_external_return = Self::create_external(rt, params)?;
+                    call.returns(create_external_return)
+                }
+                Calls::createBucket_2(call) => {
+                    // function createBucket(address owner) external;
+                    let params = call.params();
+                    let create_external_return = Self::create_external(rt, params)?;
+                    call.returns(create_external_return)
+                }
+                Calls::listBuckets_0(call) => {
+                    let params = call.params(rt);
+                    let list = Self::list_metadata(rt, params)?;
+                    call.returns(list)
+                }
+                Calls::listBuckets_1(call) => {
+                    let params = call.params();
+                    let list = Self::list_metadata(rt, params)?;
+                    call.returns(list)
+                }
+            };
+            Ok(sol::InvokeContractReturn { output_data })
+        } else {
+            Err(actor_error!(illegal_argument, "invalid call".to_string()))
+        }
+    }
+
+    pub fn get_machine_code(rt: &impl Runtime, kind: Kind) -> Result<Cid, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        Self::retrieve_machine_code(rt, kind)
+    }
+
+    fn retrieve_machine_code(rt: &impl Runtime, kind: Kind) -> Result<Cid, ActorError> {
+        rt.state::<State>()?
+            .get_machine_code(rt.store(), &kind)?
+            .ok_or(ActorError::not_found(format!(
+                "machine code for kind '{}' not found",
+                kind
+            )))
+    }
+}
+
+impl ActorCode for AdmActor {
+    type Methods = Method;
+
+    fn name() -> &'static str {
+        "ADMAddressManager"
+    }
+
+    actor_dispatch_unrestricted! {
+        Constructor => constructor,
+        CreateExternal => create_external,
+        UpdateDeployers => update_deployers,
+        ListMetadata => list_metadata,
+        GetMachineCode => get_machine_code,
+        InvokeContract => invoke_contract,
+    }
+}
diff --git a/fendermint/actors/adm/src/sol_facade.rs b/fendermint/actors/adm/src/sol_facade.rs
new file mode 100644
index 0000000000..de653d9204
--- /dev/null
+++ b/fendermint/actors/adm/src/sol_facade.rs
@@ -0,0 +1,255 @@
+use fil_actors_runtime::runtime::Runtime;
+use fil_actors_runtime::{actor_error, ActorError};
+use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
+use fvm_ipld_encoding::{strict_bytes, tuple::*};
+use fvm_shared::address::Address;
+use recall_sol_facade::machine as sol;
+use recall_sol_facade::machine::{listBuckets_0Call, listBuckets_1Call, Calls};
+use recall_sol_facade::types::{Address as SolAddress, SolCall, SolInterface, H160};
+use std::collections::HashMap;
+
+use crate::{CreateExternalParams, CreateExternalReturn, Kind, ListMetadataParams, Metadata};
+
+pub fn can_handle(input_data: &InputData) -> bool {
+    Calls::valid_selector(input_data.selector())
+}
+
+pub fn parse_input(input: &InputData) -> Result<Calls, ActorError> {
+    Calls::abi_decode_raw(input.selector(), input.calldata(), true)
+        .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e)))
+}
+
+impl AbiCallRuntime for sol::createBucket_0Call {
+    type Params = CreateExternalParams;
+    type Returns = CreateExternalReturn;
+    type Output = Vec<u8>;
+
+    fn params(&self, rt: &impl Runtime) -> Self::Params {
+        CreateExternalParams {
+            owner: rt.message().caller(),
+            kind: Kind::Bucket,
+            metadata: HashMap::default(),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let address = returns
+            .robust_address
+            .map(|address| H160::try_from(address).unwrap_or_default())
+            .unwrap_or_default();
+        let address: SolAddress = address.into();
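+        // ABI-encode the machine's robust address as the call's return data.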
+        Self::abi_encode_returns(&(address,))
+    }
+}
+
+impl AbiCall for sol::createBucket_1Call {
+    type Params = CreateExternalParams;
+    type Returns = CreateExternalReturn;
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let owner: Address = H160::from(self.owner).into();
+        let mut metadata = HashMap::with_capacity(self.metadata.len());
+        for kv in self.metadata.clone() {
+            metadata.insert(kv.key, kv.value);
+        }
+        CreateExternalParams {
+            owner,
+            kind: Kind::Bucket,
+            metadata,
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let address = returns
+            .robust_address
+            .map(|address| H160::try_from(address).unwrap_or_default())
+            .unwrap_or_default();
+        let address: SolAddress = address.into();
+        Self::abi_encode_returns(&(address,))
+    }
+}
+
+impl AbiCall for sol::createBucket_2Call {
+    type Params = CreateExternalParams;
+    type Returns = CreateExternalReturn;
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let owner: Address = H160::from(self.owner).into();
+        CreateExternalParams {
+            owner,
+            kind: Kind::Bucket,
+            metadata: HashMap::default(),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let address = returns
+            .robust_address
+            .map(|address| H160::try_from(address).unwrap_or_default())
+            .unwrap_or_default();
+        let address: SolAddress = address.into();
+        Self::abi_encode_returns(&(address,))
+    }
+}
+
+impl AbiCallRuntime for listBuckets_0Call {
+    type Params = ListMetadataParams;
+    type Returns = Vec<Metadata>;
+    type Output = Vec<u8>;
+
+    fn params(&self, rt: &impl Runtime) -> Self::Params {
+        ListMetadataParams {
+            owner: rt.message().caller(),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let machines: Vec<sol::Machine> = returns
+            .iter()
+            .map(|m| sol::Machine {
+                kind: sol_kind(m.kind),
+                addr: H160::try_from(m.address).unwrap_or_default().into(),
+                metadata: m
+                    .metadata
+                    .iter()
+                    .map(|(k, v)| sol::KeyValue {
+                        key: k.clone(),
+                        value: v.clone(),
+                    })
+                    .collect(),
+            })
+            .collect();
+        Self::abi_encode_returns(&(machines,))
+    }
+}
+
+impl AbiCall for listBuckets_1Call {
+    type Params = ListMetadataParams;
+    type Returns = Vec<Metadata>;
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        ListMetadataParams {
+            owner: H160::from(self.owner).into(),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let machines: Vec<sol::Machine> = returns
+            .iter()
+            .map(|m| sol::Machine {
+                kind: sol_kind(m.kind),
+                addr: H160::try_from(m.address).unwrap_or_default().into(),
+                metadata: m
+                    .metadata
+                    .iter()
+                    .map(|(k, v)| sol::KeyValue {
+                        key: k.clone(),
+                        value: v.clone(),
+                    })
+                    .collect(),
+            })
+            .collect();
+        Self::abi_encode_returns(&(machines,))
+    }
+}
+
+fn sol_kind(kind: Kind) -> u8 {
+    match kind {
+        Kind::Bucket => 0,
+        Kind::Timehub => 1,
+    }
+}
+
+// --- Copied from recall_actor_sdk --- //
+
+#[derive(Default, Serialize_tuple, Deserialize_tuple)]
+#[serde(transparent)]
+pub struct InvokeContractParams {
+    #[serde(with = "strict_bytes")]
+    pub input_data: Vec<u8>,
+}
+
+#[derive(Serialize_tuple, Deserialize_tuple)]
+#[serde(transparent)]
+pub struct InvokeContractReturn {
+    #[serde(with = "strict_bytes")]
+    pub output_data: Vec<u8>,
+}
+
+/// EVM call with selector (first 4 bytes) and calldata (remaining bytes)
+pub struct InputData(Vec<u8>);
+
+impl InputData {
+    pub fn selector(&self) -> [u8; 4] {
+        let mut selector = [0u8; 4];
+        selector.copy_from_slice(&self.0[0..4]);
+        selector
+    }
+
+    pub fn calldata(&self) -> &[u8] {
+        &self.0[4..]
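+        // (everything after the 4-byte selector)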
+    }
+}
+
+impl TryFrom<InvokeContractParams> for InputData {
+    type Error = ActorError;
+
+    fn try_from(value: InvokeContractParams) -> Result<Self, Self::Error> {
+        if value.input_data.len() < 4 {
+            return Err(ActorError::illegal_argument("input too short".to_string()));
+        }
+        Ok(InputData(value.input_data))
+    }
+}
+
+pub trait AbiCall {
+    type Params;
+    type Returns;
+    type Output;
+    fn params(&self) -> Self::Params;
+    fn returns(&self, returns: Self::Returns) -> Self::Output;
+}
+
+pub trait AbiCallRuntime {
+    type Params;
+    type Returns;
+    type Output;
+    fn params(&self, rt: &impl fil_actors_runtime::runtime::Runtime) -> Self::Params;
+    fn returns(&self, returns: Self::Returns) -> Self::Output;
+}
+
+#[derive(Debug, Clone)]
+pub struct AbiEncodeError {
+    message: String,
+}
+
+impl From<anyhow::Error> for AbiEncodeError {
+    fn from(error: anyhow::Error) -> Self {
+        Self {
+            message: format!("failed to abi encode {}", error),
+        }
+    }
+}
+
+impl From<String> for AbiEncodeError {
+    fn from(message: String) -> Self {
+        Self { message }
+    }
+}
+
+impl From<ActorError> for AbiEncodeError {
+    fn from(error: ActorError) -> Self {
+        Self {
+            message: format!("{}", error),
+        }
+    }
+}
+
+impl From<AbiEncodeError> for ActorError {
+    fn from(error: AbiEncodeError) -> Self {
+        actor_error!(serialization, error.message)
+    }
+}
diff --git a/fendermint/actors/adm/src/state.rs b/fendermint/actors/adm/src/state.rs
new file mode 100644
index 0000000000..1e6d0278d0
--- /dev/null
+++ b/fendermint/actors/adm/src/state.rs
@@ -0,0 +1,265 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use anyhow::anyhow;
+use cid::Cid;
+use fil_actors_runtime::{runtime::Runtime, ActorError, Map2, MapKey, DEFAULT_HAMT_CONFIG};
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, ActorID};
+use integer_encoding::VarInt;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fmt::Display;
+use std::str::FromStr;
+
+type MachineCodeMap<BS> = Map2<BS, Kind, Cid>;
+type DeployerMap<BS> = Map2<BS, Address, ()>;
+type OwnerMap<BS> = Map2<BS, Address, Vec<Metadata>>;
+
+/// The args used to create the permission mode in storage.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum PermissionModeParams {
+    /// No restriction, everyone can deploy.
+    Unrestricted,
+    /// Only whitelisted addresses can deploy.
+    AllowList(Vec<Address>),
+}
+
+/// The permission mode for controlling who can deploy contracts.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum PermissionMode {
+    /// No restriction, everyone can deploy.
+    Unrestricted,
+    /// Only whitelisted addresses can deploy.
+    AllowList(Cid), // HAMT[Address]()
+}
+
+/// The kinds of machines available. Their code Cids are given at genesis.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
+pub enum Kind {
+    /// An object storage bucket with S3-like key semantics.
+    Bucket,
+    /// An MMR timehub.
+    Timehub,
+}
+
+impl MapKey for Kind {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        if let Some((result, size)) = u64::decode_var(b) {
+            if size != b.len() {
+                return Err(format!("trailing bytes after varint in {:?}", b));
+            }
+            match result {
+                0 => Ok(Kind::Bucket),
+                1 => Ok(Kind::Timehub),
+                _ => Err(format!("failed to decode kind from {}", result)),
+            }
+        } else {
+            Err(format!("failed to decode varint in {:?}", b))
+        }
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        let int = match self {
+            Self::Bucket => 0,
+            Self::Timehub => 1,
+        };
+        Ok(int.encode_var_vec())
+    }
+}
+
+impl FromStr for Kind {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            "bucket" => Self::Bucket,
+            "timehub" => Self::Timehub,
+            _ => return Err(anyhow!("invalid machine kind")),
+        })
+    }
+}
+
+impl Display for Kind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let str = match self {
+            Self::Bucket => "bucket",
+            Self::Timehub => "timehub",
+        };
+        write!(f, "{}", str)
+    }
+}
+
+/// Machine metadata.
+#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct Metadata {
+    /// Machine kind.
+    pub kind: Kind,
+    /// Machine ID address.
+    pub address: Address,
+    /// User-defined data.
+    pub metadata: HashMap<String, String>,
+}
+
+/// ADM actor state representation.
+#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)]
+pub struct State {
+    /// The root of a HAMT[u64]Cid containing available machine codes.
+    /// This is fixed at genesis.
+    pub machine_codes: Cid,
+    /// The permission mode controlling who can create machines.
+    /// This is fixed at genesis, but in allowlist mode, the set of deployers can be changed
+    /// by any member.
+    /// Modeled after the IPC EAM actor.
+    pub permission_mode: PermissionMode,
+    /// The root of a HAMT[Address]Vec<Metadata> containing address and kind metadata
+    /// keyed by owner robust address.
+    pub owners: Cid,
+}
+
+impl State {
+    pub fn new<BS: Blockstore>(
+        store: &BS,
+        machine_codes: HashMap<Kind, Cid>,
+        permission_mode: PermissionModeParams,
+    ) -> Result<State, ActorError> {
+        let mut machine_code_map = MachineCodeMap::empty(store, DEFAULT_HAMT_CONFIG, "machines");
+        for (kind, code) in machine_codes {
+            machine_code_map.set(&kind, code)?;
+        }
+        let machine_codes = machine_code_map.flush()?;
+
+        let permission_mode = match permission_mode {
+            PermissionModeParams::Unrestricted => PermissionMode::Unrestricted,
+            PermissionModeParams::AllowList(deployers) => {
+                let mut deployers_map = DeployerMap::empty(store, DEFAULT_HAMT_CONFIG, "deployers");
+                for d in deployers {
+                    deployers_map.set(&d, ())?;
+                }
+                PermissionMode::AllowList(deployers_map.flush()?)
+            }
+        };
+
+        let owners = OwnerMap::empty(store, DEFAULT_HAMT_CONFIG, "owners").flush()?;
+
+        Ok(State {
+            machine_codes,
+            permission_mode,
+            owners,
+        })
+    }
+
+    pub fn get_machine_code<BS: Blockstore>(
+        &self,
+        store: &BS,
+        kind: &Kind,
+    ) -> Result<Option<Cid>, ActorError> {
+        let machine_code_map =
+            MachineCodeMap::load(store, &self.machine_codes, DEFAULT_HAMT_CONFIG, "machines")?;
+        let code = machine_code_map.get(kind).map(|c| c.cloned())?;
+        Ok(code)
+    }
+
+    pub fn set_deployers<BS: Blockstore>(
+        &mut self,
+        store: &BS,
+        deployers: Vec<Address>,
+    ) -> anyhow::Result<()> {
+        match self.permission_mode {
+            PermissionMode::Unrestricted => {
+                return Err(anyhow::anyhow!(
+                    "cannot set deployers in unrestricted permission mode"
+                ));
+            }
+            PermissionMode::AllowList(_) => {
+                let mut deployers_map = DeployerMap::empty(store, DEFAULT_HAMT_CONFIG, "deployers");
+                for d in deployers {
+                    deployers_map.set(&d, ())?;
+                }
+                self.permission_mode = PermissionMode::AllowList(deployers_map.flush()?);
+            }
+        }
+        Ok(())
+    }
+
+    pub fn can_deploy(&self, rt: &impl Runtime, deployer: ActorID) -> Result<bool, ActorError> {
+        Ok(match &self.permission_mode {
+            PermissionMode::Unrestricted => true,
+            PermissionMode::AllowList(cid) => {
+                let deployer_map =
+                    DeployerMap::load(rt.store(), cid, DEFAULT_HAMT_CONFIG, "deployers")?;
+                let mut allowed = false;
+                deployer_map.for_each(|k, _| {
+                    // Normalize allowed addresses to ID addresses, so we can compare any kind of allowlisted address.
+                    // This includes f1, f2, f3, etc.
+                    // We cannot normalize the allowlist at construction time because the addresses may not be bound to IDs yet (counterfactual usage).
+                    // Unfortunately, the Hamt::for_each API won't let us stop iterating on a match, so this is more wasteful than we'd like. We can optimize later.
+                    // Hamt has implemented Iterator recently, but it's not exposed through Map2 (see ENG-800).
+                    allowed = allowed || rt.resolve_address(&k) == Some(deployer);
+                    Ok(())
+                })?;
+                allowed
+            }
+        })
+    }
+
+    pub fn set_metadata<BS: Blockstore>(
+        &mut self,
+        store: &BS,
+        owner: Address,
+        address: Address,
+        kind: Kind,
+        metadata: HashMap<String, String>,
+    ) -> anyhow::Result<()> {
+        let mut owner_map = OwnerMap::load(store, &self.owners, DEFAULT_HAMT_CONFIG, "owners")?;
+        let mut machine_metadata = owner_map
+            .get(&owner)?
+            .map(|machines| machines.to_owned())
+            .unwrap_or_default();
+        machine_metadata.push(Metadata {
+            kind,
+            address,
+            metadata,
+        });
+        owner_map.set(&owner, machine_metadata)?;
+        self.owners = owner_map.flush()?;
+        Ok(())
+    }
+
+    pub fn get_metadata<BS: Blockstore>(
+        &self,
+        store: &BS,
+        owner: Address,
+    ) -> anyhow::Result<Vec<Metadata>> {
+        let owner_map = OwnerMap::load(store, &self.owners, DEFAULT_HAMT_CONFIG, "owners")?;
+        let metadata = owner_map
+            .get(&owner)?
+            .map(|m| m.to_owned())
+            .unwrap_or_default();
+        Ok(metadata)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use cid::Cid;
+
+    use crate::state::PermissionMode;
+
+    #[test]
+    fn test_serialization() {
+        let p = PermissionMode::Unrestricted;
+        let v = fvm_ipld_encoding::to_vec(&p).unwrap();
+
+        let dp: PermissionMode = fvm_ipld_encoding::from_slice(&v).unwrap();
+        assert_eq!(dp, p);
+
+        let p = PermissionMode::AllowList(Cid::default());
+        let v = fvm_ipld_encoding::to_vec(&p).unwrap();
+
+        let dp: PermissionMode = fvm_ipld_encoding::from_slice(&v).unwrap();
+        assert_eq!(dp, p)
+    }
+}
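Because `Kind` keys a HAMT, its `MapKey` encoding must be canonical, which is why `from_bytes` rejects trailing bytes after the varint. A short round-trip sketch under the definitions above:

    // Round-trip a machine kind through its varint map-key encoding.
    let bytes = Kind::Timehub.to_bytes().unwrap();
    assert_eq!(bytes, vec![1u8]); // small discriminants fit in one varint byte
    assert_eq!(Kind::from_bytes(&bytes).unwrap(), Kind::Timehub);
    // Non-canonical input (a trailing byte) is rejected.
    assert!(Kind::from_bytes(&[1, 0]).is_err());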
diff --git a/fendermint/actors/adm_types/Cargo.toml b/fendermint/actors/adm_types/Cargo.toml
new file mode 100644
index 0000000000..5200ca1097
--- /dev/null
+++ b/fendermint/actors/adm_types/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "fil_actor_adm"
+description = "ADM actor types and interface"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[dependencies]
+serde = { workspace = true, features = ["derive"] }
+
diff --git a/fendermint/actors/adm_types/src/lib.rs b/fendermint/actors/adm_types/src/lib.rs
new file mode 100644
index 0000000000..094802fdd1
--- /dev/null
+++ b/fendermint/actors/adm_types/src/lib.rs
@@ -0,0 +1,28 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! # fil_actor_adm - ADM Actor Types
+//!
+//! This crate provides the types and interface for the ADM (Autonomous Data Management) actor.
+//! It's designed to be a lightweight dependency for actors that need to interact with ADM.
+
+use serde::{Deserialize, Serialize};
+
+/// Types of machines that can be managed by ADM.
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
+pub enum Kind {
+    /// S3-like object storage with key-value semantics
+    Bucket,
+    /// MMR accumulator for timestamping
+    Timehub,
+}
+
+impl std::fmt::Display for Kind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Kind::Bucket => write!(f, "bucket"),
+            Kind::Timehub => write!(f, "timehub"),
+        }
+    }
+}
diff --git a/fendermint/actors/blob_reader/Cargo.toml b/fendermint/actors/blob_reader/Cargo.toml
new file mode 100644
index 0000000000..fda13b18b4
--- /dev/null
+++ b/fendermint/actors/blob_reader/Cargo.toml
@@ -0,0 +1,38 @@
+[package]
+name = "fendermint_actor_blob_reader"
+description = "Singleton actor for reading blob bytes"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+## lib is necessary for integration tests
+## cdylib is necessary for Wasm build
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+anyhow = { workspace = true }
+fil_actors_runtime = { workspace = true }
+fvm_ipld_blockstore = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+frc42_dispatch = { workspace = true }
+log = { workspace = true, features = ["std"] }
+num-traits = { workspace = true }
+num-derive = { workspace = true }
+recall_sol_facade = { workspace = true, features = ["blob-reader"] }
+serde = { workspace = true, features = ["derive"] }
+
+fendermint_actor_blobs_shared = { path = "../blobs/shared" }
+recall_actor_sdk = { path = "../../../recall/actor_sdk" }
+recall_ipld = { path = "../../../recall/ipld" }
+
+[dev-dependencies]
+fendermint_actor_blobs_testing = { path = "../blobs/testing" }
+fil_actors_evm_shared = { workspace = true }
+fil_actors_runtime = { workspace = true, features = ["test_utils"] }
["test_utils"] } +hex-literal = { workspace = true } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/blob_reader/src/actor.rs b/fendermint/actors/blob_reader/src/actor.rs new file mode 100644 index 0000000000..98ec0c3952 --- /dev/null +++ b/fendermint/actors/blob_reader/src/actor.rs @@ -0,0 +1,384 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::bytes::B256; +use fil_actors_runtime::{ + actor_dispatch, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, FIRST_EXPORTED_METHOD_NUMBER, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::MethodNum; +use recall_actor_sdk::evm::emit_evm_event; + +use crate::shared::{ + CloseReadRequestParams, GetOpenReadRequestsParams, GetPendingReadRequestsParams, + GetReadRequestStatusParams, Method, OpenReadRequestParams, ReadRequestStatus, ReadRequestTuple, + SetReadRequestPendingParams, State, BLOB_READER_ACTOR_NAME, +}; +use crate::sol_facade::{ReadRequestClosed, ReadRequestOpened, ReadRequestPending}; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(ReadReqActor); + +pub struct ReadReqActor; + +impl ReadReqActor { + fn constructor(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store())?; + rt.create(&state) + } + + fn open_read_request( + rt: &impl Runtime, + params: OpenReadRequestParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let id = rt.transaction(|st: &mut State, _rt| { + st.open_read_request( + rt.store(), + params.hash, + params.offset, + params.len, + params.callback_addr, + params.callback_method, + ) + })?; + + emit_evm_event( + rt, + ReadRequestOpened { + id: &id, + blob_hash: ¶ms.hash, + read_offset: params.offset.into(), + read_length: params.len.into(), + callback: params.callback_addr, + method_num: params.callback_method, + }, + )?; + + Ok(id) + } + + fn get_read_request_status( + rt: &impl Runtime, + params: GetReadRequestStatusParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + let status = rt + .state::()? 
+            .get_read_request_status(rt.store(), params.0)?;
+        Ok(status)
+    }
+
+    fn get_open_read_requests(
+        rt: &impl Runtime,
+        params: GetOpenReadRequestsParams,
+    ) -> Result<Vec<ReadRequestTuple>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        rt.state::<State>()?.get_read_requests_by_status(
+            rt.store(),
+            ReadRequestStatus::Open,
+            params.0,
+        )
+    }
+
+    fn get_pending_read_requests(
+        rt: &impl Runtime,
+        params: GetPendingReadRequestsParams,
+    ) -> Result<Vec<ReadRequestTuple>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        rt.state::<State>()?.get_read_requests_by_status(
+            rt.store(),
+            ReadRequestStatus::Pending,
+            params.0,
+        )
+    }
+
+    fn set_read_request_pending(
+        rt: &impl Runtime,
+        params: SetReadRequestPendingParams,
+    ) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
+        rt.transaction(|st: &mut State, _| st.set_read_request_pending(rt.store(), params.0))?;
+        emit_evm_event(rt, ReadRequestPending::new(&params.0))
+    }
+
+    fn close_read_request(
+        rt: &impl Runtime,
+        params: CloseReadRequestParams,
+    ) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
+        rt.transaction(|st: &mut State, _| st.close_read_request(rt.store(), params.0))?;
+        emit_evm_event(rt, ReadRequestClosed::new(&params.0))
+    }
+
+    /// Fallback method for unimplemented method numbers.
+    pub fn fallback(
+        rt: &impl Runtime,
+        method: MethodNum,
+        _: Option<IpldBlock>,
+    ) -> Result<Option<IpldBlock>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        if method >= FIRST_EXPORTED_METHOD_NUMBER {
+            Ok(None)
+        } else {
+            Err(actor_error!(unhandled_message; "invalid method: {}", method))
+        }
+    }
+}
+
+impl ActorCode for ReadReqActor {
+    type Methods = Method;
+
+    fn name() -> &'static str {
+        BLOB_READER_ACTOR_NAME
+    }
+
+    actor_dispatch! {
+        Constructor => constructor,
+
+        // User methods
+        OpenReadRequest => open_read_request,
+
+        // System methods
+        GetReadRequestStatus => get_read_request_status,
+        GetOpenReadRequests => get_open_read_requests,
+        GetPendingReadRequests => get_pending_read_requests,
+        SetReadRequestPending => set_read_request_pending,
+        CloseReadRequest => close_read_request,
+
+        _ => fallback,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::sol_facade::ReadRequestClosed;
+    use fendermint_actor_blobs_testing::new_hash;
+    use fil_actors_evm_shared::address::EthAddress;
+    use fil_actors_runtime::test_utils::{
+        expect_empty, MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, SYSTEM_ACTOR_CODE_ID,
+    };
+    use fvm_ipld_encoding::ipld_block::IpldBlock;
+    use fvm_shared::address::Address;
+    use recall_actor_sdk::evm::to_actor_event;
+
+    pub fn construct_and_verify() -> MockRuntime {
+        let rt = MockRuntime {
+            receiver: Address::new_id(10),
+            ..Default::default()
+        };
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let result = rt
+            .call::<ReadReqActor>(Method::Constructor as u64, None)
+            .unwrap();
+        expect_empty(result);
+        rt.verify();
+        rt.reset();
+        rt
+    }
+
+    fn expect_emitted_open_event(rt: &MockRuntime, params: &OpenReadRequestParams, id: &B256) {
+        let event = to_actor_event(ReadRequestOpened {
+            id,
+            blob_hash: &params.hash,
+            read_offset: params.offset.into(),
+            read_length: params.len.into(),
+            callback: params.callback_addr,
+            method_num: params.callback_method,
+        })
+        .unwrap();
+        rt.expect_emitted_event(event);
+    }
+
+    fn expect_emitted_pending_event(rt: &MockRuntime, params: &SetReadRequestPendingParams) {
+        let event = to_actor_event(ReadRequestPending::new(&params.0)).unwrap();
+        rt.expect_emitted_event(event);
+    }
+
+    fn expect_emitted_closed_event(rt: &MockRuntime, params: &CloseReadRequestParams) {
+        let event = to_actor_event(ReadRequestClosed::new(&params.0)).unwrap();
+        rt.expect_emitted_event(event);
+    }
+
+    #[test]
+    fn test_read_request_operations() {
+        let rt = construct_and_verify();
+
+        // Set up test addresses
+        let id_addr = Address::new_id(110);
+        let eth_addr = EthAddress(hex_literal::hex!(
+            "CAFEB0BA00000000000000000000000000000000"
+        ));
+        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();
+
+        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
+        rt.set_origin(id_addr);
+
+        // Create a test blob hash and callback details
+        let blob_hash = new_hash(1024).0;
+        let offset = 32u32;
+        let len = 1024u32;
+        let callback_method = 42u64;
+
+        // Test opening a read request
+        rt.expect_validate_caller_any();
+        let open_params = OpenReadRequestParams {
+            hash: blob_hash,
+            offset,
+            len,
+            callback_addr: f4_eth_addr,
+            callback_method,
+        };
+        let expected_id = B256::from(1);
+        expect_emitted_open_event(&rt, &open_params, &expected_id);
+        let request_id = rt
+            .call::<ReadReqActor>(
+                Method::OpenReadRequest as u64,
+                IpldBlock::serialize_cbor(&open_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<B256>()
+            .unwrap();
+        rt.verify();
+
+        // Test checking request status
+        rt.expect_validate_caller_any();
+        let status_params = GetReadRequestStatusParams(request_id);
+        let result = rt
+            .call::<ReadReqActor>(
+                Method::GetReadRequestStatus as u64,
+                IpldBlock::serialize_cbor(&status_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Option<ReadRequestStatus>>()
+            .unwrap();
+        assert!(matches!(result, Some(ReadRequestStatus::Open)));
+        rt.verify();
+
+        // Test getting open requests
+        rt.expect_validate_caller_any();
+        let get_params = GetOpenReadRequestsParams(1); // Get just one request
+        let result = rt
+            .call::<ReadReqActor>(
+                Method::GetOpenReadRequests as u64,
+                IpldBlock::serialize_cbor(&get_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Vec<ReadRequestTuple>>()
+            .unwrap();
+
+        assert_eq!(result.len(), 1);
+        let (req_id, req_blob_hash, req_offset, req_len, req_callback_addr, req_callback_method) =
+            &result[0];
+        assert_eq!(req_id, &request_id);
+        assert_eq!(req_blob_hash, &blob_hash);
+        assert_eq!(req_offset, &offset);
+        assert_eq!(req_len, &len);
+        assert_eq!(req_callback_addr, &f4_eth_addr);
+        assert_eq!(req_callback_method, &callback_method);
+        rt.verify();
+
+        // Test setting request to pending
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let pending_params = SetReadRequestPendingParams(request_id);
+        expect_emitted_pending_event(&rt, &pending_params);
+        let result = rt.call::<ReadReqActor>(
+            Method::SetReadRequestPending as u64,
+            IpldBlock::serialize_cbor(&pending_params).unwrap(),
+        );
+        assert!(result.is_ok());
+        rt.verify();
+
+        // Verify request is now pending
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // Reset caller
+        rt.expect_validate_caller_any();
+        let status_params = GetReadRequestStatusParams(request_id);
+        let result = rt
+            .call::<ReadReqActor>(
+                Method::GetReadRequestStatus as u64,
+                IpldBlock::serialize_cbor(&status_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Option<ReadRequestStatus>>()
+            .unwrap();
+        assert!(matches!(result, Some(ReadRequestStatus::Pending)));
+        rt.verify();
+
+        // Test closing a request (requires system actor caller)
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let close_params = CloseReadRequestParams(request_id);
+        expect_emitted_closed_event(&rt, &close_params);
+        let result = rt.call::<ReadReqActor>(
+            Method::CloseReadRequest as u64,
+            IpldBlock::serialize_cbor(&close_params).unwrap(),
+        );
+        assert!(result.is_ok());
+        rt.verify();
+
+        // Verify request no longer exists
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // Reset caller
+        rt.expect_validate_caller_any();
+        let status_params = GetReadRequestStatusParams(request_id);
+        let result = rt
+            .call::<ReadReqActor>(
+                Method::GetReadRequestStatus as u64,
+                IpldBlock::serialize_cbor(&status_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Option<ReadRequestStatus>>()
+            .unwrap();
+        assert!(result.is_none());
+        rt.verify();
+    }
+
+    #[test]
+    fn test_read_request_error_cases() {
+        let rt = construct_and_verify();
+
+        // Set up test addresses
+        let id_addr = Address::new_id(110);
+        let eth_addr = EthAddress(hex_literal::hex!(
+            "CAFEB0BA00000000000000000000000000000000"
+        ));
+        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();
+
+        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
+
+        // Test closing non-existent request
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let non_existent_request_id = B256([0u8; 32]);
+        let close_params = CloseReadRequestParams(non_existent_request_id);
+        let result = rt.call::<ReadReqActor>(
+            Method::CloseReadRequest as u64,
+            IpldBlock::serialize_cbor(&close_params).unwrap(),
+        );
+        assert!(result.is_err());
+        rt.verify();
+
+        // Test closing a request as a non-system caller
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+        let result = rt.call::<ReadReqActor>(
+            Method::CloseReadRequest as u64,
+            IpldBlock::serialize_cbor(&close_params).unwrap(),
+        );
+        assert!(result.is_err());
+        rt.verify();
+    }
+}
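The tests above walk a request through its whole lifecycle. Summarized as a sketch (statuses from `ReadRequestStatus`; `None` means the request is absent from state; the transition table is inferred from the actor methods, not code from this patch):

    // open_read_request:        absent -> Open            (any caller)
    // set_read_request_pending: Open -> Pending           (system only)
    // close_read_request:       Open or Pending -> absent (system only)
    fn is_valid_transition(from: Option<ReadRequestStatus>, to: Option<ReadRequestStatus>) -> bool {
        use ReadRequestStatus::*;
        matches!(
            (from, to),
            (None, Some(Open))
                | (Some(Open), Some(Pending))
                | (Some(Open), None)
                | (Some(Pending), None)
        )
    }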
diff --git a/fendermint/actors/blob_reader/src/lib.rs b/fendermint/actors/blob_reader/src/lib.rs
new file mode 100644
index 0000000000..a784389323
--- /dev/null
+++ b/fendermint/actors/blob_reader/src/lib.rs
@@ -0,0 +1,10 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+mod actor;
+mod shared;
+mod sol_facade;
+mod state;
+
+pub use shared::*;
diff --git a/fendermint/actors/blob_reader/src/shared.rs b/fendermint/actors/blob_reader/src/shared.rs
new file mode 100644
index 0000000000..655806a6fd
--- /dev/null
+++ b/fendermint/actors/blob_reader/src/shared.rs
@@ -0,0 +1,112 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::fmt;
+
+use fendermint_actor_blobs_shared::bytes::B256;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, ActorID, MethodNum, METHOD_CONSTRUCTOR};
+use num_derive::FromPrimitive;
+use serde::{Deserialize, Serialize};
+
+pub use crate::state::State;
+
+pub const BLOB_READER_ACTOR_NAME: &str = "blob_reader";
+pub const BLOB_READER_ACTOR_ID: ActorID = 67;
+pub const BLOB_READER_ACTOR_ADDR: Address = Address::new_id(BLOB_READER_ACTOR_ID);
+
+/// The status of a read request.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
+pub enum ReadRequestStatus {
+    /// Read request is open and waiting to be processed.
+    #[default]
+    Open,
+    /// Read request is being processed.
+    Pending,
+}
+
+impl fmt::Display for ReadRequestStatus {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ReadRequestStatus::Open => write!(f, "open"),
+            ReadRequestStatus::Pending => write!(f, "pending"),
+        }
+    }
+}
+
+/// A request to read blob data.
+#[derive(Clone, Debug, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct ReadRequest {
+    /// The hash of the blob to read data from.
+    pub blob_hash: B256,
+    /// The offset to start reading from.
+    pub offset: u32,
+    /// The length of data to read.
+    pub len: u32,
+    /// The address to call back when the read is complete.
+    pub callback_addr: Address,
+    /// The method to call back when the read is complete.
+    pub callback_method: MethodNum,
+    /// Status of the read request.
+    pub status: ReadRequestStatus,
+}
+
+#[derive(FromPrimitive)]
+#[repr(u64)]
+pub enum Method {
+    Constructor = METHOD_CONSTRUCTOR,
+
+    // User methods
+    OpenReadRequest = frc42_dispatch::method_hash!("OpenReadRequest"),
+
+    // System methods
+    GetReadRequestStatus = frc42_dispatch::method_hash!("GetReadRequestStatus"),
+    GetOpenReadRequests = frc42_dispatch::method_hash!("GetOpenReadRequests"),
+    GetPendingReadRequests = frc42_dispatch::method_hash!("GetPendingReadRequests"),
+    SetReadRequestPending = frc42_dispatch::method_hash!("SetReadRequestPending"),
+    CloseReadRequest = frc42_dispatch::method_hash!("CloseReadRequest"),
+}
+
+/// Params for opening a read request.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct OpenReadRequestParams {
+    /// The hash of the blob to read.
+    pub hash: B256,
+    /// The offset to start reading from.
+    pub offset: u32,
+    /// The length of the read request.
+    pub len: u32,
+    /// The address to call back when the read is complete.
+    pub callback_addr: Address,
+    /// The method to call back when the read is complete.
+    pub callback_method: MethodNum,
+}
+
+/// Params for getting read request status.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetReadRequestStatusParams(pub B256);
+
+/// Params for getting open read requests.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetOpenReadRequestsParams(pub u32);
+
+/// Params for getting pending read requests.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetPendingReadRequestsParams(pub u32);
+
+/// Params for setting a read request to pending.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct SetReadRequestPendingParams(pub B256);
+
+/// Params for closing a read request: the ID of the request.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct CloseReadRequestParams(pub B256);
+
+/// Return type for request queues.
+pub type ReadRequestTuple = (B256, B256, u32, u32, Address, u64);
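Each method number above is an FRC-42 hash of the exported method name, computed at compile time, so clients can derive the number from the name rather than hard-coding integers. For example:

    let open: u64 = frc42_dispatch::method_hash!("OpenReadRequest");
    assert_eq!(open, Method::OpenReadRequest as u64);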
diff --git a/fendermint/actors/blob_reader/src/sol_facade.rs b/fendermint/actors/blob_reader/src/sol_facade.rs
new file mode 100644
index 0000000000..719de0a5f2
--- /dev/null
+++ b/fendermint/actors/blob_reader/src/sol_facade.rs
@@ -0,0 +1,66 @@
+// Copyright 2022-2024 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fendermint_actor_blobs_shared::bytes::B256;
+use fvm_shared::{address::Address, MethodNum};
+use recall_actor_sdk::evm::TryIntoEVMEvent;
+use recall_sol_facade::{blob_reader as sol, primitives::U256, types::H160};
+
+pub struct ReadRequestOpened<'a> {
+    pub id: &'a B256,
+    pub blob_hash: &'a B256,
+    pub read_offset: u64,
+    pub read_length: u64,
+    pub callback: Address,
+    pub method_num: MethodNum,
+}
+impl TryIntoEVMEvent for ReadRequestOpened<'_> {
+    type Target = sol::Events;
+
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error> {
+        let callback_address: H160 = self.callback.try_into()?;
+        Ok(sol::Events::ReadRequestOpened(sol::ReadRequestOpened {
+            id: self.id.0.into(),
+            blobHash: self.blob_hash.0.into(),
+            readOffset: U256::from(self.read_offset),
+            readLength: U256::from(self.read_length),
+            callbackAddress: callback_address.into(),
+            callbackMethod: U256::from(self.method_num),
+        }))
+    }
+}
+
+pub struct ReadRequestPending<'a> {
+    pub id: &'a B256,
+}
+impl<'a> ReadRequestPending<'a> {
+    pub fn new(id: &'a B256) -> Self {
+        Self { id }
+    }
+}
+impl TryIntoEVMEvent for ReadRequestPending<'_> {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error> {
+        Ok(sol::Events::ReadRequestPending(sol::ReadRequestPending {
+            id: self.id.0.into(),
+        }))
+    }
+}
+
+pub struct ReadRequestClosed<'a> {
+    pub id: &'a B256,
+}
+impl<'a> ReadRequestClosed<'a> {
+    pub fn new(id: &'a B256) -> Self {
+        Self { id }
+    }
+}
+impl TryIntoEVMEvent for ReadRequestClosed<'_> {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error> {
+        Ok(sol::Events::ReadRequestClosed(sol::ReadRequestClosed {
+            id: self.id.0.into(),
+        }))
+    }
+}
diff --git a/fendermint/actors/blob_reader/src/state.rs b/fendermint/actors/blob_reader/src/state.rs
new file mode 100644
index 0000000000..1668808776
--- /dev/null
+++ b/fendermint/actors/blob_reader/src/state.rs
@@ -0,0 +1,176 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fendermint_actor_blobs_shared::bytes::B256;
+use fil_actors_runtime::ActorError;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::address::Address;
+use log::info;
+use recall_ipld::hamt::{self, map::TrackedFlushResult};
+
+use crate::shared::{ReadRequest, ReadRequestStatus, ReadRequestTuple};
+
+const MAX_READ_REQUEST_LEN: u32 = 1024 * 1024; // 1MB
+
+/// The state represents all read requests.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct State {
+    /// ReadRequests HAMT.
+    pub read_requests: ReadRequests,
+    /// Counter used to sequence request IDs.
+    pub request_id_counter: u64,
+}
+
+impl State {
+    pub fn new<BS: Blockstore>(store: &BS) -> Result<State, ActorError> {
+        let read_requests = ReadRequests::new(store)?;
+        Ok(State {
+            read_requests,
+            request_id_counter: 0,
+        })
+    }
+
+    pub fn open_read_request<BS: Blockstore>(
+        &mut self,
+        store: &BS,
+        blob_hash: B256,
+        offset: u32,
+        len: u32,
+        callback_addr: Address,
+        callback_method: u64,
+    ) -> Result<B256, ActorError> {
+        // Validate length is not greater than the maximum allowed
+        if len > MAX_READ_REQUEST_LEN {
+            return Err(ActorError::illegal_argument(format!(
+                "read request length {} exceeds maximum allowed {}",
+                len, MAX_READ_REQUEST_LEN
+            )));
+        }
+
+        let request_id = self.next_request_id();
+        let read_request = ReadRequest {
+            blob_hash,
+            offset,
+            len,
+            callback_addr,
+            callback_method,
+            status: ReadRequestStatus::Open,
+        };
+        info!("opening a read request onchain: {:?}", request_id);
+        // This will create a new request even if the request parameters are the same.
+        let mut read_requests = self.read_requests.hamt(store)?;
+        self.read_requests
+            .save_tracked(read_requests.set_and_flush_tracked(&request_id, read_request)?);
+        Ok(request_id)
+    }
+
+    pub fn get_read_request_status<BS: Blockstore>(
+        &self,
+        store: BS,
+        id: B256,
+    ) -> Result<Option<ReadRequestStatus>, ActorError> {
+        let read_requests = self.read_requests.hamt(store)?;
+        Ok(read_requests.get(&id)?.map(|r| r.status.clone()))
+    }
+
+    pub fn get_read_requests_by_status<BS: Blockstore>(
+        &self,
+        store: BS,
+        status: ReadRequestStatus,
+        size: u32,
+    ) -> Result<Vec<ReadRequestTuple>, ActorError> {
+        let read_requests = self.read_requests.hamt(store)?;
+
+        let mut requests = Vec::new();
+        read_requests.for_each(|id, request| {
+            if request.status == status && (requests.len() as u32) < size {
+                requests.push((
+                    id,
+                    request.blob_hash,
+                    request.offset,
+                    request.len,
+                    request.callback_addr,
+                    request.callback_method,
+                ))
+            }
+
+            Ok(())
+        })?;
+        Ok(requests)
+    }
+
+    /// Set a read request status to pending.
+    pub fn set_read_request_pending<BS: Blockstore>(
+        &mut self,
+        store: BS,
+        id: B256,
+    ) -> Result<(), ActorError> {
+        let mut read_requests = self.read_requests.hamt(store)?;
+        let mut request = read_requests
+            .get(&id)?
+            .ok_or_else(|| ActorError::not_found(format!("read request {} not found", id)))?;
+
+        if !matches!(request.status, ReadRequestStatus::Open) {
+            return Err(ActorError::illegal_state(format!(
+                "read request {} is not in open state",
+                id
+            )));
+        }
+
+        request.status = ReadRequestStatus::Pending;
+        self.read_requests
+            .save_tracked(read_requests.set_and_flush_tracked(&id, request)?);
+
+        Ok(())
+    }
+
+    pub fn close_read_request<BS: Blockstore>(
+        &mut self,
+        store: &BS,
+        request_id: B256,
+    ) -> Result<(), ActorError> {
+        if self.get_read_request_status(store, request_id)?.is_none() {
+            return Err(ActorError::not_found(
+                "cannot close read request, it does not exist".to_string(),
+            ));
+        }
+
+        // Remove the closed request.
+        let mut read_requests = self.read_requests.hamt(store)?;
+        self.read_requests
+            .save_tracked(read_requests.delete_and_flush_tracked(&request_id)?.0);
+        Ok(())
+    }
+
+    fn next_request_id(&mut self) -> B256 {
+        self.request_id_counter += 1;
+        B256::from(self.request_id_counter)
+    }
+}
+
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ReadRequests {
+    pub root: hamt::Root<B256, ReadRequest>,
+    size: u64,
+}
+
+impl ReadRequests {
+    pub fn new<BS: Blockstore>(store: &BS) -> Result<Self, ActorError> {
+        let root = hamt::Root::<B256, ReadRequest>::new(store, "read_requests")?;
+        Ok(Self { root, size: 0 })
+    }
+
+    pub fn hamt<BS: Blockstore>(
+        &self,
+        store: BS,
+    ) -> Result<hamt::map::Hamt<BS, B256, ReadRequest>, ActorError> {
+        self.root.hamt(store, self.size)
+    }
+
+    pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult<B256, ReadRequest>) {
+        self.root = tracked_flush_result.root;
+        self.size = tracked_flush_result.size;
+    }
+}
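`ReadRequests` keeps an entry count next to the HAMT root so queue sizes can be reported without walking the map, which is why every mutation routes its flush result back through `save_tracked`. The pattern, as a sketch against the types above:

    // Insert-and-save, keeping root and size in sync (sketch).
    fn insert_request<BS: Blockstore>(
        requests: &mut ReadRequests,
        store: &BS,
        id: &B256,
        request: ReadRequest,
    ) -> Result<(), ActorError> {
        let mut hamt = requests.hamt(store)?;
        // set_and_flush_tracked returns the new root and the updated size together.
        requests.save_tracked(hamt.set_and_flush_tracked(id, request)?);
        Ok(())
    }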
diff --git a/fendermint/actors/blobs/Cargo.toml b/fendermint/actors/blobs/Cargo.toml
new file mode 100644
index 0000000000..68d81032eb
--- /dev/null
+++ b/fendermint/actors/blobs/Cargo.toml
@@ -0,0 +1,44 @@
+[package]
+name = "fendermint_actor_blobs"
+description = "Singleton actor for blob management"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+## lib is necessary for integration tests
+## cdylib is necessary for Wasm build
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+cid = { workspace = true }
+anyhow = { workspace = true }
+fil_actors_runtime = { workspace = true }
+fvm_ipld_blockstore = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+log = { workspace = true, features = ["std"] }
+num-traits = { workspace = true }
+recall_sol_facade = { workspace = true, features = ["blobs", "credit", "gas"] }
+serde = { workspace = true, features = ["derive"] }
+
+fendermint_actor_blobs_shared = { path = "./shared" }
+fendermint_actor_recall_config_shared = { path = "../recall_config/shared" }
+recall_actor_sdk = { path = "../../../recall/actor_sdk" }
+recall_ipld = { path = "../../../recall/ipld" }
+
+# BLS signature verification
+bls-signatures = { version = "0.13.1", default-features = false, features = ["blst"] }
+
+[dev-dependencies]
+fil_actors_evm_shared = { workspace = true }
+fil_actors_runtime = { workspace = true, features = ["test_utils"] }
+hex-literal = { workspace = true }
+rand = { workspace = true }
+cid = { workspace = true }
+
+fendermint_actor_blobs_testing = { path = "./testing" }
+
+[features]
+fil-actor = ["fil_actors_runtime/fil-actor"]
diff --git a/fendermint/actors/blobs/shared/Cargo.toml b/fendermint/actors/blobs/shared/Cargo.toml
new file mode 100644
index 0000000000..8dffa8b743
--- /dev/null
+++ b/fendermint/actors/blobs/shared/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "fendermint_actor_blobs_shared"
+description = "Shared resources for blobs"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+## lib is necessary for integration tests
+## cdylib is necessary for Wasm build
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+anyhow = { workspace = true }
+data-encoding = { workspace = true }
+fil_actors_runtime = { workspace = true }
+frc42_dispatch = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+num-derive = { workspace = true }
+num-traits = { workspace = true }
+serde = { workspace = true, features = ["derive"] }
+
+recall_ipld = { path = "../../../../recall/ipld" }
+
+[dev-dependencies]
+blake3 = { workspace = true }
+
+[features]
+fil-actor = ["fil_actors_runtime/fil-actor"]
diff --git a/fendermint/actors/blobs/shared/src/accounts.rs b/fendermint/actors/blobs/shared/src/accounts.rs
new file mode 100644
index 0000000000..2348f2a9c9
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/accounts.rs
@@ -0,0 +1,11 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+mod account;
+mod params;
+mod status;
+
+pub use account::*;
+pub use params::*;
+pub use status::*;
diff --git a/fendermint/actors/blobs/shared/src/accounts/account.rs b/fendermint/actors/blobs/shared/src/accounts/account.rs
new file mode 100644
index 0000000000..b93b6b213e
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/accounts/account.rs
@@ -0,0 +1,33 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount};
+
+use crate::credit::{Credit, CreditApproval};
+
+/// The external (shared) view of an account.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct Account {
+    /// Total size of all blobs managed by the account.
+    pub capacity_used: u64,
+    /// Current free credit in byte-blocks that can be used for new commitments.
+    pub credit_free: Credit,
+    /// Current committed credit in byte-blocks that will be used for debits.
+    pub credit_committed: Credit,
+    /// Optional default sponsor account address.
+    pub credit_sponsor: Option<Address>,
+    /// The chain epoch of the last debit.
+    pub last_debit_epoch: ChainEpoch,
+    /// Credit approvals to other accounts from this account, keyed by receiver.
+    pub approvals_to: HashMap<Address, CreditApproval>,
+    /// Credit approvals to this account from other accounts, keyed by sender.
+    pub approvals_from: HashMap<Address, CreditApproval>,
+    /// The maximum allowed TTL for the actor's blobs.
+    pub max_ttl: ChainEpoch,
+    /// The total token value an account has used to buy credits.
+    pub gas_allowance: TokenAmount,
+}
diff --git a/fendermint/actors/blobs/shared/src/accounts/params.rs b/fendermint/actors/blobs/shared/src/accounts/params.rs
new file mode 100644
index 0000000000..68dc097ea5
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/accounts/params.rs
@@ -0,0 +1,23 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::address::Address;
+use serde::{Deserialize, Serialize};
+
+use super::AccountStatus;
+
+/// Params for setting account status.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct SetAccountStatusParams {
+    /// Address to set the account status for.
+    pub subscriber: Address,
+    /// Status to set.
+    pub status: AccountStatus,
+}
+
+/// Params for getting an account.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetAccountParams(pub Address);
diff --git a/fendermint/actors/blobs/shared/src/accounts/status.rs b/fendermint/actors/blobs/shared/src/accounts/status.rs
new file mode 100644
index 0000000000..64b274b1bf
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/accounts/status.rs
@@ -0,0 +1,40 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_shared::clock::ChainEpoch;
+use serde::{Deserialize, Serialize};
+
+/// The status of an account.
+/// This controls the max TTL that the user is allowed to set on their blobs.
+#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub enum AccountStatus {
+    /// Default TTL.
+    #[default]
+    Default,
+    /// Reduced TTL.
+    Reduced,
+    /// Extended TTL.
+    Extended,
+}
+
+impl AccountStatus {
+    /// Returns the max allowed TTL.
+    pub fn get_max_ttl(&self, default_max_ttl: ChainEpoch) -> ChainEpoch {
+        match self {
+            AccountStatus::Default => default_max_ttl,
+            AccountStatus::Reduced => 0,
+            AccountStatus::Extended => ChainEpoch::MAX,
+        }
+    }
+}
+
+impl std::fmt::Display for AccountStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            AccountStatus::Default => write!(f, "default"),
+            AccountStatus::Reduced => write!(f, "reduced"),
+            AccountStatus::Extended => write!(f, "extended"),
+        }
+    }
+}
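A worked example of `get_max_ttl`, assuming a default maximum of 8640 epochs (the actual default comes from the recall config actor, not from this crate):

    let default_max_ttl: ChainEpoch = 8640;
    assert_eq!(AccountStatus::Default.get_max_ttl(default_max_ttl), 8640);
    assert_eq!(AccountStatus::Reduced.get_max_ttl(default_max_ttl), 0); // storage disabled
    assert_eq!(AccountStatus::Extended.get_max_ttl(default_max_ttl), ChainEpoch::MAX);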
diff --git a/fendermint/actors/blobs/shared/src/blobs.rs b/fendermint/actors/blobs/shared/src/blobs.rs
new file mode 100644
index 0000000000..d7bf810c87
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/blobs.rs
@@ -0,0 +1,25 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashSet;
+
+use fvm_shared::address::Address;
+
+mod blob;
+mod params;
+mod status;
+mod subscription;
+
+pub use blob::*;
+pub use params::*;
+pub use status::*;
+pub use subscription::*;
+
+use crate::bytes::B256;
+
+/// Tuple representing a unique blob source.
+pub type BlobSource = (Address, SubscriptionId, B256);
+
+/// The return type used when fetching "added" or "pending" blobs.
+pub type BlobRequest = (B256, u64, HashSet<BlobSource>);
diff --git a/fendermint/actors/blobs/shared/src/blobs/blob.rs b/fendermint/actors/blobs/shared/src/blobs/blob.rs
new file mode 100644
index 0000000000..b8f8f00144
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/blobs/blob.rs
@@ -0,0 +1,24 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::clock::ChainEpoch;
+
+use super::{BlobStatus, SubscriptionId};
+use crate::bytes::B256;
+
+/// The external (shared) view of a blob.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct Blob {
+    /// The size of the content.
+    pub size: u64,
+    /// Blob metadata that contains information for blob recovery.
+    pub metadata_hash: B256,
+    /// Active subscribers (accounts) that are paying for the blob to expiry.
+    pub subscribers: HashMap<SubscriptionId, ChainEpoch>,
+    /// Blob status.
+    pub status: BlobStatus,
+}
diff --git a/fendermint/actors/blobs/shared/src/blobs/params.rs b/fendermint/actors/blobs/shared/src/blobs/params.rs
new file mode 100644
index 0000000000..0b6123802f
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/blobs/params.rs
@@ -0,0 +1,133 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, clock::ChainEpoch};
+use serde::{Deserialize, Serialize};
+
+use super::{BlobStatus, SubscriptionId};
+use crate::bytes::B256;
+
+/// Params for adding a blob.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct AddBlobParams {
+    /// Address of the entity adding the blob.
+    pub from: Address,
+    /// Optional sponsor address.
+    /// Origin or caller must still have a delegation from a sponsor.
+    pub sponsor: Option<Address>,
+    /// Source Iroh node ID used for ingestion.
+    pub source: B256,
+    /// Blob blake3 hash.
+    pub hash: B256,
+    /// Blake3 hash of the metadata to use for blob recovery.
+    pub metadata_hash: B256,
+    /// Identifier used to differentiate blob additions for the same subscriber.
+    pub id: SubscriptionId,
+    /// Blob size.
+    pub size: u64,
+    /// Blob time-to-live epochs.
+    /// If not specified, the current default TTL from the config actor is used.
+    pub ttl: Option<ChainEpoch>,
+}
+
+/// Params for getting a blob.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetBlobParams(pub B256);
+
+/// Params for getting blob status.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct GetBlobStatusParams {
+    /// The origin address that requested the blob.
+    /// This could be a wallet or machine.
+    pub subscriber: Address,
+    /// Blob blake3 hash.
+    pub hash: B256,
+    /// Identifier used to differentiate blob additions for the same subscriber.
+    pub id: SubscriptionId,
+}
+
+/// Params for getting added blobs.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetAddedBlobsParams(pub u32);
+
+/// Params for getting pending blobs.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetPendingBlobsParams(pub u32);
+
+/// Params for setting a blob to pending.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct SetBlobPendingParams {
+    /// Source Iroh node ID used for ingestion.
+    pub source: B256,
+    /// The address that requested the blob.
+    pub subscriber: Address,
+    /// Blob blake3 hash.
+    pub hash: B256,
+    /// Blob size.
+    pub size: u64,
+    /// Identifier used to differentiate blob additions for the same subscriber.
+    pub id: SubscriptionId,
+}
+
+/// Params for finalizing a blob.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct FinalizeBlobParams {
+    /// Source Iroh node ID used for ingestion.
+    pub source: B256,
+    /// The address that requested the blob.
+    /// This could be a wallet or machine.
+    pub subscriber: Address,
+    /// Blob blake3 hash.
+    pub hash: B256,
+    /// Blob size.
+    pub size: u64,
+    /// Identifier used to differentiate blob additions for the same subscriber.
+    pub id: SubscriptionId,
+    /// The status to set as final.
+    pub status: BlobStatus,
+    /// Aggregated BLS signature from node operators (48 bytes).
+    pub aggregated_signature: Vec<u8>,
+    /// Bitmap indicating which operators signed (bit position corresponds to operator index).
+    pub signer_bitmap: u128,
+}
+
+/// Params for deleting a blob.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct DeleteBlobParams {
+    /// Account address that initiated the deletion.
+    pub from: Address,
+    /// Optional sponsor address.
+    /// Origin or caller must still have a delegation from a sponsor.
+    /// Must be used if the caller is the delegate who added the blob.
+    pub sponsor: Option<Address>,
+    /// Blob blake3 hash.
+    pub hash: B256,
+    /// Identifier used to differentiate blob additions for the same subscriber.
+    pub id: SubscriptionId,
+}
+
+/// Params for overwriting a blob, i.e., deleting one and adding another.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct OverwriteBlobParams {
+    /// Blake3 hash of the blob to be deleted.
+    pub old_hash: B256,
+    /// Params for a new blob to add.
+    pub add: AddBlobParams,
+}
+
+/// Params for trimming blob expiries.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct TrimBlobExpiriesParams {
+    /// Address to trim blob expiries for.
+    pub subscriber: Address,
+    /// Starting hash to trim expiries from.
+    pub starting_hash: Option<B256>,
+    /// Limit of blobs to trim expiries for.
+    /// This specifies the maximum number of blobs that will be examined for trimming.
+    pub limit: Option<u32>,
+}
diff --git a/fendermint/actors/blobs/shared/src/blobs/status.rs b/fendermint/actors/blobs/shared/src/blobs/status.rs
new file mode 100644
index 0000000000..25435f3f80
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/blobs/status.rs
@@ -0,0 +1,30 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use serde::{Deserialize, Serialize};
+
+/// The status of a blob.
+#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub enum BlobStatus {
+    /// Blob is added but not resolving.
+    #[default]
+    Added,
+    /// Blob is pending resolve.
+    Pending,
+    /// Blob was successfully resolved.
+    Resolved,
+    /// Blob resolution failed.
+    Failed,
+}
+
+impl std::fmt::Display for BlobStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            BlobStatus::Added => write!(f, "added"),
+            BlobStatus::Pending => write!(f, "pending"),
+            BlobStatus::Resolved => write!(f, "resolved"),
+            BlobStatus::Failed => write!(f, "failed"),
+        }
+    }
+}
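The enum above implies a simple resolution pipeline; the transitions themselves are driven by the blobs actor's `SetBlobPending` and `FinalizeBlob` methods, so this is a descriptive sketch only:

    // Added --SetBlobPending--> Pending --FinalizeBlob--> Resolved | Failed
    fn is_terminal(status: &BlobStatus) -> bool {
        matches!(status, BlobStatus::Resolved | BlobStatus::Failed)
    }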
diff --git a/fendermint/actors/blobs/shared/src/blobs/subscription.rs b/fendermint/actors/blobs/shared/src/blobs/subscription.rs
new file mode 100644
index 0000000000..6906d97d11
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/blobs/subscription.rs
@@ -0,0 +1,107 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fil_actors_runtime::ActorError;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, clock::ChainEpoch};
+use recall_ipld::hamt::MapKey;
+use serde::{Deserialize, Serialize};
+
+use crate::bytes::B256;
+
+/// An object used to determine which [`Account`](s) are accountable for a blob, and for how long.
+/// Subscriptions allow us to distribute the cost of a blob across multiple accounts that
+/// have added the same blob.
+#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct Subscription {
+    /// Added block.
+    pub added: ChainEpoch,
+    /// Overlap with the initial group expiry.
+    pub overlap: ChainEpoch,
+    /// Expiry block.
+    pub expiry: ChainEpoch,
+    /// Source Iroh node ID used for ingestion.
+    /// This might be unique to each instance of the same blob.
+    /// It's included here for record keeping.
+    pub source: B256,
+    /// The delegate origin that may have created the subscription via a credit approval.
+    pub delegate: Option<Address>,
+    /// Whether the subscription failed due to an issue resolving the target blob.
+    pub failed: bool,
+}
+
+/// User-defined identifier used to differentiate blob subscriptions for the same subscriber.
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct SubscriptionId {
+    inner: String,
+}
+
+impl SubscriptionId {
+    /// Max ID length.
+    pub const MAX_LEN: usize = 64;
+
+    /// Returns a new [`SubscriptionId`].
+    pub fn new(value: &str) -> Result<Self, ActorError> {
+        if value.len() > Self::MAX_LEN {
+            return Err(ActorError::illegal_argument(format!(
+                "subscription ID length is {} but must not exceed the maximum of {} characters",
+                value.len(),
+                Self::MAX_LEN
+            )));
+        }
+        Ok(Self {
+            inner: value.to_string(),
+        })
+    }
+}
+
+impl From<SubscriptionId> for String {
+    fn from(id: SubscriptionId) -> String {
+        id.inner
+    }
+}
+
+impl TryFrom<String> for SubscriptionId {
+    type Error = ActorError;
+
+    fn try_from(value: String) -> Result<Self, Self::Error> {
+        Self::new(&value)
+    }
+}
+
+impl std::fmt::Display for SubscriptionId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.inner.is_empty() {
+            write!(f, "default")
+        } else {
+            write!(f, "{}", self.inner)
+        }
+    }
+}
+
+impl MapKey for SubscriptionId {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        let inner = String::from_utf8(b.to_vec()).map_err(|e| e.to_string())?;
+        Self::new(&inner).map_err(|e| e.to_string())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.inner.as_bytes().to_vec())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_subscription_id_length() {
+        let id_str = |len: usize| "a".repeat(len);
+        let id = SubscriptionId::new(&id_str(SubscriptionId::MAX_LEN)).unwrap();
+        assert_eq!(id.inner, id_str(SubscriptionId::MAX_LEN));
+
+        let id = SubscriptionId::new(&id_str(SubscriptionId::MAX_LEN + 1));
+        assert!(id.is_err());
+    }
+}
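One subtlety worth noting: the empty ID renders as "default" in `Display`, but its `MapKey` bytes stay empty, so the rendered name and the key space cannot collide. A short sketch using the type above:

    let id = SubscriptionId::default();
    assert_eq!(id.to_string(), "default");
    assert_eq!(id.to_bytes().unwrap(), Vec::<u8>::new());
    let named = SubscriptionId::new("backup-2025").unwrap();
    assert_eq!(named.to_string(), "backup-2025");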
diff --git a/fendermint/actors/blobs/shared/src/bytes.rs b/fendermint/actors/blobs/shared/src/bytes.rs
new file mode 100644
index 0000000000..b61549ec38
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/bytes.rs
@@ -0,0 +1,118 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use anyhow::anyhow;
+use data_encoding::{DecodeError, DecodeKind};
+use recall_ipld::hamt::MapKey;
+use serde::{Deserialize, Serialize};
+
+/// Container for 256 bits or 32 bytes.
+#[derive(
+    Clone, Copy, Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize,
+)]
+#[serde(transparent)]
+pub struct B256(pub [u8; 32]);
+
+impl AsRef<[u8]> for B256 {
+    fn as_ref(&self) -> &[u8] {
+        &self.0[..]
+    }
+}
+
+impl From<[u8; 32]> for B256 {
+    fn from(value: [u8; 32]) -> Self {
+        Self(value)
+    }
+}
+
+impl From<B256> for [u8; 32] {
+    fn from(value: B256) -> Self {
+        value.0
+    }
+}
+
+impl From<&[u8; 32]> for B256 {
+    fn from(value: &[u8; 32]) -> Self {
+        Self(*value)
+    }
+}
+
+impl TryFrom<&[u8]> for B256 {
+    type Error = anyhow::Error;
+
+    fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
+        if slice.len() == 32 {
+            let mut array = [0u8; 32];
+            array.copy_from_slice(slice);
+            Ok(Self(array))
+        } else {
+            Err(anyhow!("hash slice must be exactly 32 bytes"))
+        }
+    }
+}
+
+impl From<u64> for B256 {
+    fn from(value: u64) -> Self {
+        let mut padded = [0u8; 32];
+        padded[24..].copy_from_slice(&value.to_be_bytes());
+        Self(padded)
+    }
+}
+
+impl std::str::FromStr for B256 {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        parse_array::<32>(s)
+            .map(Self::from)
+            .map_err(|e| anyhow::anyhow!(e))
+    }
+}
+
+/// Parse from a base32 string into a byte array.
+fn parse_array<const N: usize>(input: &str) -> Result<[u8; N], DecodeError> {
+    data_encoding::BASE32_NOPAD
+        .decode(input.to_ascii_uppercase().as_bytes())?
+        .try_into()
+        .map_err(|_| DecodeError {
+            position: N,
+            kind: DecodeKind::Length,
+        })
+}
+
+impl std::fmt::Display for B256 {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut t = data_encoding::BASE32_NOPAD.encode(self.as_ref());
+        t.make_ascii_lowercase();
+        f.write_str(&t)
+    }
+}
+
+impl MapKey for B256 {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        b.try_into().map_err(|e: anyhow::Error| e.to_string())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.0.to_vec())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::str::FromStr;
+
+    #[test]
+    fn test_display_parse_roundtrip() {
+        for i in 0..100 {
+            let b: B256 = blake3::hash(&[i]).as_bytes().into();
+            let text = b.to_string();
+            let b1 = text.parse::<B256>().unwrap();
+            let b2 = B256::from_str(&text).unwrap();
+            assert_eq!(b, b1);
+            assert_eq!(b, b2);
+        }
+    }
+}
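A worked example of the base32 text form defined above: 32 bytes encode to 52 characters of lowercase RFC 4648 base32 without padding, and parsing is case-insensitive.

    let b = B256::default(); // 32 zero bytes
    let s = b.to_string();
    assert_eq!(s.len(), 52);
    assert!(s.chars().all(|c| c == 'a'));
    assert_eq!(s.parse::<B256>().unwrap(), b);
    assert_eq!(s.to_uppercase().parse::<B256>().unwrap(), b);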
diff --git a/fendermint/actors/blobs/shared/src/credit.rs b/fendermint/actors/blobs/shared/src/credit.rs
new file mode 100644
index 0000000000..2a3b46b23a
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/credit.rs
@@ -0,0 +1,19 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_shared::econ::TokenAmount;
+
+mod allowance;
+mod approval;
+mod params;
+mod token_rate;
+
+pub use allowance::*;
+pub use approval::*;
+pub use params::*;
+pub use token_rate::*;
+
+/// Credit is counted the same way as tokens.
+/// The smallest indivisible unit is 1 atto, and 1 credit = 1e18 atto credits.
+pub type Credit = TokenAmount;
diff --git a/fendermint/actors/blobs/shared/src/credit/allowance.rs b/fendermint/actors/blobs/shared/src/credit/allowance.rs
new file mode 100644
index 0000000000..b462e4d5d4
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/credit/allowance.rs
@@ -0,0 +1,44 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, econ::TokenAmount};
+
+use crate::credit::Credit;
+
+/// Credit allowance for an account.
+#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct CreditAllowance {
+    /// The amount from the account.
+    pub amount: Credit,
+    /// The account's default sponsor.
+    pub sponsor: Option<Address>,
+    /// The amount from the account's default sponsor.
+    pub sponsored_amount: Credit,
+}
+
+impl CreditAllowance {
+    /// Returns the total allowance from self and the default sponsor.
+    pub fn total(&self) -> Credit {
+        &self.amount + &self.sponsored_amount
+    }
+}
+
+/// Gas allowance for an account.
+#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct GasAllowance {
+    /// The amount from the account.
+    pub amount: TokenAmount,
+    /// The account's default sponsor.
+    pub sponsor: Option<Address>,
+    /// The amount from the account's default sponsor.
+    pub sponsored_amount: TokenAmount,
+}
+
+impl GasAllowance {
+    /// Returns the total allowance from self and the default sponsor.
+    pub fn total(&self) -> TokenAmount {
+        &self.amount + &self.sponsored_amount
+    }
+}
diff --git a/fendermint/actors/blobs/shared/src/credit/approval.rs b/fendermint/actors/blobs/shared/src/credit/approval.rs
new file mode 100644
index 0000000000..397eb34b7d
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/credit/approval.rs
@@ -0,0 +1,78 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fil_actors_runtime::ActorError;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{clock::ChainEpoch, econ::TokenAmount};
+
+use crate::credit::Credit;
+
+/// A credit approval from one account to another.
+#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct CreditApproval {
+    /// Optional credit approval limit.
+    pub credit_limit: Option<Credit>,
+    /// Used to limit gas fee delegation.
+    pub gas_allowance_limit: Option<TokenAmount>,
+    /// Optional credit approval expiry epoch.
+    pub expiry: Option<ChainEpoch>,
+    /// Counter for how much credit has been used via this approval.
+    pub credit_used: Credit,
+    /// Used to track gas fees paid for by the delegation.
+    pub gas_allowance_used: TokenAmount,
+}
+
+impl CreditApproval {
+    /// Returns a new credit approval.
+    pub fn new(
+        credit_limit: Option<Credit>,
+        gas_allowance_limit: Option<TokenAmount>,
+        expiry: Option<ChainEpoch>,
+    ) -> Self {
+        Self {
+            credit_limit,
+            gas_allowance_limit,
+            expiry,
+            ..Default::default()
+        }
+    }
+
+    /// Validates whether the approval has enough allowance for the credit amount.
+    pub fn validate_credit_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> {
+        if let Some(credit_limit) = self.credit_limit.as_ref() {
+            let unused = &(credit_limit - &self.credit_used);
+            if unused < amount {
+                return Err(ActorError::forbidden(format!(
+                    "usage would exceed approval credit limit (available: {}; required: {})",
+                    unused, amount
+                )));
+            }
+        }
+        Ok(())
+    }
+
+    /// Validates whether the approval has enough allowance for the gas amount.
+    pub fn validate_gas_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> {
+        if let Some(gas_limit) = self.gas_allowance_limit.as_ref() {
+            let unused = &(gas_limit - &self.gas_allowance_used);
+            if unused < amount {
+                return Err(ActorError::forbidden(format!(
+                    "usage would exceed approval gas allowance (available: {}; required: {})",
+                    unused, amount
+                )));
+            }
+        }
+        Ok(())
+    }
+
+    /// Validates whether the approval has a valid expiration.
+    pub fn validate_expiration(&self, current_epoch: ChainEpoch) -> Result<(), ActorError> {
+        if let Some(expiry) = self.expiry {
+            if expiry <= current_epoch {
+                return Err(ActorError::forbidden("approval expired".into()));
+            }
+        }
+        Ok(())
+    }
+}
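A worked example of the three checks, using the type above (values are illustrative):

    let mut approval = CreditApproval::new(
        Some(Credit::from_whole(10)), // credit limit
        None,                         // no gas allowance limit
        Some(100),                    // expires at epoch 100
    );
    approval.credit_used = Credit::from_whole(9);
    assert!(approval.validate_credit_usage(&Credit::from_whole(1)).is_ok());
    assert!(approval.validate_credit_usage(&Credit::from_whole(2)).is_err());
    assert!(approval.validate_expiration(99).is_ok());
    assert!(approval.validate_expiration(100).is_err()); // the expiry epoch itself is invalid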
diff --git a/fendermint/actors/blobs/shared/src/credit/params.rs b/fendermint/actors/blobs/shared/src/credit/params.rs
new file mode 100644
index 0000000000..01f76a06a7
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/credit/params.rs
@@ -0,0 +1,79 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashSet;
+
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount};
+use serde::{Deserialize, Serialize};
+
+use super::Credit;
+
+/// Params for buying credits.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct BuyCreditParams(pub Address);
+
+/// Set credit sponsor.
+/// If not present, the sponsor is unset.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct SetSponsorParams(pub Option<Address>);
+
+/// Params for updating credit.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct UpdateGasAllowanceParams {
+    /// Account address that initiated the update.
+    pub from: Address,
+    /// Optional account address that is sponsoring the update.
+    pub sponsor: Option<Address>,
+    /// Token amount to add, which can be negative.
+    pub add_amount: TokenAmount,
+}
+
+/// Params for approving credit.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ApproveCreditParams {
+    /// Account address that is receiving the approval.
+    pub to: Address,
+    /// Optional restriction on caller addresses, e.g., a bucket.
+    /// The receiver will only be able to use the approval via an allowlisted caller.
+    /// If not present, any caller is allowed.
+    pub caller_allowlist: Option<HashSet<Address>>,
+    /// Optional credit approval limit.
+    /// If specified, the approval becomes invalid once the used credits reach the
+    /// specified limit.
+    pub credit_limit: Option<Credit>,
+    /// Optional gas fee limit.
+    /// If specified, the approval becomes invalid once the used gas fees reach the
+    /// specified limit.
+    pub gas_fee_limit: Option<TokenAmount>,
+    /// Optional credit approval time-to-live epochs.
+    /// If specified, the approval becomes invalid after this duration.
+    pub ttl: Option<ChainEpoch>,
+}
+
+/// Params for revoking credit.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct RevokeCreditParams {
+    /// Account address whose approval is being revoked.
+    pub to: Address,
+    /// Optional caller address to remove from the caller allowlist.
+    /// If not present, the entire approval is revoked.
+    pub for_caller: Option<Address>,
+}
+
+/// Params for looking up a credit approval.
+#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct GetCreditApprovalParams {
+    /// Account address that made the approval.
+    pub from: Address,
+    /// Account address that received the approval.
+    pub to: Address,
+}
+
+/// Params for looking up credit allowance.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct GetGasAllowanceParams(pub Address);
diff --git a/fendermint/actors/blobs/shared/src/credit/token_rate.rs b/fendermint/actors/blobs/shared/src/credit/token_rate.rs
new file mode 100644
index 0000000000..6b816c3682
--- /dev/null
+++ b/fendermint/actors/blobs/shared/src/credit/token_rate.rs
@@ -0,0 +1,157 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::ops::{Div, Mul};
+
+use fvm_shared::{
+    bigint::{BigInt, BigUint},
+    econ::TokenAmount,
+};
+use serde::{Deserialize, Serialize};
+
+use super::Credit;
+
+/// TokenCreditRate determines how many atto credits can be bought with a given amount of RECALL.
+#[derive(Clone, Default, Debug, Serialize, Deserialize, Eq, PartialEq)]
+pub struct TokenCreditRate {
+    rate: BigUint,
+}
+
+impl TokenCreditRate {
+    pub const RATIO: u128 = 10u128.pow(18);
+
+    pub fn from(rate: impl Into<BigUint>) -> Self {
+        Self { rate: rate.into() }
+    }
+
+    pub fn rate(&self) -> &BigUint {
+        &self.rate
+    }
+}
+
+impl std::fmt::Display for TokenCreditRate {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.rate)
+    }
+}
+
+impl Mul<&TokenCreditRate> for TokenAmount {
+    type Output = Credit;
+
+    fn mul(self, rate: &TokenCreditRate) -> Self::Output {
+        let rate = BigInt::from(rate.rate.clone());
+        (self * rate).div_floor(TokenCreditRate::RATIO)
+    }
+}
+
+impl Div<&TokenCreditRate> for &Credit {
+    type Output = TokenAmount;
+
+    fn div(self, rate: &TokenCreditRate) -> Self::Output {
+        #[allow(clippy::suspicious_arithmetic_impl)]
+        (self * TokenCreditRate::RATIO).div_floor(rate.rate.clone())
+    }
+}
+
+impl PartialOrd for TokenCreditRate {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TokenCreditRate {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.rate.cmp(&other.rate)
+    }
+}
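Before the test table below, a compact worked example of the fixed-point convention: `RATIO` is 1e18, so a stored rate of 1e18 means one whole RECALL buys exactly one whole credit, with both directions rounding down via `div_floor`:

    let rate = TokenCreditRate::from(10u128.pow(18));
    let credits = TokenAmount::from_whole(2) * &rate; // 2 RECALL -> 2.0 credits
    assert_eq!(credits, TokenAmount::from_whole(2));
    let tokens = &credits / &rate; // and back to 2 RECALL
    assert_eq!(tokens, TokenAmount::from_whole(2));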
0.5 RECALL cannot buy 1 atto credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(2usize), + expected: "0.000000000000000002", + description: "1 RECALL buys 2 atto credits", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "1.0", + description: "1 RECALL buys 1 whole credit", + }, + TestCase { + tokens: TokenAmount::from_whole(50), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "50.0", + description: "50 RECALL buys 50 whole credits", + }, + TestCase { + tokens: TokenAmount::from_nano(233432100u64), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "0.2334321", + description: "0.2334321 RECALL buys 0.2334321 credits", + }, + TestCase { + tokens: TokenAmount::from_nano(233432100u64), + rate: TokenCreditRate::from(10u128.pow(36)), + expected: "233432100000000000.0", + description: "0.2334321 RECALL buys 233432100000000000 credits", + }, + TestCase { + tokens: TokenAmount::from_atto(1), // 1 attoRECALL + rate: TokenCreditRate::from(10u128.pow(36)), + expected: "1.0", + description: "1 atto RECALL buys 1 credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u128.pow(18).div(4)), + expected: "0.25", + description: "1 RECALL buys 0.25 credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u128.pow(18).div(3)), + expected: "0.333333333333333333", + description: "1 RECALL buys 0.333333333333333333 credit", + }, + ]; + + for t in test_cases { + let credits = t.tokens.clone() * &t.rate; + assert_eq!( + t.expected, + credits.to_string(), + "tc: {}, {}, {}", + t.description, + t.tokens, + t.rate + ); + } + } +} diff --git a/fendermint/actors/blobs/shared/src/lib.rs b/fendermint/actors/blobs/shared/src/lib.rs new file mode 100644 index 0000000000..b5d78a0992 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/lib.rs @@ -0,0 +1,54 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_shared::econ::TokenAmount; +use fvm_shared::{address::Address, ActorID}; + +use crate::credit::{Credit, TokenCreditRate}; + +pub mod accounts; +pub mod blobs; +pub mod bytes; +pub mod credit; +pub mod method; +pub mod operators; +pub mod sdk; + +/// The unique identifier for the blob actor in the system. +pub const BLOBS_ACTOR_ID: ActorID = 66; +/// The address of the blob actor, derived from its actor ID. +pub const BLOBS_ACTOR_ADDR: Address = Address::new_id(BLOBS_ACTOR_ID); + +/// The stats of the blob actor. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetStatsReturn { + /// The current token balance earned by the subnet. + pub balance: TokenAmount, + /// The total free storage capacity of the subnet. + pub capacity_free: u64, + /// The total used storage capacity of the subnet. + pub capacity_used: u64, + /// The total number of credits sold in the subnet. + pub credit_sold: Credit, + /// The total number of credits committed to active storage in the subnet. + pub credit_committed: Credit, + /// The total number of credits debited in the subnet. + pub credit_debited: Credit, + /// The token to credit rate. + pub token_credit_rate: TokenCreditRate, + /// Total number of debit accounts. + pub num_accounts: u64, + /// Total number of actively stored blobs. 
+ pub num_blobs: u64, + /// Total number of blobs that are not yet added to the validator's resolve pool. + pub num_added: u64, + /// Total bytes of all blobs that are not yet added to the validator's resolve pool. + pub bytes_added: u64, + /// Total number of currently resolving blobs. + pub num_resolving: u64, + /// Total bytes of all currently resolving blobs. + pub bytes_resolving: u64, +} diff --git a/fendermint/actors/blobs/shared/src/method.rs b/fendermint/actors/blobs/shared/src/method.rs new file mode 100644 index 0000000000..3718f09132 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/method.rs @@ -0,0 +1,49 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + + // EVM Interop + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), + + // User methods + BuyCredit = frc42_dispatch::method_hash!("BuyCredit"), + ApproveCredit = frc42_dispatch::method_hash!("ApproveCredit"), + RevokeCredit = frc42_dispatch::method_hash!("RevokeCredit"), + SetAccountSponsor = frc42_dispatch::method_hash!("SetAccountSponsor"), + GetAccount = frc42_dispatch::method_hash!("GetAccount"), + GetCreditApproval = frc42_dispatch::method_hash!("GetCreditApproval"), + AddBlob = frc42_dispatch::method_hash!("AddBlob"), + GetBlob = frc42_dispatch::method_hash!("GetBlob"), + DeleteBlob = frc42_dispatch::method_hash!("DeleteBlob"), + OverwriteBlob = frc42_dispatch::method_hash!("OverwriteBlob"), + + // System methods + GetGasAllowance = frc42_dispatch::method_hash!("GetGasAllowance"), + UpdateGasAllowance = frc42_dispatch::method_hash!("UpdateGasAllowance"), + GetBlobStatus = frc42_dispatch::method_hash!("GetBlobStatus"), + GetAddedBlobs = frc42_dispatch::method_hash!("GetAddedBlobs"), + GetPendingBlobs = frc42_dispatch::method_hash!("GetPendingBlobs"), + SetBlobPending = frc42_dispatch::method_hash!("SetBlobPending"), + FinalizeBlob = frc42_dispatch::method_hash!("FinalizeBlob"), + DebitAccounts = frc42_dispatch::method_hash!("DebitAccounts"), + + // Admin methods + SetAccountStatus = frc42_dispatch::method_hash!("SetAccountStatus"), + TrimBlobExpiries = frc42_dispatch::method_hash!("TrimBlobExpiries"), + + // Metrics methods + GetStats = frc42_dispatch::method_hash!("GetStats"), + + // Node operator methods + RegisterNodeOperator = frc42_dispatch::method_hash!("RegisterNodeOperator"), + GetOperatorInfo = frc42_dispatch::method_hash!("GetOperatorInfo"), + GetActiveOperators = frc42_dispatch::method_hash!("GetActiveOperators"), +} diff --git a/fendermint/actors/blobs/shared/src/operators.rs b/fendermint/actors/blobs/shared/src/operators.rs new file mode 100644 index 0000000000..e612958276 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/operators.rs @@ -0,0 +1,41 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; + +/// Parameters for registering a node operator +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct RegisterNodeOperatorParams { + /// BLS public key (must be 48 bytes) + pub bls_pubkey: Vec<u8>, + /// RPC URL where the operator's node can be queried for signatures + pub rpc_url: String, +} + +/// Parameters for getting operator information +#[derive(Clone, Debug, Serialize_tuple, 
Deserialize_tuple)] +pub struct GetOperatorInfoParams { + /// Address of the operator + pub address: Address, +} + +/// Return type for getting operator information +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct OperatorInfo { + /// BLS public key + pub bls_pubkey: Vec<u8>, + /// RPC URL + pub rpc_url: String, + /// Whether the operator is active + pub active: bool, +} + +/// Return type for getting all active operators +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetActiveOperatorsReturn { + /// Ordered list of active operator addresses + /// Index in this list corresponds to bit position in signature bitmap + pub operators: Vec<Address>
, +} diff --git a/fendermint/actors/blobs/shared/src/sdk.rs b/fendermint/actors/blobs/shared/src/sdk.rs new file mode 100644 index 0000000000..77bd816270 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/sdk.rs @@ -0,0 +1,97 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::{deserialize_block, extract_send_result, runtime::Runtime, ActorError}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::{address::Address, sys::SendFlags, MethodNum}; + +use crate::{ + blobs::{ + AddBlobParams, Blob, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, Subscription, + }, + credit::{CreditApproval, GetCreditApprovalParams}, + method::Method, + BLOBS_ACTOR_ADDR, +}; + +/// Returns a credit approval from one account to another if it exists. +pub fn get_credit_approval( + rt: &impl Runtime, + from: Address, + to: Address, +) -> Result<Option<CreditApproval>, ActorError> { + let params = GetCreditApprovalParams { from, to }; + + deserialize_block(extract_send_result(rt.send( + &BLOBS_ACTOR_ADDR, + Method::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&params)?, + rt.message().value_received(), + None, + SendFlags::READ_ONLY, + ))?) +} + +/// Returns `true` if `from` and `to` are the same address, +/// or if `from` has a credit delegation to `to` that has not yet expired. +pub fn has_credit_approval( + rt: &impl Runtime, + from: Address, + to: Address, +) -> Result<bool, ActorError> { + if from != to { + let approval = get_credit_approval(rt, from, to)?; + let curr_epoch = rt.curr_epoch(); + Ok(approval.is_some_and(|a| a.expiry.is_none_or(|e| e >= curr_epoch))) + } else { + Ok(true) + } +} + +/// Adds a blob. +pub fn add_blob(rt: &impl Runtime, params: AddBlobParams) -> Result<Subscription, ActorError> { + let params = IpldBlock::serialize_cbor(&params)?; + deserialize_block(extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::AddBlob as MethodNum, + params, + rt.message().value_received(), + ))?) +} + +/// Returns information about a blob. +pub fn get_blob(rt: &impl Runtime, params: GetBlobParams) -> Result<Option<Blob>, ActorError> { + deserialize_block(extract_send_result(rt.send( + &BLOBS_ACTOR_ADDR, + Method::GetBlob as MethodNum, + IpldBlock::serialize_cbor(&params)?, + rt.message().value_received(), + None, + SendFlags::READ_ONLY, + ))?) +} + +/// Deletes a blob. +pub fn delete_blob(rt: &impl Runtime, params: DeleteBlobParams) -> Result<(), ActorError> { + extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::DeleteBlob as MethodNum, + IpldBlock::serialize_cbor(&params)?, + rt.message().value_received(), + ))?; + Ok(()) +} + +/// Overwrite a blob, i.e., delete one and add another in a single call. +pub fn overwrite_blob( + rt: &impl Runtime, + params: OverwriteBlobParams, +) -> Result<Subscription, ActorError> { + deserialize_block(extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::OverwriteBlob as MethodNum, + IpldBlock::serialize_cbor(&params)?, + rt.message().value_received(), + ))?) 
+} diff --git a/fendermint/actors/blobs/src/actor.rs b/fendermint/actors/blobs/src/actor.rs new file mode 100644 index 0000000000..9fbd7999b5 --- /dev/null +++ b/fendermint/actors/blobs/src/actor.rs @@ -0,0 +1,235 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{bytes::B256, method::Method}; +use fil_actors_runtime::{ + actor_dispatch, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, FIRST_EXPORTED_METHOD_NUMBER, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::MethodNum; +use recall_actor_sdk::evm::{InputData, InvokeContractParams, InvokeContractReturn}; + +use crate::{ + sol_facade::{blobs as sol_blobs, credit as sol_credit, AbiCall, AbiCallRuntime}, + State, BLOBS_ACTOR_NAME, +}; + +mod admin; +mod metrics; +mod system; +mod user; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(BlobsActor); + +/// Singleton actor for managing blob storage. +/// +/// The [`Address`]es stored in this actor's state _must_ be ID-based addresses for +/// efficient comparison with message origin and caller addresses, which are always ID-based. +/// [`Address`]es in the method params can be of any type. +/// They will be resolved to ID-based addresses. +/// +/// For simplicity, this actor currently manages both blobs and credit. +/// A future version of the protocol will likely separate them in some way. +pub struct BlobsActor; + +impl BlobsActor { + /// Creates a new [`BlobsActor`] state. + /// + /// This is only used in tests. This actor is created manually at genesis. + fn constructor(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store())?; + rt.create(&state) + } + + /// Invokes actor methods with EVM calldata. + fn invoke_contract( + rt: &impl Runtime, + params: InvokeContractParams, + ) -> Result<InvokeContractReturn, ActorError> { + let input_data: InputData = params.try_into()?; + if sol_blobs::can_handle(&input_data) { + let output_data = match sol_blobs::parse_input(&input_data)? { + sol_blobs::Calls::addBlob(call) => { + let params = call.params(rt)?; + Self::add_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::deleteBlob(call) => { + let params = call.params(rt)?; + Self::delete_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::getBlob(call) => { + let params = call.params()?; + let blob = Self::get_blob(rt, params)?; + call.returns(blob)? + } + sol_blobs::Calls::getStats(call) => { + let stats = Self::get_stats(rt)?; + call.returns(stats) + } + sol_blobs::Calls::overwriteBlob(call) => { + let params = call.params(rt)?; + Self::overwrite_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::trimBlobExpiries(call) => { + let params = call.params(); + let cursor = Self::trim_blob_expiries(rt, params)?; + call.returns(cursor) + } + }; + Ok(InvokeContractReturn { output_data }) + } else if sol_credit::can_handle(&input_data) { + let output_data = match sol_credit::parse_input(&input_data)? 
{ + sol_credit::Calls::buyCredit_0(call) => { + // function buyCredit() external payable; + let params = call.params(rt); + Self::buy_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::buyCredit_1(call) => { + // function buyCredit(address recipient) external payable; + let params = call.params(); + Self::buy_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_0(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_1(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_2(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::revokeCredit_0(call) => { + let params = call.params(); + Self::revoke_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::revokeCredit_1(call) => { + let params = call.params(); + Self::revoke_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::setAccountSponsor(call) => { + let params = call.params(); + Self::set_account_sponsor(rt, params)?; + call.returns(()) + } + sol_credit::Calls::getAccount(call) => { + let params = call.params(); + let account_info = Self::get_account(rt, params)?; + call.returns(account_info)? + } + sol_credit::Calls::getCreditApproval(call) => { + let params = call.params(); + let credit_approval = Self::get_credit_approval(rt, params)?; + call.returns(credit_approval) + } + sol_credit::Calls::setAccountStatus(call) => { + let params = call.params()?; + Self::set_account_status(rt, params)?; + call.returns(()) + } + }; + Ok(InvokeContractReturn { output_data }) + } else { + Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } + + /// Fallback method for unimplemented method numbers. + fn fallback( + rt: &impl Runtime, + method: MethodNum, + _: Option<IpldBlock>, + ) -> Result<Option<IpldBlock>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + if method >= FIRST_EXPORTED_METHOD_NUMBER { + Ok(None) + } else { + Err(actor_error!(unhandled_message; "invalid method: {}", method)) + } + } +} + +impl ActorCode for BlobsActor { + type Methods = Method; + + fn name() -> &'static str { + BLOBS_ACTOR_NAME + } + + actor_dispatch! { + Constructor => constructor, + + // EVM interop + InvokeContract => invoke_contract, + + // User methods + BuyCredit => buy_credit, + ApproveCredit => approve_credit, + RevokeCredit => revoke_credit, + SetAccountSponsor => set_account_sponsor, + GetAccount => get_account, + GetCreditApproval => get_credit_approval, + AddBlob => add_blob, + GetBlob => get_blob, + DeleteBlob => delete_blob, + OverwriteBlob => overwrite_blob, + + // System methods + GetGasAllowance => get_gas_allowance, + UpdateGasAllowance => update_gas_allowance, + GetBlobStatus => get_blob_status, + GetAddedBlobs => get_added_blobs, + GetPendingBlobs => get_pending_blobs, + SetBlobPending => set_blob_pending, + FinalizeBlob => finalize_blob, + DebitAccounts => debit_accounts, + + // Admin methods + SetAccountStatus => set_account_status, + TrimBlobExpiries => trim_blob_expiries, + + // Metrics methods + GetStats => get_stats, + + // Node operator methods + RegisterNodeOperator => register_node_operator, + GetOperatorInfo => get_operator_info, + GetActiveOperators => get_active_operators, + + _ => fallback, + } +} + +/// Makes a syscall that will delete a blob from the underlying Iroh-based data store. 
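+/// +/// A minimal usage sketch (hypothetical hash value; assumes `B256` is a tuple struct over a 32-byte array, as suggested by the `hash.0` access below): +/// +/// ```ignore +/// let hash = B256([0u8; 32]); // placeholder hash, for illustration only +/// delete_from_disc(hash)?; +/// ```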
+fn delete_from_disc(hash: B256) -> Result<(), ActorError> { + #[cfg(feature = "fil-actor")] + { + recall_actor_sdk::storage::delete_blob(hash.0).map_err(|en| { + ActorError::unspecified(format!("failed to delete blob from disc: {:?}", en)) + })?; + log::debug!("deleted blob {} from disc", hash); + Ok(()) + } + #[cfg(not(feature = "fil-actor"))] + { + log::debug!("mock deletion from disc (hash={})", hash); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/actor/admin.rs b/fendermint/actors/blobs/src/actor/admin.rs new file mode 100644 index 0000000000..757ad3ac2d --- /dev/null +++ b/fendermint/actors/blobs/src/actor/admin.rs @@ -0,0 +1,74 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::SetAccountStatusParams, blobs::TrimBlobExpiriesParams, bytes::B256, +}; +use fendermint_actor_recall_config_shared::{get_config, require_caller_is_admin}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use recall_actor_sdk::caller::{Caller, CallerOption}; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + State, +}; + +impl BlobsActor { + /// Sets the account status for an address. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn set_account_status( + rt: &impl Runtime, + params: SetAccountStatusParams, + ) -> Result<(), ActorError> { + require_caller_is_admin(rt)?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + let config = get_config(rt)?; + + rt.transaction(|st: &mut State, rt| { + st.set_account_status( + rt.store(), + &config, + caller.state_address(), + params.status, + rt.curr_epoch(), + ) + }) + } + + /// Trims the subscription expiries for an account based on its current maximum allowed blob TTL. + /// + /// This is used in conjunction with `set_account_status` when reducing an account's maximum + /// allowed blob TTL. + /// Returns the number of subscriptions processed and the next key to continue iteration. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn trim_blob_expiries( + rt: &impl Runtime, + params: TrimBlobExpiriesParams, + ) -> Result<(u32, Option<B256>), ActorError> { + require_caller_is_admin(rt)?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + let config = get_config(rt)?; + + let (processed, next_key, deleted_blobs) = rt.transaction(|st: &mut State, rt| { + st.trim_blob_expiries( + &config, + rt.store(), + caller.state_address(), + rt.curr_epoch(), + params.starting_hash, + params.limit, + ) + })?; + + for hash in deleted_blobs { + delete_from_disc(hash)?; + } + + Ok((processed, next_key)) + } +} diff --git a/fendermint/actors/blobs/src/actor/metrics.rs b/fendermint/actors/blobs/src/actor/metrics.rs new file mode 100644 index 0000000000..51dd636d3a --- /dev/null +++ b/fendermint/actors/blobs/src/actor/metrics.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::GetStatsReturn; +use fendermint_actor_recall_config_shared::get_config; +use fil_actors_runtime::{runtime::Runtime, ActorError}; + +use crate::{actor::BlobsActor, State}; + +impl BlobsActor { + /// Returns credit and storage usage statistics. 
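+ /// + /// A read-only call sketch (assuming the `MockRuntime` test utilities used elsewhere in this diff; expectation setup such as `rt.expect_validate_caller_any()` is elided): + /// + /// ```ignore + /// let stats = rt + ///     .call::<BlobsActor>(Method::GetStats as u64, None) + ///     .unwrap() + ///     .unwrap() + ///     .deserialize::<GetStatsReturn>() + ///     .unwrap(); + /// assert_eq!(stats.num_blobs, 0); // assumption: a freshly constructed state holds no blobs + /// ```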
+ pub fn get_stats(rt: &impl Runtime) -> Result<GetStatsReturn, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let config = get_config(rt)?; + let stats = rt + .state::<State>()? + .get_stats(&config, rt.current_balance()); + + Ok(stats) + } +} diff --git a/fendermint/actors/blobs/src/actor/system.rs b/fendermint/actors/blobs/src/actor/system.rs new file mode 100644 index 0000000000..5a3c4b6780 --- /dev/null +++ b/fendermint/actors/blobs/src/actor/system.rs @@ -0,0 +1,420 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::blobs::BlobRequest; +use fendermint_actor_blobs_shared::{ + blobs::{ + BlobStatus, FinalizeBlobParams, GetAddedBlobsParams, GetBlobStatusParams, + GetPendingBlobsParams, SetBlobPendingParams, + }, + credit::{Credit, GasAllowance, GetGasAllowanceParams, UpdateGasAllowanceParams}, + operators::{ + GetActiveOperatorsReturn, GetOperatorInfoParams, OperatorInfo, RegisterNodeOperatorParams, + }, +}; +use fendermint_actor_recall_config_shared::get_config; +use fil_actors_runtime::{runtime::Runtime, ActorError, SYSTEM_ACTOR_ADDR}; +use fvm_shared::error::ExitCode; +use num_traits::Zero; +use recall_actor_sdk::{ + caller::{Caller, CallerOption}, + evm::emit_evm_event, +}; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + sol_facade::{blobs as sol_blobs, credit::CreditDebited}, + state::blobs::{FinalizeBlobStateParams, SetPendingBlobStateParams}, + State, +}; + +impl BlobsActor { + /// Returns the gas allowance from a credit purchase for an address. + /// + /// This method is called by the recall executor, and as such, cannot fail. + pub fn get_gas_allowance( + rt: &impl Runtime, + params: GetGasAllowanceParams, + ) -> Result<GasAllowance, ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let from_caller = match Caller::new(rt, params.0, None, CallerOption::None) { + Ok(caller) => caller, + Err(e) => { + return if e.exit_code() == ExitCode::USR_FORBIDDEN { + // Disallowed actor type (this is called by all txns so we can't error) + Ok(GasAllowance::default()) + } else { + Err(e) + }; + } + }; + + let allowance = rt.state::<State>()?.get_gas_allowance( + rt.store(), + from_caller.state_address(), + rt.curr_epoch(), + )?; + + Ok(allowance) + } + + /// Updates gas allowance for the `from` address. + /// + /// The allowance update is applied to `sponsor` if it exists. + /// The `from` address must have an approval from `sponsor`. + /// The `from` address can be any actor, including those without delegated addresses. + /// This method is called by the recall executor, and as such, cannot fail. + pub fn update_gas_allowance( + rt: &impl Runtime, + params: UpdateGasAllowanceParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let caller = Caller::new(rt, params.from, params.sponsor, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.update_gas_allowance( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + params.add_amount, + rt.curr_epoch(), + ) + }) + } + + /// Returns the current [`BlobStatus`] for a blob by hash. 
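+ /// + /// Blobs move through a simple lifecycle: they start as [`BlobStatus::Added`], are picked + /// up by validators as [`BlobStatus::Pending`], and are finalized to + /// [`BlobStatus::Resolved`] or [`BlobStatus::Failed`] via `finalize_blob`.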
+ pub fn get_blob_status( + rt: &impl Runtime, + params: GetBlobStatusParams, + ) -> Result<Option<BlobStatus>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.subscriber, None, CallerOption::None)?; + + rt.state::<State>()?.get_blob_status( + rt.store(), + caller.state_address(), + params.hash, + params.id, + ) + } + + /// Returns a list of [`BlobRequest`]s that are currently in the [`BlobStatus::Added`] state. + /// + /// All blobs that have been added but have not yet been picked up by validators for download + /// are in the [`BlobStatus::Added`] state. + pub fn get_added_blobs( + rt: &impl Runtime, + params: GetAddedBlobsParams, + ) -> Result<Vec<BlobRequest>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::<State>()?.get_added_blobs(rt.store(), params.0) + } + + /// Returns a list of [`BlobRequest`]s that are currently in the [`BlobStatus::Pending`] state. + /// + /// All blobs that have been added and picked up by validators for download are in the + /// [`BlobStatus::Pending`] state. + /// These are the blobs that validators are currently coordinating to download. They will + /// vote on the final status ([`BlobStatus::Resolved`] or [`BlobStatus::Failed`]), which is + /// recorded on-chain with the `finalize_blob` method. + pub fn get_pending_blobs( + rt: &impl Runtime, + params: GetPendingBlobsParams, + ) -> Result<Vec<BlobRequest>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::<State>()?.get_pending_blobs(rt.store(), params.0) + } + + /// Sets a blob to the [`BlobStatus::Pending`] state. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn set_blob_pending( + rt: &impl Runtime, + params: SetBlobPendingParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.set_blob_pending( + rt.store(), + caller.state_address(), + SetPendingBlobStateParams::from_actor_params(params.clone()), + ) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobPending { + subscriber: caller.event_address(), + hash: &params.hash, + source: &params.source, + }, + ) + } + + /// Finalizes a blob to the [`BlobStatus::Resolved`] or [`BlobStatus::Failed`] state. + /// + /// This is the final protocol step to add a blob, which is controlled by node operator consensus. + /// The [`BlobStatus::Resolved`] state means that a quorum of operators was able to download the blob. + /// The [`BlobStatus::Failed`] state means that a quorum of operators was not able to download the blob. + /// + /// # BLS Signature Verification + /// This method verifies the aggregated BLS signature from node operators to ensure: + /// 1. At least 2/3+ of operators signed the blob hash + /// 2. 
The aggregated signature is valid for the blob hash + pub fn finalize_blob(rt: &impl Runtime, params: FinalizeBlobParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.subscriber, None, CallerOption::None)?; + + // Get current blob status from state + let current_status = rt.state::<State>()?.get_blob_status( + rt.store(), + caller.state_address(), + params.hash, + params.id.clone(), + )?; + + // Only finalize blobs that are in Added or Pending status + // (Resolved blobs are already finalized, Failed blobs cannot be retried) + if !matches!( + current_status, + Some(BlobStatus::Added) | Some(BlobStatus::Pending) + ) { + return Ok(()); + } + + Self::verify_blob_signatures(rt, &params)?; + + let event_resolved = matches!(params.status, BlobStatus::Resolved); + + rt.transaction(|st: &mut State, rt| { + st.finalize_blob( + rt.store(), + caller.state_address(), + FinalizeBlobStateParams::from_actor_params(params.clone(), rt.curr_epoch()), + ) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobFinalized { + subscriber: caller.event_address(), + hash: &params.hash, + resolved: event_resolved, + }, + ) + } + + /// Verify aggregated BLS signatures for blob finalization + fn verify_blob_signatures( + rt: &impl Runtime, + params: &FinalizeBlobParams, + ) -> Result<(), ActorError> { + use bls_signatures::{ + verify_messages, PublicKey as BlsPublicKey, Serialize as BlsSerialize, + Signature as BlsSignature, + }; + + // Parse aggregated signature + let aggregated_sig = BlsSignature::from_bytes(&params.aggregated_signature) + .map_err(|e| ActorError::illegal_argument(format!("Invalid BLS signature: {:?}", e)))?; + + // Get active operators from state + let state = rt.state::<State>()?; + let active_operators = state.operators.get_active_operators(); + let total_operators = active_operators.len(); + + if total_operators == 0 { + return Err(ActorError::illegal_state( + "No active operators registered".into(), + )); + } + + // Extract signer indices from bitmap and collect their public keys + let mut signer_pubkeys = Vec::new(); + let mut signer_count = 0; + + for (index, operator_addr) in active_operators.iter().enumerate() { + if index >= 128 { + break; // u128 bitmap can only hold 128 operators + } + + // Check if this operator signed (bit is set in bitmap) + if (params.signer_bitmap & (1u128 << index)) != 0 { + signer_count += 1; + + // Get operator info to retrieve BLS public key + let operator_info = + state + .operators + .get(rt.store(), operator_addr)? 
+ .ok_or_else(|| { + ActorError::illegal_state(format!( + "Operator {} not found in state", + operator_addr + )) + })?; + + // Parse BLS public key + let pubkey = BlsPublicKey::from_bytes(&operator_info.bls_pubkey).map_err(|e| { + ActorError::illegal_state(format!( + "Invalid BLS public key for operator {}: {:?}", + operator_addr, e + )) + })?; + + signer_pubkeys.push(pubkey); + } + } + + // Check threshold: need at least 2/3+ of operators + let threshold = (total_operators * 2 + 2) / 3; // Ceiling of 2/3 + if signer_count < threshold { + return Err(ActorError::illegal_argument(format!( + "Insufficient signatures: got {}, need {} out of {}", + signer_count, threshold, total_operators + ))); + } + + if signer_pubkeys.is_empty() { + return Err(ActorError::illegal_state("No signer public keys".into())); + } + + // All operators signed the same message (the blob hash) + let hash_bytes = params.hash.0.as_slice(); + + // Create a vector of the message repeated for each signer + let messages: Vec<&[u8]> = vec![hash_bytes; signer_count]; + + // Verify the aggregated signature using verify_messages + // This verifies that the aggregated signature corresponds to the individual signatures + let verification_result = verify_messages(&aggregated_sig, &messages, &signer_pubkeys); + + if !verification_result { + return Err(ActorError::illegal_argument( + "BLS signature verification failed".into(), + )); + } + + log::info!( + "BLS signature verified: {} operators signed (threshold: {}/{})", + signer_count, + threshold, + total_operators + ); + + Ok(()) + } + + /// Debits accounts for current blob usage. + /// + /// This is called by the system actor every X blocks, where X is set in the recall config actor. + pub fn debit_accounts(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let config = get_config(rt)?; + + let mut credit_debited = Credit::zero(); + let (deletes, num_accounts, more_accounts) = rt.transaction(|st: &mut State, rt| { + let initial_credit_debited = st.credits.credit_debited.clone(); + let (deletes, more_accounts) = + st.debit_accounts(rt.store(), &config, rt.curr_epoch())?; + credit_debited = &st.credits.credit_debited - initial_credit_debited; + let num_accounts = st.accounts.len(); + Ok((deletes, num_accounts, more_accounts)) + })?; + + for hash in deletes { + delete_from_disc(hash)?; + } + + emit_evm_event( + rt, + CreditDebited { + amount: credit_debited, + num_accounts, + more_accounts, + }, + )?; + + Ok(()) + } + + /// Register a new node operator with BLS public key and RPC URL + /// + /// The caller's address will be registered as the operator address. + /// This method can be called by anyone who wants to become a node operator. 
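+ /// + /// Example params (a zeroed 48-byte key and placeholder URL, for illustration only; a real + /// registration must supply the operator's actual BLS public key): + /// + /// ```ignore + /// let params = RegisterNodeOperatorParams { + ///     bls_pubkey: vec![0u8; 48], // must be exactly 48 bytes + ///     rpc_url: "https://operator.example.com".to_string(), + /// }; + /// ```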
+ pub fn register_node_operator( + rt: &impl Runtime, + params: RegisterNodeOperatorParams, + ) -> Result<u64, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + // Validate BLS public key length (must be 48 bytes) + if params.bls_pubkey.len() != 48 { + return Err(ActorError::illegal_argument( + "BLS public key must be exactly 48 bytes".into(), + )); + } + + // Validate RPC URL is not empty + if params.rpc_url.is_empty() { + return Err(ActorError::illegal_argument( + "RPC URL cannot be empty".into(), + )); + } + + let operator_address = rt.message().caller(); + + let index = rt.transaction(|st: &mut State, rt| { + let node_operator_info = crate::state::operators::NodeOperatorInfo { + bls_pubkey: params.bls_pubkey, + rpc_url: params.rpc_url, + registered_epoch: rt.curr_epoch(), + active: true, + }; + + st.operators + .register(rt.store(), operator_address, node_operator_info) + })?; + + Ok(index) + } + + /// Get information about a specific node operator + pub fn get_operator_info( + rt: &impl Runtime, + params: GetOperatorInfoParams, + ) -> Result<Option<OperatorInfo>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::<State>()?; + let info = state.operators.get(rt.store(), &params.address)?; + + Ok(info.map(|i| OperatorInfo { + bls_pubkey: i.bls_pubkey, + rpc_url: i.rpc_url, + active: i.active, + })) + } + + /// Get the ordered list of all active node operators + /// + /// The order of addresses in the returned list corresponds to the bit positions + /// in the signature bitmap used for BLS signature aggregation. + pub fn get_active_operators(rt: &impl Runtime) -> Result<GetActiveOperatorsReturn, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::<State>()?; + let operators = state.operators.get_active_operators(); + + Ok(GetActiveOperatorsReturn { operators }) + } +} diff --git a/fendermint/actors/blobs/src/actor/user.rs b/fendermint/actors/blobs/src/actor/user.rs new file mode 100644 index 0000000000..71361cf842 --- /dev/null +++ b/fendermint/actors/blobs/src/actor/user.rs @@ -0,0 +1,1169 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::{Account, GetAccountParams}, + blobs::{ + AddBlobParams, Blob, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, Subscription, + }, + credit::{ + ApproveCreditParams, BuyCreditParams, Credit, CreditApproval, GetCreditApprovalParams, + RevokeCreditParams, SetSponsorParams, + }, +}; +use fendermint_actor_recall_config_shared::get_config; +use fil_actors_runtime::{extract_send_result, runtime::Runtime, ActorError}; +use fvm_shared::{econ::TokenAmount, METHOD_SEND}; +use num_traits::Zero; +use recall_actor_sdk::{ + caller::{Caller, CallerOption}, + evm::emit_evm_event, + util::is_bucket_address, + util::to_delegated_address, +}; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + caller::DelegationOptions, + sol_facade::{ + blobs as sol_blobs, + credit::{CreditApproved, CreditPurchased, CreditRevoked}, + gas::{GasSponsorSet, GasSponsorUnset}, + }, + state::blobs::{AddBlobStateParams, DeleteBlobStateParams}, + State, +}; + +impl BlobsActor { + /// Buy credit with token. + /// + /// The `to` address must be delegated (only delegated addresses can own credit). 
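+ /// + /// Credits scale with the configured `TokenCreditRate`: at a rate of 10^18 (the `RATIO` + /// constant), 1 RECALL buys 1 whole credit, and amounts scale linearly down to a floor of + /// 1 atto credit (see the `token_rate.rs` tests above).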
+ pub fn buy_credit(rt: &impl Runtime, params: BuyCreditParams) -> Result<Account, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new_delegated(rt, params.0, None, CallerOption::Auth)?; + let config = get_config(rt)?; + + let mut credit_amount = Credit::zero(); + let account = rt.transaction(|st: &mut State, rt| { + let pre_buy = st.credits.credit_sold.clone(); + let account = st.buy_credit( + rt.store(), + &config, + caller.state_address(), + rt.message().value_received(), + rt.curr_epoch(), + )?; + credit_amount = &st.credits.credit_sold - &pre_buy; + Ok(account) + })?; + + emit_evm_event( + rt, + CreditPurchased::new(caller.event_address(), credit_amount), + )?; + + account.to_shared(rt) + } + + /// Approve credit and gas usage from one account to another. + /// + /// The `from` address must be delegated (only delegated addresses can own credit). + /// The `from` address must be the message origin or caller. + /// The `to` address must be delegated (only delegated addresses can use credit). + /// The `to` address will be created if it does not exist. + /// TODO: Remove the `caller_allowlist` parameter. + pub fn approve_credit( + rt: &impl Runtime, + params: ApproveCreditParams, + ) -> Result<CreditApproval, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = + Caller::new_delegated(rt, rt.message().caller(), None, CallerOption::Auth)?; + let to_caller = Caller::new_delegated(rt, params.to, None, CallerOption::Create)?; + let config = get_config(rt)?; + + let approval = rt.transaction(|st: &mut State, rt| { + let approval = st.approve_credit( + &config, + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + DelegationOptions { + credit_limit: params.credit_limit, + gas_fee_limit: params.gas_fee_limit, + ttl: params.ttl, + }, + rt.curr_epoch(), + ); + + // For convenience, set the approvee's sponsor to the approver if it was created + if to_caller.created() { + st.set_account_sponsor( + &config, + rt.store(), + to_caller.state_address(), + Some(from_caller.state_address()), + rt.curr_epoch(), + )?; + } + approval + })?; + + emit_evm_event( + rt, + CreditApproved { + from: from_caller.event_address(), + to: to_caller.event_address(), + credit_limit: approval.credit_limit.clone(), + gas_fee_limit: approval.gas_allowance_limit.clone(), + expiry: approval.expiry, + }, + )?; + + Ok(approval) + } + + /// Revoke credit and gas usage from one account to another. + /// + /// The `from` address must be delegated (only delegated addresses can own credit). + /// The `from` address must be the message origin or caller. + /// The `to` address must be delegated (only delegated addresses can use credit). + pub fn revoke_credit(rt: &impl Runtime, params: RevokeCreditParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = + Caller::new_delegated(rt, rt.message().caller(), None, CallerOption::Auth)?; + let to_caller = Caller::new_delegated(rt, params.to, None, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.revoke_credit( + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + ) + })?; + + emit_evm_event( + rt, + CreditRevoked::new(from_caller.event_address(), to_caller.event_address()), + )?; + + Ok(()) + } + + /// Sets or unsets a default credit and gas sponsor from one account to another. + /// + /// If `sponsor` does not exist, the default sponsor is unset. + /// The `from` address must be delegated (only delegated addresses can use credit). 
+ /// The `from` address must be the message origin or caller. + /// The `sponsor` address must be delegated (only delegated addresses can own credit). + pub fn set_account_sponsor( + rt: &impl Runtime, + params: SetSponsorParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = + Caller::new_delegated(rt, rt.message().caller(), params.0, CallerOption::Auth)?; + let config = get_config(rt)?; + + rt.transaction(|st: &mut State, rt| { + st.set_account_sponsor( + &config, + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + rt.curr_epoch(), + ) + })?; + + if let Some(sponsor) = caller.sponsor_address() { + emit_evm_event(rt, GasSponsorSet::new(sponsor))?; + } else { + emit_evm_event(rt, GasSponsorUnset::new())?; + } + + Ok(()) + } + + /// Returns the account for an address. + pub fn get_account( + rt: &impl Runtime, + params: GetAccountParams, + ) -> Result<Option<Account>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.0, None, CallerOption::None)?; + + let account = rt + .state::<State>()? + .get_account(rt.store(), caller.state_address())? + .map(|mut account| { + // Resolve the credit sponsor + account.credit_sponsor = account + .credit_sponsor + .map(|sponsor| to_delegated_address(rt, sponsor)) + .transpose()?; + + account.to_shared(rt) + }); + + account.transpose() + } + + /// Returns the credit approval from one account to another if it exists. + pub fn get_credit_approval( + rt: &impl Runtime, + params: GetCreditApprovalParams, + ) -> Result<Option<CreditApproval>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = Caller::new(rt, params.from, None, CallerOption::None)?; + let to_caller = Caller::new(rt, params.to, None, CallerOption::None)?; + + let approval = rt.state::<State>()?.get_credit_approval( + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + )?; + + Ok(approval) + } + + /// Adds or updates a blob subscription. + /// + /// The subscriber will only need credits for blobs that are not already covered by one of + /// their existing subscriptions. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn add_blob(rt: &impl Runtime, params: AddBlobParams) -> Result<Subscription, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? 
{ + params.from + } else { + rt.message().caller() + }; + let caller = Caller::new_delegated(rt, from, params.sponsor, CallerOption::Auth)?; + let token_amount = rt.message().value_received(); + let config = get_config(rt)?; + + let mut capacity_used = 0; + let (sub, token_rebate) = rt.transaction(|st: &mut State, rt| { + let initial_capacity_used = st.blobs.bytes_size(); + let res = st.add_blob( + rt.store(), + &config, + caller.state_address(), + caller.sponsor_state_address(), + AddBlobStateParams::from_actor_params( + params.clone(), + rt.curr_epoch(), + token_amount, + ), + )?; + capacity_used = st.blobs.bytes_size() - initial_capacity_used; + Ok(res) + })?; + + // Send back unspent tokens + if !token_rebate.is_zero() { + extract_send_result(rt.send_simple( + &caller.state_address(), + METHOD_SEND, + None, + token_rebate, + ))?; + } + + emit_evm_event( + rt, + sol_blobs::BlobAdded { + subscriber: caller.event_address(), + hash: &params.hash, + size: params.size, + expiry: sub.expiry, + bytes_used: capacity_used, + }, + )?; + + Ok(sub) + } + + /// Returns a blob by hash if it exists. + pub fn get_blob(rt: &impl Runtime, params: GetBlobParams) -> Result<Option<Blob>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + match rt.state::<State>()?.get_blob(rt.store(), params.0)? { + Some(blob) => Ok(Some(blob.to_shared(rt)?)), + None => Ok(None), + } + } + + /// Deletes a blob subscription. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn delete_blob(rt: &impl Runtime, params: DeleteBlobParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? { + params.from + } else { + rt.message().caller() + }; + + let caller = Caller::new_delegated(rt, from, params.sponsor, CallerOption::Auth)?; + + let mut capacity_released = 0; + let (_, size, _) = rt.transaction(|st: &mut State, rt| { + let initial_capacity_used = st.blobs.bytes_size(); + let res = st.delete_blob( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + DeleteBlobStateParams::from_actor_params(params.clone(), rt.curr_epoch()), + )?; + capacity_released = initial_capacity_used - st.blobs.bytes_size(); + Ok(res) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobDeleted { + subscriber: caller.event_address(), + hash: &params.hash, + size, + bytes_released: capacity_released, + }, + )?; + + Ok(()) + } + + /// Deletes a blob subscription and adds another in a single call. + /// + /// This method is more efficient than two separate calls to `delete_blob` and `add_blob`, + /// and is useful for some blob workflows like replacing a key in a bucket actor. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn overwrite_blob( + rt: &impl Runtime, + params: OverwriteBlobParams, + ) -> Result<Subscription, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? 
{ + params.add.from + } else { + rt.message().caller() + }; + + let caller = Caller::new_delegated(rt, from, params.add.sponsor, CallerOption::Auth)?; + let config = get_config(rt)?; + + // Determine if we need to delete an existing blob before adding the new one + let overwrite = params.old_hash != params.add.hash; + + let add_hash = params.add.hash; + let add_size = params.add.size; + let mut capacity_released = 0; + let mut capacity_used = 0; + + // To ensure atomicity, we combine the two independent calls into a single transaction. + let (delete, delete_size, sub) = rt.transaction(|st: &mut State, rt| { + let add_params = params.add; + + let initial_capacity_used = st.blobs.bytes_size(); + let (delete, delete_size, _) = if overwrite { + st.delete_blob( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + DeleteBlobStateParams { + hash: params.old_hash, + id: add_params.id.clone(), + epoch: rt.curr_epoch(), + skip_credit_return: false, + }, + )? + } else { + (false, 0, false) + }; + capacity_released = initial_capacity_used - st.blobs.bytes_size(); + + let initial_capacity_used = st.blobs.bytes_size(); + let (subscription, _) = st.add_blob( + rt.store(), + &config, + caller.state_address(), + caller.sponsor_state_address(), + AddBlobStateParams::from_actor_params( + add_params, + rt.curr_epoch(), + TokenAmount::zero(), + ), + )?; + capacity_used = st.blobs.bytes_size() - initial_capacity_used; + + Ok((delete, delete_size, subscription)) + })?; + + if delete { + delete_from_disc(params.old_hash)?; + } + + if overwrite { + emit_evm_event( + rt, + sol_blobs::BlobDeleted { + subscriber: caller.event_address(), + hash: &params.old_hash, + size: delete_size, + bytes_released: capacity_released, + }, + )?; + } + emit_evm_event( + rt, + sol_blobs::BlobAdded { + subscriber: caller.event_address(), + hash: &add_hash, + size: add_size, + expiry: sub.expiry, + bytes_used: capacity_used, + }, + )?; + + Ok(sub) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::{ + construct_and_verify, expect_emitted_add_event, expect_emitted_approve_event, + expect_emitted_purchase_event, expect_emitted_revoke_event, expect_get_config, + }; + use cid::Cid; + use fendermint_actor_blobs_shared::{ + blobs::{BlobStatus, SubscriptionId}, + method::Method, + }; + use fendermint_actor_blobs_testing::{new_hash, new_pk, setup_logs}; + use fil_actors_evm_shared::address::EthAddress; + use fil_actors_runtime::test_utils::{ + MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, EVM_ACTOR_CODE_ID, + }; + // TODO: Re-enable when ADM actor is available + // use fil_actors_runtime::ADM_ACTOR_ADDR; + use fvm_ipld_encoding::ipld_block::IpldBlock; + use fvm_shared::sys::SendFlags; + use fvm_shared::{ + address::Address, bigint::BigInt, clock::ChainEpoch, error::ExitCode, MethodNum, + }; + use recall_actor_sdk::util::Kind; + + // TODO: Re-enable when ADM actor is available + // Stub ADM_ACTOR_ADDR for tests + const ADM_ACTOR_ADDR: Address = Address::new_id(99); + + fn expect_retrieve_bucket_code_cid(rt: &MockRuntime, code_cid: Cid) { + rt.expect_send( + ADM_ACTOR_ADDR, + 2892692559 as MethodNum, + IpldBlock::serialize_cbor(&Kind::Bucket).unwrap(), + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&code_cid).unwrap(), + ExitCode::OK, + None, + ); + } + + #[test] + fn test_buy_credit() { + setup_logs(); + let rt = construct_and_verify(); + + // TODO(bcalza): Choose a rate different than default + let token_credit_rate = BigInt::from(1000000000000000000u64); + + let id_addr = 
Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap(); + + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.set_origin(id_addr); + + let tokens = 1; + let mut expected_credits = + Credit::from_atto(1000000000000000000u64 * tokens * &token_credit_rate); + let mut expected_gas_allowance = TokenAmount::from_whole(tokens); + rt.set_received(TokenAmount::from_whole(tokens)); + rt.expect_validate_caller_any(); + let fund_params = BuyCreditParams(f4_eth_addr); + expect_get_config(&rt); + expect_emitted_purchase_event(&rt, &fund_params, expected_credits.clone()); + let result = rt + .call::<BlobsActor>( + Method::BuyCredit as u64, + IpldBlock::serialize_cbor(&fund_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Account>() + .unwrap(); + assert_eq!(result.credit_free, expected_credits); + assert_eq!(result.gas_allowance, expected_gas_allowance); + rt.verify(); + + let additional_credits = Credit::from_atto(1000000000u64 * tokens * &token_credit_rate); + expected_credits += &additional_credits; + expected_gas_allowance += TokenAmount::from_nano(tokens); + rt.set_received(TokenAmount::from_nano(tokens)); + rt.expect_validate_caller_any(); + let fund_params = BuyCreditParams(f4_eth_addr); + expect_get_config(&rt); + expect_emitted_purchase_event(&rt, &fund_params, additional_credits); + let result = rt + .call::<BlobsActor>( + Method::BuyCredit as u64, + IpldBlock::serialize_cbor(&fund_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Account>() + .unwrap(); + assert_eq!(result.credit_free, expected_credits); + assert_eq!(result.gas_allowance, expected_gas_allowance); + rt.verify(); + + let additional_credits = Credit::from_atto(tokens * &token_credit_rate); + expected_credits += &additional_credits; + expected_gas_allowance += TokenAmount::from_atto(tokens); + rt.set_received(TokenAmount::from_atto(tokens)); + rt.expect_validate_caller_any(); + let fund_params = BuyCreditParams(f4_eth_addr); + expect_get_config(&rt); + expect_emitted_purchase_event(&rt, &fund_params, additional_credits); + let result = rt + .call::<BlobsActor>( + Method::BuyCredit as u64, + IpldBlock::serialize_cbor(&fund_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Account>() + .unwrap(); + assert_eq!(result.credit_free, expected_credits); + assert_eq!(result.gas_allowance, expected_gas_allowance); + rt.verify(); + } + + #[test] + fn test_approve_credit() { + setup_logs(); + let rt = construct_and_verify(); + + // Credit owner + let owner_id_addr = Address::new_id(110); + let owner_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let owner_f4_eth_addr = Address::new_delegated(10, &owner_eth_addr.0).unwrap(); + rt.set_delegated_address(owner_id_addr.id().unwrap(), owner_f4_eth_addr); + + // Credit receiver + let to_id_addr = Address::new_id(111); + let to_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let to_f4_eth_addr = Address::new_delegated(10, &to_eth_addr.0).unwrap(); + rt.set_delegated_address(to_id_addr.id().unwrap(), to_f4_eth_addr); + rt.set_address_actor_type(to_id_addr, *ETHACCOUNT_ACTOR_CODE_ID); + + // Proxy EVM contract on behalf of the credit owner + let proxy_id_addr = Address::new_id(112); + let proxy_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000002" + )); + let proxy_f4_eth_addr = Address::new_delegated(10, 
&proxy_eth_addr.0).unwrap(); + rt.set_delegated_address(proxy_id_addr.id().unwrap(), proxy_f4_eth_addr); + rt.set_address_actor_type(proxy_id_addr, *EVM_ACTOR_CODE_ID); + + // Caller/origin is the same as from (i.e., the standard case) + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, owner_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + expect_get_config(&rt); + let approve_params = ApproveCreditParams { + to: to_id_addr, + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + }; + expect_emitted_approve_event( + &rt, + owner_f4_eth_addr, + to_f4_eth_addr, + approve_params.credit_limit.clone(), + approve_params.gas_fee_limit.clone(), + 0, + ); + let result = rt.call::<BlobsActor>( + Method::ApproveCredit as u64, + IpldBlock::serialize_cbor(&approve_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Proxy caller (caller mismatch with from, hence proxy is the one who approves) + rt.set_caller(*EVM_ACTOR_CODE_ID, proxy_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + expect_get_config(&rt); + let approve_params = ApproveCreditParams { + to: to_id_addr, + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + }; + expect_emitted_approve_event( + &rt, + proxy_f4_eth_addr, + to_f4_eth_addr, + approve_params.credit_limit.clone(), + approve_params.gas_fee_limit.clone(), + 0, + ); + let result = rt.call::<BlobsActor>( + Method::ApproveCredit as u64, + IpldBlock::serialize_cbor(&approve_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + } + + #[test] + fn test_approve_credit_to_new_account() { + setup_logs(); + let rt = construct_and_verify(); + + // Credit owner + let owner_id_addr = Address::new_id(110); + let owner_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let owner_f4_eth_addr = Address::new_delegated(10, &owner_eth_addr.0).unwrap(); + rt.set_delegated_address(owner_id_addr.id().unwrap(), owner_f4_eth_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, owner_id_addr); + rt.set_origin(owner_id_addr); + + // Use a new receiver that doesn't exist in the FVM + let receiver_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let receiver_f4_eth_addr = Address::new_delegated(10, &receiver_eth_addr.0).unwrap(); + + rt.expect_validate_caller_any(); + rt.expect_send_simple( + receiver_f4_eth_addr, + METHOD_SEND, + None, + TokenAmount::zero(), + None, + ExitCode::OK, + ); + let approve_params = ApproveCreditParams { + to: receiver_f4_eth_addr, // Use the external address to force the ID lookup to fail + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + }; + let result = rt.call::<BlobsActor>( + Method::ApproveCredit as u64, + IpldBlock::serialize_cbor(&approve_params).unwrap(), + ); + // This test should pass, but in the mock runtime, sending a token to an address does not + // create the actor, like it does in the real FVM runtime. + // The result is that the second call to to_id_address in the approve_credit method still + // fails after the call to send with a "not found" error. + // However, we are able to test that the call to send did happen using + // rt.expect_send_simple above. 
+ assert!(result.is_err()); + assert_eq!(result.unwrap_err().exit_code(), ExitCode::USR_NOT_FOUND); + rt.verify(); + } + + #[test] + fn test_revoke_credit() { + setup_logs(); + let rt = construct_and_verify(); + + // Credit owner + let owner_id_addr = Address::new_id(110); + let owner_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let owner_f4_eth_addr = Address::new_delegated(10, &owner_eth_addr.0).unwrap(); + rt.set_delegated_address(owner_id_addr.id().unwrap(), owner_f4_eth_addr); + + // Credit receiver + let to_id_addr = Address::new_id(111); + let to_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let to_f4_eth_addr = Address::new_delegated(10, &to_eth_addr.0).unwrap(); + rt.set_delegated_address(to_id_addr.id().unwrap(), to_f4_eth_addr); + rt.set_address_actor_type(to_id_addr, *ETHACCOUNT_ACTOR_CODE_ID); + + // Proxy EVM contract on behalf of the credit owner + let proxy_id_addr = Address::new_id(112); + let proxy_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000002" + )); + let proxy_f4_eth_addr = Address::new_delegated(10, &proxy_eth_addr.0).unwrap(); + rt.set_delegated_address(proxy_id_addr.id().unwrap(), proxy_f4_eth_addr); + rt.set_address_actor_type(proxy_id_addr, *EVM_ACTOR_CODE_ID); + + // Set up the approval to revoke + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, owner_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + expect_get_config(&rt); + let approve_params = ApproveCreditParams { + to: to_id_addr, + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + }; + expect_emitted_approve_event( + &rt, + owner_f4_eth_addr, + to_f4_eth_addr, + approve_params.credit_limit.clone(), + approve_params.gas_fee_limit.clone(), + 0, + ); + let result = rt.call::<BlobsActor>( + Method::ApproveCredit as u64, + IpldBlock::serialize_cbor(&approve_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Caller/origin is the same as from (i.e., the standard case) + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, owner_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + let revoke_params = RevokeCreditParams { + to: to_id_addr, + for_caller: None, + }; + expect_emitted_revoke_event(&rt, owner_f4_eth_addr, to_f4_eth_addr); + let result = rt.call::<BlobsActor>( + Method::RevokeCredit as u64, + IpldBlock::serialize_cbor(&revoke_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Proxy caller (caller mismatch with from, but is correct origin) + rt.set_caller(*EVM_ACTOR_CODE_ID, proxy_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + let revoke_params = RevokeCreditParams { + to: to_id_addr, + for_caller: None, + }; + let result = rt.call::<BlobsActor>( + Method::RevokeCredit as u64, + IpldBlock::serialize_cbor(&revoke_params).unwrap(), + ); + // This should be a state error, not from the actor API + assert!(result.is_err()); + assert!(result.err().unwrap().msg().contains("not found"),); + rt.verify(); + + // Caller/origin mismatch with from + rt.set_caller(*EVM_ACTOR_CODE_ID, proxy_id_addr); + rt.set_origin(owner_id_addr); + rt.expect_validate_caller_any(); + let revoke_params = RevokeCreditParams { + to: to_id_addr, + for_caller: None, + }; + let result = rt.call::<BlobsActor>( + Method::RevokeCredit as u64, + IpldBlock::serialize_cbor(&revoke_params).unwrap(), + ); + let expected_return = Err(ActorError::not_found(format!( + "{} not found in accounts", + proxy_id_addr + ))); + 
assert_eq!(result, expected_return);
+        rt.verify();
+    }
+
+    #[test]
+    fn test_add_blob() {
+        setup_logs();
+        let rt = construct_and_verify();
+
+        let token_credit_rate = BigInt::from(1000000000000000000u64);
+
+        let id_addr = Address::new_id(110);
+        let eth_addr = EthAddress(hex_literal::hex!(
+            "CAFEB0BA00000000000000000000000000000000"
+        ));
+        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();
+
+        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
+        rt.set_origin(id_addr);
+        rt.set_epoch(ChainEpoch::from(0));
+
+        // Try without first funding
+        rt.expect_validate_caller_any();
+        let hash = new_hash(1024);
+        let add_params = AddBlobParams {
+            from: id_addr,
+            sponsor: None,
+            source: new_pk(),
+            hash: hash.0,
+            metadata_hash: new_hash(1024).0,
+            id: SubscriptionId::default(),
+            size: hash.1,
+            ttl: Some(3600),
+        };
+        // TODO: Re-enable when ADM bucket actor is available
+        // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID);
+        expect_get_config(&rt);
+        let result = rt.call::(
+            Method::AddBlob as u64,
+            IpldBlock::serialize_cbor(&add_params).unwrap(),
+        );
+        assert!(result.is_err());
+        rt.verify();
+
+        // Fund an account
+        let tokens = 1;
+        let received = TokenAmount::from_whole(tokens);
+        let expected_credits =
+            Credit::from_atto(1000000000000000000u64 * tokens * &token_credit_rate);
+        rt.set_received(received.clone());
+        rt.expect_validate_caller_any();
+        let fund_params = BuyCreditParams(f4_eth_addr);
+        expect_get_config(&rt);
+        expect_emitted_purchase_event(&rt, &fund_params, expected_credits);
+        let result = rt.call::(
+            Method::BuyCredit as u64,
+            IpldBlock::serialize_cbor(&fund_params).unwrap(),
+        );
+        assert!(result.is_ok());
+        rt.verify();
+
+        // Try with sufficient balance
+        rt.set_received(TokenAmount::zero());
+        rt.set_epoch(ChainEpoch::from(5));
+        rt.expect_validate_caller_any();
+        // TODO: Re-enable when ADM bucket actor is available
+        // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID);
+        expect_get_config(&rt);
+        expect_emitted_add_event(&rt, 5, &add_params, f4_eth_addr, add_params.size);
+        let subscription = rt
+            .call::(
+                Method::AddBlob as u64,
+                IpldBlock::serialize_cbor(&add_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Subscription>()
+            .unwrap();
+        assert_eq!(subscription.added, 5);
+        assert_eq!(subscription.expiry, 3605);
+        assert_eq!(subscription.delegate, None);
+        rt.verify();
+
+        // Get it back
+        rt.expect_validate_caller_any();
+        let get_params = GetBlobParams(hash.0);
+        let blob = rt
+            .call::(
+                Method::GetBlob as u64,
+                IpldBlock::serialize_cbor(&get_params).unwrap(),
+            )
+            .unwrap()
+            .unwrap()
+            .deserialize::<Option<Blob>>()
+            .unwrap();
+        assert!(blob.is_some());
+        let blob = blob.unwrap();
+        assert_eq!(blob.size, add_params.size);
+        assert_eq!(blob.metadata_hash, add_params.metadata_hash);
+        assert_eq!(blob.subscribers.len(), 1);
+        assert_eq!(blob.status, BlobStatus::Added);
+    }
+
+    #[test]
+    fn test_add_blob_inline_buy() {
+        setup_logs();
+        let rt = construct_and_verify();
+
+        let id_addr = Address::new_id(110);
+        let eth_addr = EthAddress(hex_literal::hex!(
+            "CAFEB0BA00000000000000000000000000000000"
+        ));
+        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();
+
+        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
+        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
+        rt.set_origin(id_addr);
+        rt.set_epoch(ChainEpoch::from(0));
+
+        // Try sending a lot
+        rt.expect_validate_caller_any();
+        let hash = new_hash(1024);
+        let add_params
= AddBlobParams { + from: id_addr, + sponsor: None, + source: new_pk(), + hash: hash.0, + metadata_hash: new_hash(1024).0, + id: SubscriptionId::default(), + size: hash.1, + ttl: Some(3600), + }; + let tokens_sent = TokenAmount::from_whole(1); + rt.set_received(tokens_sent.clone()); + rt.set_balance(tokens_sent.clone()); + let tokens_required_atto = add_params.size * add_params.ttl.unwrap() as u64; + let expected_tokens_unspent = tokens_sent.atto() - tokens_required_atto; + // TODO: Re-enable when ADM bucket actor is available + // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID); + expect_get_config(&rt); + expect_emitted_add_event(&rt, 0, &add_params, f4_eth_addr, add_params.size); + rt.expect_send_simple( + id_addr, + METHOD_SEND, + None, + TokenAmount::from_atto(expected_tokens_unspent), + None, + ExitCode::OK, + ); + let result = rt.call::( + Method::AddBlob as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Try sending zero + rt.expect_validate_caller_any(); + rt.set_received(TokenAmount::zero()); + let hash = new_hash(1024); + let add_params = AddBlobParams { + from: id_addr, + sponsor: None, + hash: hash.0, + metadata_hash: new_hash(1024).0, + source: new_pk(), + id: SubscriptionId::default(), + size: hash.1, + ttl: Some(3600), + }; + // TODO: Re-enable when ADM bucket actor is available + // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID); + expect_get_config(&rt); + let response = rt.call::( + Method::AddBlob as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ); + assert!(response.is_err()); + rt.verify(); + + // Try sending the exact amount + let tokens_required_atto = add_params.size * add_params.ttl.unwrap() as u64; + let tokens_sent = TokenAmount::from_atto(tokens_required_atto); + rt.set_received(tokens_sent.clone()); + rt.expect_validate_caller_any(); + let hash = new_hash(1024); + let add_params = AddBlobParams { + from: id_addr, + sponsor: None, + hash: hash.0, + metadata_hash: new_hash(1024).0, + source: new_pk(), + id: SubscriptionId::default(), + size: hash.1, + ttl: Some(3600), + }; + // TODO: Re-enable when ADM bucket actor is available + // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID); + expect_get_config(&rt); + expect_emitted_add_event(&rt, 0, &add_params, f4_eth_addr, add_params.size); + let result = rt.call::( + Method::AddBlob as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + } + + #[test] + fn test_add_blob_with_sponsor() { + setup_logs(); + let rt = construct_and_verify(); + + let token_credit_rate = BigInt::from(1000000000000000000u64); + + // Credit sponsor + let sponsor_id_addr = Address::new_id(110); + let sponsor_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let sponsor_f4_eth_addr = Address::new_delegated(10, &sponsor_eth_addr.0).unwrap(); + rt.set_delegated_address(sponsor_id_addr.id().unwrap(), sponsor_f4_eth_addr); + + // Credit spender + let spender_id_addr = Address::new_id(111); + let spender_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let spender_f4_eth_addr = Address::new_delegated(10, &spender_eth_addr.0).unwrap(); + rt.set_delegated_address(spender_id_addr.id().unwrap(), spender_f4_eth_addr); + rt.set_address_actor_type(spender_id_addr, *ETHACCOUNT_ACTOR_CODE_ID); + + // Sponsor buys credit + let tokens = 1; + let received = TokenAmount::from_whole(tokens); + let 
expected_credits = + Credit::from_atto(1000000000000000000u64 * tokens * &token_credit_rate); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, sponsor_id_addr); + rt.set_received(received); + rt.expect_validate_caller_any(); + let fund_params = BuyCreditParams(sponsor_f4_eth_addr); + expect_get_config(&rt); + expect_emitted_purchase_event(&rt, &fund_params, expected_credits); + let response = rt.call::( + Method::BuyCredit as u64, + IpldBlock::serialize_cbor(&fund_params).unwrap(), + ); + assert!(response.is_ok()); + rt.verify(); + + // Sponsors approve credit + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, sponsor_id_addr); + rt.set_origin(sponsor_id_addr); + rt.expect_validate_caller_any(); + expect_get_config(&rt); + let approve_params = ApproveCreditParams { + to: spender_id_addr, + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + }; + expect_emitted_approve_event( + &rt, + sponsor_f4_eth_addr, + spender_f4_eth_addr, + approve_params.credit_limit.clone(), + approve_params.gas_fee_limit.clone(), + 0, + ); + let response = rt.call::( + Method::ApproveCredit as u64, + IpldBlock::serialize_cbor(&approve_params).unwrap(), + ); + assert!(response.is_ok()); + rt.verify(); + + // Try sending zero + rt.set_origin(spender_id_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, spender_id_addr); + rt.expect_validate_caller_any(); + rt.set_received(TokenAmount::zero()); + let hash = new_hash(1024); + let add_params = AddBlobParams { + from: spender_id_addr, + sponsor: Some(sponsor_id_addr), + hash: hash.0, + metadata_hash: new_hash(1024).0, + source: new_pk(), + id: SubscriptionId::default(), + size: hash.1, + ttl: Some(3600), + }; + // TODO: Re-enable when ADM bucket actor is available + // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID); + expect_get_config(&rt); + expect_emitted_add_event(&rt, 0, &add_params, sponsor_f4_eth_addr, add_params.size); + let response = rt.call::( + Method::AddBlob as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ); + assert!(response.is_ok()); + rt.verify(); + + // Try sending non-zero -> cannot buy for a sponsor, tokens are sent back + rt.set_origin(spender_id_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, spender_id_addr); + rt.expect_validate_caller_any(); + let received = TokenAmount::from_whole(1); + rt.set_received(received.clone()); + rt.set_balance(received.clone()); + let hash = new_hash(1024); + let add_params = AddBlobParams { + from: spender_id_addr, + sponsor: Some(sponsor_id_addr), + hash: hash.0, + metadata_hash: new_hash(1024).0, + source: new_pk(), + id: SubscriptionId::default(), + size: hash.1, + ttl: Some(3600), + }; + // TODO: Re-enable when ADM bucket actor is available + // expect_retrieve_bucket_code_cid(&rt, *ETHACCOUNT_ACTOR_CODE_ID); + expect_get_config(&rt); + expect_emitted_add_event(&rt, 0, &add_params, sponsor_f4_eth_addr, add_params.size); + rt.expect_send_simple( + spender_id_addr, + METHOD_SEND, + None, + received, + None, + ExitCode::OK, + ); + let response = rt.call::( + Method::AddBlob as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ); + assert!(response.is_ok()); + rt.verify(); + } +} diff --git a/fendermint/actors/blobs/src/caller.rs b/fendermint/actors/blobs/src/caller.rs new file mode 100644 index 0000000000..f3f8eae40d --- /dev/null +++ b/fendermint/actors/blobs/src/caller.rs @@ -0,0 +1,748 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::{ 
+    Credit, CreditAllowance, CreditApproval, GasAllowance,
+};
+use fendermint_actor_recall_config_shared::RecallConfig;
+use fil_actors_runtime::ActorError;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount};
+use log::debug;
+use num_traits::Zero;
+use recall_ipld::hamt;
+
+use crate::state::accounts::Account;
+
+/// Helper for managing blobs actor state caller.
+#[allow(clippy::large_enum_variant)]
+pub enum Caller<'a, BS: Blockstore> {
+    Default((Address, Account)),
+    Sponsored(Delegation<'a, &'a BS>),
+}
+
+impl<'a, BS: Blockstore> Caller<'a, BS> {
+    /// Loads the caller and optional sponsor account with its delegation.
+    pub fn load(
+        store: &'a BS,
+        accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>,
+        caller: Address,
+        sponsor: Option<Address>,
+    ) -> Result<Self, ActorError> {
+        let account = accounts.get_or_err(&caller)?;
+        Self::load_account(store, accounts, caller, account, sponsor)
+    }
+
+    /// Loads the caller and the caller's default sponsor with its delegation.
+    /// If the sponsor does not exist or the caller does not have an approval from
+    /// the default sponsor, a default caller type is returned.
+    pub fn load_with_default_sponsor(
+        store: &'a BS,
+        accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>,
+        caller: Address,
+    ) -> Result<Self, ActorError> {
+        let account = accounts.get_or_err(&caller)?;
+        match Self::load_account(
+            store,
+            accounts,
+            caller,
+            account.clone(),
+            account.credit_sponsor,
+        ) {
+            Ok(caller) => Ok(caller),
+            Err(_) => Self::load_account(store, accounts, caller, account, None),
+        }
+    }
+
+    /// Loads the caller and optional sponsor account with its delegation.
+    /// The caller account will be created if one does not exist.
+    pub fn load_or_create(
+        store: &'a BS,
+        accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>,
+        caller: Address,
+        sponsor: Option<Address>,
+        current_epoch: ChainEpoch,
+        max_ttl: ChainEpoch,
+    ) -> Result<Self, ActorError> {
+        let account =
+            accounts.get_or_create(&caller, || Account::new(store, current_epoch, max_ttl))?;
+        Self::load_account(store, accounts, caller, account, sponsor)
+    }
+
+    /// Loads the caller and optional sponsor account with its delegation.
+    pub fn load_account(
+        store: &'a BS,
+        accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>,
+        caller: Address,
+        caller_account: Account,
+        sponsor: Option<Address>,
+    ) -> Result<Self, ActorError> {
+        let sponsor = sponsor.unwrap_or(caller);
+        if sponsor != caller {
+            let delegation = Delegation::load(store, accounts, sponsor, caller, caller_account)?;
+            Ok(Self::Sponsored(delegation))
+        } else {
+            Ok(Self::Default((caller, caller_account)))
+        }
+    }
+
+    /// Returns the caller address.
+    #[allow(dead_code)]
+    pub fn address(&self) -> Address {
+        match self {
+            Self::Default((address, _)) => *address,
+            Self::Sponsored(delegation) => delegation.to,
+        }
+    }
+
+    /// Returns the subscriber address.
+    /// The subscriber is the account responsible for credit and gas fees.
+    /// The subscriber is the caller or the sponsor if one exists.
+    pub fn subscriber_address(&self) -> Address {
+        match self {
+            Self::Default((address, _)) => *address,
+            Self::Sponsored(delegation) => delegation.from,
+        }
+    }
+
+    /// Returns the delegate address.
+    /// The delegate only exists if there's a sponsor.
+    /// If present, the delegate address will be the caller address.
+    pub fn delegate_address(&self) -> Option<Address> {
+        match self {
+            Self::Default(_) => None,
+            Self::Sponsored(delegation) => Some(delegation.to),
+        }
+    }
+
+    /// Returns the underlying delegate approval.
+    /// The delegate only exists if there's a sponsor.
+    pub fn delegate_approval(&self) -> Option<&CreditApproval> {
+        match self {
+            Self::Default(_) => None,
+            Self::Sponsored(delegation) => Some(&delegation.approval_to),
+        }
+    }
+
+    /// Returns the subscriber account.
+    /// The subscriber is the account responsible for credit and gas fees.
+    /// The subscriber is the caller or the sponsor if one exists.
+    pub fn subscriber(&self) -> &Account {
+        match self {
+            Self::Default((_, account)) => account,
+            Self::Sponsored(delegation) => &delegation.from_account,
+        }
+    }
+
+    /// Returns the subscriber account as a mutable reference.
+    /// The subscriber is the account responsible for credit and gas fees.
+    /// The subscriber is the caller or the sponsor if one exists.
+    #[allow(dead_code)]
+    pub fn subscriber_mut(&mut self) -> &mut Account {
+        match self {
+            Self::Default((_, account)) => account,
+            Self::Sponsored(delegation) => &mut delegation.from_account,
+        }
+    }
+
+    /// Returns whether the caller is a delegate.
+    pub fn is_delegate(&self) -> bool {
+        matches!(self, Self::Sponsored(_))
+    }
+
+    /// Sets the default sponsor for the caller or the delegate.
+    pub fn set_default_sponsor(&mut self, sponsor: Option<Address>
) { + match self { + Self::Default((_, account)) => account.credit_sponsor = sponsor, + Self::Sponsored(delegation) => { + delegation.to_account.credit_sponsor = sponsor; + } + } + } + + /// Adds credit and gas allowances to the subscriber. + pub fn add_allowances(&mut self, credit: &Credit, value: &TokenAmount) { + match self { + Self::Default((_, account)) => { + account.credit_free += credit; + account.gas_allowance += value; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_free += credit; + delegation.from_account.gas_allowance += value; + } + } + + debug!("added {} credits to {}", credit, self.subscriber_address()); + debug!( + "added {} gas fee allowance to {}", + value, + self.subscriber_address() + ); + } + + /// Returns the credit allowance for the subscriber. + #[allow(dead_code)] + pub fn credit_allowance(&self, current_epoch: ChainEpoch) -> CreditAllowance { + match self { + Self::Default((_, account)) => CreditAllowance { + amount: account.credit_free.clone(), + ..Default::default() + }, + Self::Sponsored(delegation) => delegation.credit_allowance(current_epoch), + } + } + + /// Returns the gas allowance for the subscriber. + pub fn gas_allowance(&self, current_epoch: ChainEpoch) -> GasAllowance { + match self { + Self::Default((_, account)) => GasAllowance { + amount: account.gas_allowance.clone(), + ..Default::default() + }, + Self::Sponsored(delegation) => delegation.gas_allowance(current_epoch), + } + } + + /// Commits new capacity for the subscriber. + pub fn commit_capacity( + &mut self, + size: u64, + cost: &Credit, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + // Check the subscriber's free credit + if &self.subscriber().credit_free < cost { + return Err(ActorError::insufficient_funds(format!( + "account {} has insufficient credit (available: {}; required: {})", + self.subscriber_address(), + &self.subscriber().credit_free, + cost + ))); + } + match self { + Self::Default((_, account)) => { + account.capacity_used = account.capacity_used.saturating_add(size); + account.credit_free -= cost; + account.credit_committed += cost; + } + Self::Sponsored(delegation) => { + delegation.use_credit_allowance(cost, current_epoch)?; + delegation.from_account.capacity_used = + delegation.from_account.capacity_used.saturating_add(size); + delegation.from_account.credit_free -= cost; + delegation.from_account.credit_committed += cost; + } + } + + debug!("used {} bytes from {}", size, self.subscriber_address()); + debug!( + "committed {} credits from {}", + cost, + self.subscriber_address() + ); + + Ok(()) + } + + /// Releases capacity for the subscriber. + pub fn release_capacity(&mut self, size: u64, cost: &Credit) { + match self { + Self::Default((_, account)) => { + account.capacity_used = account.capacity_used.saturating_sub(size); + account.credit_free += cost; + account.credit_committed -= cost; + } + Self::Sponsored(delegation) => { + delegation.return_credit_allowance(cost); + delegation.from_account.capacity_used = + delegation.from_account.capacity_used.saturating_sub(size); + delegation.from_account.credit_free += cost; + delegation.from_account.credit_committed -= cost; + } + } + + debug!("released {} bytes to {}", size, self.subscriber_address()); + debug!("released {} credits to {}", cost, self.subscriber_address()); + } + + /// Debit credits from the subscriber. 
+ pub fn debit_credit(&mut self, amount: &Credit, current_epoch: ChainEpoch) { + match self { + Self::Default((_, account)) => { + account.credit_committed -= amount; + account.last_debit_epoch = current_epoch; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_committed -= amount; + delegation.from_account.last_debit_epoch = current_epoch; + } + } + + debug!( + "debited {} credits from {}", + amount, + self.subscriber_address() + ); + } + + /// Refund credit to the subscriber. + pub fn refund_credit(&mut self, amount: &Credit, correction: &Credit) { + match self { + Self::Default((_, account)) => { + account.credit_free += amount - correction; + account.credit_committed += correction; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_free += amount - correction; + delegation.from_account.credit_committed += correction; + } + } + + debug!( + "refunded {} credits to {}", + amount - correction, + self.subscriber_address() + ); + } + + /// Returns committed credits to the subscriber. + pub fn return_committed_credit(&mut self, amount: &Credit) { + match self { + Self::Default((_, account)) => { + account.credit_committed += amount; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_committed += amount; + } + } + + debug!( + "returned {} committed credits to {}", + amount, + self.subscriber_address() + ); + } + + /// Updates gas allowance for the subscriber. + pub fn update_gas_allowance( + &mut self, + add_amount: &TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + match self { + Self::Default((_, account)) => { + account.gas_allowance += add_amount; + } + Self::Sponsored(delegation) => { + if add_amount.is_positive() { + delegation.return_gas_allowance(add_amount); + } else if add_amount.is_negative() { + delegation.use_gas_allowance(&-add_amount, current_epoch)?; + } + delegation.from_account.gas_allowance += add_amount; + } + } + + if add_amount.is_positive() { + debug!( + "refunded {} atto to {}", + add_amount.atto(), + self.subscriber_address() + ); + } else { + debug!( + "debited {} atto from {}", + -add_amount.atto(), + self.subscriber_address() + ); + } + Ok(()) + } + + /// Validates the delegate expiration. + pub fn validate_delegate_expiration( + &self, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + match self { + Self::Default(_) => Ok(()), + Self::Sponsored(delegation) => delegation.validate_expiration(current_epoch), + } + } + + /// Validates a blob TTL for the subscriber. + pub fn validate_ttl_usage( + &self, + config: &RecallConfig, + ttl: Option, + ) -> Result { + let ttl = ttl.unwrap_or(config.blob_default_ttl); + if ttl < config.blob_min_ttl { + return Err(ActorError::illegal_argument(format!( + "minimum blob TTL is {}", + config.blob_min_ttl + ))); + } else if ttl > self.subscriber().max_ttl { + return Err(ActorError::forbidden(format!( + "attempt to add a blob with TTL ({}) that exceeds account's max allowed TTL ({})", + ttl, + self.subscriber().max_ttl, + ))); + } + Ok(ttl) + } + + /// Saves state to accounts. + pub fn save( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + match self { + Self::Default((address, account)) => { + accounts.set(address, account.clone())?; + Ok(()) + } + Self::Sponsored(delegation) => delegation.save(accounts), + } + } + + /// Cancels the optional delegation and converts to the default caller type. 
+ pub fn cancel_delegation( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + match self { + Self::Default(_) => Ok(()), + Self::Sponsored(delegation) => { + delegation.cancel(accounts)?; + // Delegation is now invalid, convert to the default caller type + *self = Self::Default((delegation.to, delegation.to_account.clone())); + Ok(()) + } + } + } +} + +/// Helper for handling credit approvals. +pub struct Delegation<'a, BS: Blockstore> { + /// The issuer address. + from: Address, + /// The issuer account. + from_account: Account, + /// The recipient address. + to: Address, + /// The recipient account. + to_account: Account, + /// Approvals from issuer to recipient. + approvals_from: hamt::map::Hamt<'a, BS, Address, CreditApproval>, + /// Approvals to recipient from issuer. + approvals_to: hamt::map::Hamt<'a, BS, Address, CreditApproval>, + /// Approval from issuer to recipient. + approval_from: CreditApproval, + /// Approval to recipient from issuer. + approval_to: CreditApproval, +} + +/// Options for creating a new delegation. +#[derive(Debug, Default)] +pub struct DelegationOptions { + /// Optional credit limit. + pub credit_limit: Option, + /// Optional gas fee limit. + pub gas_fee_limit: Option, + /// Optional time-to-live (TTL). + pub ttl: Option, +} + +impl<'a, BS: Blockstore> Delegation<'a, &'a BS> { + /// Loads an existing delegation. + pub fn load( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + from: Address, + to: Address, + to_account: Account, + ) -> Result { + if from == to { + return Err(ActorError::illegal_argument( + "'from' and 'to' addresses must be different".into(), + )); + } + + let from_account = accounts.get_or_err(&from)?; + let approvals_to = from_account.approvals_to.hamt(store)?; + let approval_to = approvals_to.get(&to)?.ok_or(ActorError::forbidden(format!( + "approval to {} from {} not found", + to, from + )))?; + let approvals_from = to_account.approvals_from.hamt(store)?; + let approval_from = approvals_from + .get(&from)? + .ok_or(ActorError::forbidden(format!( + "approval from {} to {} not found", + from, to + )))?; + + Ok(Self { + from, + from_account, + to, + to_account, + approvals_from, + approvals_to, + approval_from, + approval_to, + }) + } + + /// Creates a new delegation from one account to another. 
+ pub fn update_or_create( + store: &'a BS, + config: &RecallConfig, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + from: Address, + to: Address, + options: DelegationOptions, + current_epoch: ChainEpoch, + ) -> Result { + if let Some(ttl) = options.ttl { + if ttl < config.blob_min_ttl { + return Err(ActorError::illegal_argument(format!( + "minimum approval TTL is {}", + config.blob_min_ttl + ))); + } + } + + let expiry = options.ttl.map(|t| i64::saturating_add(t, current_epoch)); + let approval = CreditApproval { + credit_limit: options.credit_limit.clone(), + gas_allowance_limit: options.gas_fee_limit.clone(), + expiry, + credit_used: Credit::zero(), + gas_allowance_used: TokenAmount::zero(), + }; + + // Get or create accounts + let from_account = accounts.get_or_create(&from, || { + Account::new(store, current_epoch, config.blob_default_ttl) + })?; + let to_account = accounts.get_or_create(&to, || { + Account::new(store, current_epoch, config.blob_default_ttl) + })?; + + // Get or create approvals + let approvals_to = from_account.approvals_to.hamt(store)?; + let approvals_from = to_account.approvals_from.hamt(store)?; + let mut approval_to = approvals_to.get_or_create(&to, || Ok(approval.clone()))?; + let mut approval_from = approvals_from.get_or_create(&from, || Ok(approval))?; + if approval_from != approval_to { + return Err(ActorError::illegal_state(format!( + "'from' account ({}) approval does not match 'to' account ({}) approval", + from, to, + ))); + } + + // Validate approval changes (check one of them since they are equal) + if let Some(limit) = options.credit_limit.as_ref() { + if &approval_to.credit_used > limit { + return Err(ActorError::illegal_argument(format!( + "limit cannot be less than amount of already used credits ({})", + approval_to.credit_used + ))); + } + } + if let Some(limit) = options.gas_fee_limit.as_ref() { + if &approval_to.gas_allowance_used > limit { + return Err(ActorError::illegal_argument(format!( + "limit cannot be less than amount of already used gas fees ({})", + approval_to.gas_allowance_used + ))); + } + } + + approval_from.credit_limit = options.credit_limit.clone(); + approval_from.gas_allowance_limit = options.gas_fee_limit.clone(); + approval_from.expiry = expiry; + approval_to.credit_limit = options.credit_limit; + approval_to.gas_allowance_limit = options.gas_fee_limit; + approval_to.expiry = expiry; + + debug!( + "approval created from {} to {} (credit limit: {:?}; gas fee limit: {:?}, expiry: {:?}", + from, + to, + approval_from.credit_limit, + approval_from.gas_allowance_limit, + approval_from.expiry + ); + + Ok(Self { + to, + to_account, + from, + from_account, + approvals_from, + approvals_to, + approval_from, + approval_to, + }) + } + + /// Return credit allowance to the delegation. + pub fn return_credit_allowance(&mut self, amount: &Credit) { + self.approval_from.credit_used -= amount; + self.approval_to.credit_used -= amount; + } + + /// Use credit allowance from the delegation. + pub fn use_credit_allowance( + &mut self, + amount: &Credit, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + self.validate_expiration(current_epoch)?; + self.validate_credit_usage(amount)?; + self.approval_from.credit_used += amount; + self.approval_to.credit_used += amount; + Ok(()) + } + + /// Return gas allowance to the delegation. 
+ pub fn return_gas_allowance(&mut self, amount: &TokenAmount) { + self.approval_from.gas_allowance_used -= amount; + self.approval_to.gas_allowance_used -= amount; + } + + /// Use gas allowance from the delegation. + pub fn use_gas_allowance( + &mut self, + amount: &TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + self.validate_expiration(current_epoch)?; + self.validate_gas_usage(amount)?; + self.approval_from.gas_allowance_used += amount; + self.approval_to.gas_allowance_used += amount; + Ok(()) + } + + /// Saves state to accounts. + pub fn save( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + // Save the "from" account's "to" approval + self.from_account.approvals_to.save_tracked( + self.approvals_to + .set_and_flush_tracked(&self.to, self.approval_to.clone())?, + ); + // Save the "to" account's "from" approval + self.to_account.approvals_from.save_tracked( + self.approvals_from + .set_and_flush_tracked(&self.from, self.approval_from.clone())?, + ); + // Save the "from" account + accounts.set(&self.from, self.from_account.clone())?; + // Save the "to" account + accounts.set(&self.to, self.to_account.clone())?; + Ok(()) + } + + /// Cancels the underlying approval and saves state to accounts. + pub fn cancel( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + // Remove the "from" account's "to" approval + self.from_account + .approvals_to + .save_tracked(self.approvals_to.delete_and_flush_tracked(&self.to)?.0); + // Remove the "to" account's "from" approval + self.to_account + .approvals_from + .save_tracked(self.approvals_from.delete_and_flush_tracked(&self.from)?.0); + // Save the "from" account + accounts.set(&self.from, self.from_account.clone())?; + // Save the "to" account + accounts.set(&self.to, self.to_account.clone())?; + + debug!("approval canceled from {} to {}", self.from, self.to); + Ok(()) + } + + /// Returns the underlying approval. + pub fn approval(&self) -> &CreditApproval { + &self.approval_to + } + + /// Returns the credit allowance for the subscriber. + #[allow(dead_code)] + pub fn credit_allowance(&self, current_epoch: ChainEpoch) -> CreditAllowance { + let mut allowance = CreditAllowance { + amount: self.to_account.credit_free.clone(), + sponsor: Some(self.from), + sponsored_amount: Credit::zero(), + }; + if self.validate_expiration(current_epoch).is_err() { + return allowance; + } + let approval_used = self.approval_to.credit_used.clone(); + let approval_allowance = self.from_account.credit_free.clone(); + let approval_allowance = self + .approval_to + .credit_limit + .clone() + .map_or(approval_allowance.clone(), |limit| { + (limit - approval_used).min(approval_allowance) + }); + allowance.sponsored_amount = approval_allowance; + allowance + } + + /// Returns the gas allowance for the subscriber. 
+ pub fn gas_allowance(&self, current_epoch: ChainEpoch) -> GasAllowance { + let mut allowance = GasAllowance { + amount: self.to_account.gas_allowance.clone(), + sponsor: Some(self.from), + sponsored_amount: TokenAmount::zero(), + }; + if self.validate_expiration(current_epoch).is_err() { + return allowance; + } + let approval_used = self.approval_to.gas_allowance_used.clone(); + let approval_allowance = self.from_account.gas_allowance.clone(); + let approval_allowance = self + .approval_to + .gas_allowance_limit + .clone() + .map_or(approval_allowance.clone(), |limit| { + (limit - approval_used).min(approval_allowance) + }); + allowance.sponsored_amount = approval_allowance; + allowance + } + + /// Verifies that the delegation's expiry is valid for the current epoch. + pub fn validate_expiration(&self, current_epoch: ChainEpoch) -> Result<(), ActorError> { + self.approval_from.validate_expiration(current_epoch)?; + self.approval_to.validate_expiration(current_epoch)?; + Ok(()) + } + + /// Validates whether the delegation can use the amount of credit. + pub fn validate_credit_usage(&self, amount: &Credit) -> Result<(), ActorError> { + self.approval_from.validate_credit_usage(amount)?; + self.approval_to.validate_credit_usage(amount)?; + Ok(()) + } + + /// Validates whether the delegation can use the amount of gas. + pub fn validate_gas_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> { + self.approval_from.validate_gas_usage(amount)?; + self.approval_to.validate_gas_usage(amount)?; + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/lib.rs b/fendermint/actors/blobs/src/lib.rs new file mode 100644 index 0000000000..e7889e0e19 --- /dev/null +++ b/fendermint/actors/blobs/src/lib.rs @@ -0,0 +1,13 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod caller; +mod shared; +mod sol_facade; +mod state; +#[cfg(test)] +mod testing; + +pub use shared::*; diff --git a/fendermint/actors/blobs/src/shared.rs b/fendermint/actors/blobs/src/shared.rs new file mode 100644 index 0000000000..d130f2a553 --- /dev/null +++ b/fendermint/actors/blobs/src/shared.rs @@ -0,0 +1,8 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +pub use crate::state::State; + +/// The name of the blob actor. 
+pub const BLOBS_ACTOR_NAME: &str = "blobs"; diff --git a/fendermint/actors/blobs/src/sol_facade/blobs.rs b/fendermint/actors/blobs/src/sol_facade/blobs.rs new file mode 100644 index 0000000000..451c99fd28 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/blobs.rs @@ -0,0 +1,305 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::{ + AddBlobParams, Blob, BlobStatus, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, + TrimBlobExpiriesParams, + }, + bytes::B256, + GetStatsReturn, +}; +use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use num_traits::Zero; +use recall_actor_sdk::evm::TryIntoEVMEvent; +pub use recall_sol_facade::blobs::Calls; +use recall_sol_facade::{ + blobs as sol, + primitives::U256, + types::{BigUintWrapper, SolCall, SolInterface, H160}, +}; + +use crate::sol_facade::{AbiCall, AbiCallRuntime, AbiEncodeError}; + +// ----- Events ----- // + +pub struct BlobAdded<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub size: u64, + pub expiry: ChainEpoch, + pub bytes_used: u64, +} + +impl TryIntoEVMEvent for BlobAdded<'_> { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobAdded(sol::BlobAdded { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + size: U256::from(self.size), + expiry: U256::from(self.expiry), + bytesUsed: U256::from(self.bytes_used), + })) + } +} + +pub struct BlobPending<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub source: &'a B256, +} +impl TryIntoEVMEvent for BlobPending<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobPending(sol::BlobPending { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + sourceId: self.source.0.into(), + })) + } +} + +pub struct BlobFinalized<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub resolved: bool, +} +impl TryIntoEVMEvent for BlobFinalized<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobFinalized(sol::BlobFinalized { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + resolved: self.resolved, + })) + } +} + +pub struct BlobDeleted<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub size: u64, + pub bytes_released: u64, +} +impl TryIntoEVMEvent for BlobDeleted<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobDeleted(sol::BlobDeleted { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + size: U256::from(self.size), + bytesReleased: U256::from(self.bytes_released), + })) + } +} + +// ----- Calls ----- // + +pub fn can_handle(input_data: &recall_actor_sdk::evm::InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &recall_actor_sdk::evm::InputData) -> Result { + Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +fn blob_status_as_solidity_enum(blob_status: BlobStatus) -> u8 { + match blob_status { + BlobStatus::Added => 0, + BlobStatus::Pending => 1, + BlobStatus::Resolved => 2, + 
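+        // NOTE: this mapping must stay aligned with the enum ordering in the
+        // Solidity facade; getBlobCall below also reuses Failed (3) as the
+        // placeholder status when no blob is found.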
+        BlobStatus::Failed => 3,
+    }
+}
+
+impl AbiCallRuntime for sol::addBlobCall {
+    type Params = Result<AddBlobParams, ActorError>;
+    type Returns = ();
+    type Output = Vec<u8>;
+    fn params(&self, rt: &impl Runtime) -> Self::Params {
+        let sponsor: Option<Address>
= H160::from(self.sponsor).as_option().map(|a| a.into()); + let source = B256(self.source.into()); + let hash = B256(self.blobHash.into()); + let metadata_hash = B256(self.metadataHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let size = self.size; + let ttl = if self.ttl.is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let from = rt.message().caller(); + Ok(AddBlobParams { + sponsor, + source, + hash, + metadata_hash, + id: subscription_id, + size, + ttl, + from, + }) + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCallRuntime for sol::deleteBlobCall { + type Params = Result; + type Returns = (); + type Output = Vec; + fn params(&self, rt: &impl Runtime) -> Self::Params { + let subscriber = H160::from(self.subscriber).as_option().map(|a| a.into()); + let hash = B256(self.blobHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let from = rt.message().caller(); + Ok(DeleteBlobParams { + sponsor: subscriber, + hash, + id: subscription_id, + from, + }) + } + fn returns(&self, _: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&()) + } +} + +impl AbiCall for sol::getBlobCall { + type Params = Result; + type Returns = Option; + type Output = Result, AbiEncodeError>; + fn params(&self) -> Self::Params { + let blob_hash = B256(self.blobHash.into()); + Ok(GetBlobParams(blob_hash)) + } + fn returns(&self, blob: Self::Returns) -> Self::Output { + let blob = if let Some(blob) = blob { + sol::Blob { + size: blob.size, + metadataHash: blob.metadata_hash.0.into(), + status: blob_status_as_solidity_enum(blob.status), + subscriptions: blob + .subscribers + .iter() + .map(|(subscription_id, expiry)| sol::Subscription { + expiry: *expiry as u64, + subscriptionId: subscription_id.clone().into(), + }) + .collect(), + } + } else { + sol::Blob { + size: 0, + metadataHash: [0u8; 32].into(), + status: blob_status_as_solidity_enum(BlobStatus::Failed), + subscriptions: Vec::default(), + } + }; + Ok(Self::abi_encode_returns(&(blob,))) + } +} + +impl AbiCall for sol::getStatsCall { + type Params = (); + type Returns = GetStatsReturn; + type Output = Vec; + fn params(&self) -> Self::Params {} + fn returns(&self, stats: Self::Returns) -> Self::Output { + let subnet_stats = sol::SubnetStats { + balance: BigUintWrapper::from(stats.balance).into(), + capacityFree: stats.capacity_free, + capacityUsed: stats.capacity_used, + creditSold: BigUintWrapper::from(stats.credit_sold).into(), + creditCommitted: BigUintWrapper::from(stats.credit_committed).into(), + creditDebited: BigUintWrapper::from(stats.credit_debited).into(), + tokenCreditRate: BigUintWrapper(stats.token_credit_rate.rate().clone()).into(), + numAccounts: stats.num_accounts, + numBlobs: stats.num_blobs, + numAdded: stats.num_added, + bytesAdded: stats.bytes_added, + numResolving: stats.num_resolving, + bytesResolving: stats.bytes_resolving, + }; + Self::abi_encode_returns(&(subnet_stats,)) + } +} + +impl AbiCallRuntime for sol::overwriteBlobCall { + type Params = Result; + type Returns = (); + type Output = Vec; + fn params(&self, rt: &impl Runtime) -> Self::Params { + let old_hash = B256(self.oldHash.into()); + let sponsor = H160::from(self.sponsor).as_option().map(|a| a.into()); + let source = B256(self.source.into()); + let hash = B256(self.blobHash.into()); + let metadata_hash = B256(self.metadataHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let size = 
self.size; + let ttl = if self.ttl.is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let from = rt.message().caller(); + Ok(OverwriteBlobParams { + old_hash, + add: AddBlobParams { + sponsor, + source, + hash, + metadata_hash, + id: subscription_id, + size, + ttl, + from, + }, + }) + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::trimBlobExpiriesCall { + type Params = TrimBlobExpiriesParams; + type Returns = (u32, Option); + type Output = Vec; + + fn params(&self) -> Self::Params { + let limit = self.limit; + let limit = if limit.is_zero() { None } else { Some(limit) }; + let hash: [u8; 32] = self.startingHash.into(); + let hash = if hash == [0; 32] { + None + } else { + Some(B256(hash)) + }; + TrimBlobExpiriesParams { + subscriber: H160::from(self.subscriber).into(), + limit, + starting_hash: hash, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let next_key = returns.1; + let next_key = next_key.unwrap_or_default(); + let cursor = sol::TrimBlobExpiries { + processed: returns.0, + nextKey: next_key.0.into(), + }; + Self::abi_encode_returns(&(cursor,)) + } +} diff --git a/fendermint/actors/blobs/src/sol_facade/credit.rs b/fendermint/actors/blobs/src/sol_facade/credit.rs new file mode 100644 index 0000000000..c59e83bbb5 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/credit.rs @@ -0,0 +1,442 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::{HashMap, HashSet}; + +use anyhow::Error; +use fendermint_actor_blobs_shared::{ + accounts::{Account, AccountStatus, GetAccountParams, SetAccountStatusParams}, + credit::{ + ApproveCreditParams, BuyCreditParams, Credit, CreditApproval, GetCreditApprovalParams, + RevokeCreditParams, SetSponsorParams, + }, +}; +use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use recall_actor_sdk::{evm::TryIntoEVMEvent, util::token_to_biguint}; +pub use recall_sol_facade::credit::Calls; +use recall_sol_facade::{ + credit as sol, + primitives::U256, + types::{BigUintWrapper, SolCall, SolInterface, H160}, +}; + +use crate::sol_facade::{AbiCall, AbiCallRuntime, AbiEncodeError}; + +pub struct CreditPurchased { + from: Address, + amount: TokenAmount, +} +impl CreditPurchased { + pub fn new(from: Address, amount: TokenAmount) -> Self { + Self { from, amount } + } +} +impl TryIntoEVMEvent for CreditPurchased { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let from: H160 = self.from.try_into()?; + let amount = token_to_biguint(Some(self.amount)); + Ok(sol::Events::CreditPurchased(sol::CreditPurchased { + from: from.into(), + amount: BigUintWrapper(amount).into(), + })) + } +} + +pub struct CreditApproved { + pub from: Address, + pub to: Address, + pub credit_limit: Option, + pub gas_fee_limit: Option, + pub expiry: Option, +} +impl TryIntoEVMEvent for CreditApproved { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let from: H160 = self.from.try_into()?; + let to: H160 = self.to.try_into()?; + let credit_limit = token_to_biguint(self.credit_limit); + let gas_fee_limit = token_to_biguint(self.gas_fee_limit); + Ok(sol::Events::CreditApproved(sol::CreditApproved { + from: from.into(), + to: to.into(), + creditLimit: BigUintWrapper(credit_limit).into(), + gasFeeLimit: 
BigUintWrapper(gas_fee_limit).into(),
+            expiry: U256::from(self.expiry.unwrap_or_default()),
+        }))
+    }
+}
+
+pub struct CreditRevoked {
+    pub from: Address,
+    pub to: Address,
+}
+impl CreditRevoked {
+    pub fn new(from: Address, to: Address) -> Self {
+        Self { from, to }
+    }
+}
+impl TryIntoEVMEvent for CreditRevoked {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, Error> {
+        let from: H160 = self.from.try_into()?;
+        let to: H160 = self.to.try_into()?;
+        Ok(sol::Events::CreditRevoked(sol::CreditRevoked {
+            from: from.into(),
+            to: to.into(),
+        }))
+    }
+}
+
+pub struct CreditDebited {
+    pub amount: TokenAmount,
+    pub num_accounts: u64,
+    pub more_accounts: bool,
+}
+impl TryIntoEVMEvent for CreditDebited {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, Error> {
+        let amount = token_to_biguint(Some(self.amount));
+        Ok(sol::Events::CreditDebited(sol::CreditDebited {
+            amount: BigUintWrapper(amount).into(),
+            numAccounts: U256::from(self.num_accounts),
+            moreAccounts: self.more_accounts,
+        }))
+    }
+}
+
+// ----- Calls ----- //
+
+pub fn can_handle(input_data: &recall_actor_sdk::evm::InputData) -> bool {
+    Calls::valid_selector(input_data.selector())
+}
+
+pub fn parse_input(input: &recall_actor_sdk::evm::InputData) -> Result<Calls, ActorError> {
+    Calls::abi_decode_raw(input.selector(), input.calldata(), true)
+        .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e)))
+}
+
+/// function buyCredit() external payable;
+impl AbiCallRuntime for sol::buyCredit_0Call {
+    type Params = BuyCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self, rt: &impl Runtime) -> Self::Params {
+        let recipient = rt.message().caller();
+        BuyCreditParams(recipient)
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function buyCredit(address recipient) external payable;
+impl AbiCall for sol::buyCredit_1Call {
+    type Params = BuyCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let recipient: Address = H160::from(self.recipient).into();
+        BuyCreditParams(recipient)
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function approveCredit(address to) external;
+impl AbiCall for sol::approveCredit_0Call {
+    type Params = ApproveCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let to: Address = H160::from(self.to).into();
+        ApproveCreditParams {
+            to,
+            caller_allowlist: None,
+            credit_limit: None,
+            gas_fee_limit: None,
+            ttl: None,
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external;
+impl AbiCall for sol::approveCredit_1Call {
+    type Params = ApproveCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let to: Address = H160::from(self.to).into();
+        let caller_allowlist: HashSet<Address> = HashSet::from_iter(
+            self.caller
+                .iter()
+                .map(|sol_address| H160::from(*sol_address).into()),
+        );
+        let credit_limit: Credit = BigUintWrapper::from(self.creditLimit).into();
+        let gas_fee_limit: TokenAmount = BigUintWrapper::from(self.gasFeeLimit).into();
+        let ttl = self.ttl;
+        ApproveCreditParams {
+            to,
+            caller_allowlist: Some(caller_allowlist),
+            credit_limit: Some(credit_limit),
+            gas_fee_limit: Some(gas_fee_limit),
+            ttl: Some(ttl as ChainEpoch),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function approveCredit(address to, address[] memory caller) external;
+impl AbiCall for sol::approveCredit_2Call {
+    type Params = ApproveCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let to: Address = H160::from(self.to).into();
+        let caller_allowlist: HashSet<Address> = HashSet::from_iter(
+            self.caller
+                .iter()
+                .map(|sol_address| H160::from(*sol_address).into()),
+        );
+        ApproveCreditParams {
+            to,
+            caller_allowlist: Some(caller_allowlist),
+            credit_limit: None,
+            gas_fee_limit: None,
+            ttl: None,
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function revokeCredit(address to, address caller) external;
+impl AbiCall for sol::revokeCredit_0Call {
+    type Params = RevokeCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let to: Address = H160::from(self.to).into();
+        let caller: Address = H160::from(self.caller).into();
+        RevokeCreditParams {
+            to,
+            for_caller: Some(caller),
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function revokeCredit(address to) external;
+impl AbiCall for sol::revokeCredit_1Call {
+    type Params = RevokeCreditParams;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let to: Address = H160::from(self.to).into();
+        RevokeCreditParams {
+            to,
+            for_caller: None,
+        }
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
+
+/// function setAccountSponsor(address from, address sponsor) external;
+impl AbiCall for sol::setAccountSponsorCall {
+    type Params = SetSponsorParams; // FIXME SU Needs runtime for "from"
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let sponsor = H160::from(self.sponsor);
+        let sponsor: Option<Address>
= if sponsor.is_null() { + None + } else { + Some(sponsor.into()) + }; + SetSponsorParams(sponsor) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +fn convert_approvals( + approvals: HashMap, +) -> Result, Error> { + approvals + .iter() + .map(|(address, credit_approval)| { + let approval = sol::Approval { + addr: H160::try_from(*address)?.into(), + approval: sol::CreditApproval { + creditLimit: credit_approval + .credit_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + gasFeeLimit: credit_approval + .gas_allowance_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + expiry: credit_approval.expiry.unwrap_or_default() as u64, + creditUsed: BigUintWrapper::from(credit_approval.credit_used.clone()).into(), + gasFeeUsed: BigUintWrapper::from(credit_approval.gas_allowance_used.clone()) + .into(), + }, + }; + Ok(approval) + }) + .collect::, Error>>() +} + +/// function getAccount(address addr) external view returns (Account memory account); +impl AbiCall for sol::getAccountCall { + type Params = GetAccountParams; + type Returns = Option; + type Output = Result, AbiEncodeError>; + + fn params(&self) -> Self::Params { + let address: Address = H160::from(self.addr).into(); + GetAccountParams(address) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let sol_account = if let Some(account) = returns { + let credit_sponsor: H160 = account + .credit_sponsor + .map(H160::try_from) + .transpose()? + .unwrap_or_default(); + let approvals_from = convert_approvals(account.approvals_from)?; + let approvals_to = convert_approvals(account.approvals_to)?; + sol::Account { + capacityUsed: account.capacity_used, + creditFree: BigUintWrapper::from(account.credit_free).into(), + creditCommitted: BigUintWrapper::from(account.credit_committed).into(), + creditSponsor: credit_sponsor.into(), + lastDebitEpoch: account.last_debit_epoch as u64, + approvalsFrom: approvals_from, + approvalsTo: approvals_to, + maxTtl: account.max_ttl as u64, + gasAllowance: BigUintWrapper::from(account.gas_allowance).into(), + } + } else { + sol::Account { + capacityUsed: u64::default(), + creditFree: U256::default(), + creditCommitted: U256::default(), + creditSponsor: H160::default().into(), + lastDebitEpoch: u64::default(), + approvalsTo: Vec::default(), + approvalsFrom: Vec::default(), + maxTtl: u64::default(), + gasAllowance: U256::default(), + } + }; + Ok(Self::abi_encode_returns(&(sol_account,))) + } +} + +/// function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); +impl AbiCall for sol::getCreditApprovalCall { + type Params = GetCreditApprovalParams; + type Returns = Option; + type Output = Vec; + + fn params(&self) -> Self::Params { + let from = H160::from(self.from); + let to = H160::from(self.to); + GetCreditApprovalParams { + from: from.into(), + to: to.into(), + } + } + + fn returns(&self, value: Self::Returns) -> Self::Output { + let approval_result = if let Some(credit_approval) = value { + sol::CreditApproval { + creditLimit: credit_approval + .credit_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + gasFeeLimit: credit_approval + .gas_allowance_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + expiry: credit_approval.expiry.unwrap_or_default() as u64, + creditUsed: BigUintWrapper::from(credit_approval.credit_used.clone()).into(), + gasFeeUsed: 
BigUintWrapper::from(credit_approval.gas_allowance_used.clone()).into(),
+            }
+        } else {
+            sol::CreditApproval {
+                creditLimit: BigUintWrapper::default().into(),
+                gasFeeLimit: BigUintWrapper::default().into(),
+                expiry: u64::default(),
+                creditUsed: BigUintWrapper::default().into(),
+                gasFeeUsed: BigUintWrapper::default().into(),
+            }
+        };
+        Self::abi_encode_returns(&(approval_result,))
+    }
+}
+
+/// function setAccountStatus(address subscriber, TtlStatus ttlStatus) external;
+impl AbiCall for sol::setAccountStatusCall {
+    type Params = Result<SetAccountStatusParams, ActorError>;
+    type Returns = ();
+    type Output = Vec<u8>;
+
+    fn params(&self) -> Self::Params {
+        let subscriber = H160::from(self.subscriber);
+        let ttl_status = match self.ttlStatus {
+            0 => AccountStatus::Default,
+            1 => AccountStatus::Reduced,
+            2 => AccountStatus::Extended,
+            _ => return Err(actor_error!(illegal_argument, "invalid account status")),
+        };
+        Ok(SetAccountStatusParams {
+            subscriber: subscriber.into(),
+            status: ttl_status,
+        })
+    }
+
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&returns)
+    }
+}
diff --git a/fendermint/actors/blobs/src/sol_facade/gas.rs b/fendermint/actors/blobs/src/sol_facade/gas.rs
new file mode 100644
index 0000000000..137efc8b50
--- /dev/null
+++ b/fendermint/actors/blobs/src/sol_facade/gas.rs
@@ -0,0 +1,40 @@
+// Copyright 2022-2024 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use anyhow::Error;
+use fvm_shared::address::Address;
+use recall_actor_sdk::evm::TryIntoEVMEvent;
+use recall_sol_facade::gas as sol;
+use recall_sol_facade::types::H160;
+
+pub struct GasSponsorSet {
+    sponsor: Address,
+}
+impl GasSponsorSet {
+    pub fn new(sponsor: Address) -> Self {
+        Self { sponsor }
+    }
+}
+impl TryIntoEVMEvent for GasSponsorSet {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, Error> {
+        let sponsor: H160 = self.sponsor.try_into()?;
+        Ok(sol::Events::GasSponsorSet(sol::GasSponsorSet {
+            sponsor: sponsor.into(),
+        }))
+    }
+}
+
+pub struct GasSponsorUnset {}
+impl GasSponsorUnset {
+    pub fn new() -> Self {
+        Self {}
+    }
+}
+impl TryIntoEVMEvent for GasSponsorUnset {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, Error> {
+        Ok(sol::Events::GasSponsorUnset(sol::GasSponsorUnset {}))
+    }
+}
diff --git a/fendermint/actors/blobs/src/sol_facade/mod.rs b/fendermint/actors/blobs/src/sol_facade/mod.rs
new file mode 100644
index 0000000000..ff19938b6f
--- /dev/null
+++ b/fendermint/actors/blobs/src/sol_facade/mod.rs
@@ -0,0 +1,11 @@
+// Copyright 2022-2024 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use recall_actor_sdk::declare_abi_call;
+
+declare_abi_call!();
+
+pub mod blobs;
+pub mod credit;
+pub mod gas;
diff --git a/fendermint/actors/blobs/src/state.rs b/fendermint/actors/blobs/src/state.rs
new file mode 100644
index 0000000000..87f0b87508
--- /dev/null
+++ b/fendermint/actors/blobs/src/state.rs
@@ -0,0 +1,491 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fendermint_actor_blobs_shared::GetStatsReturn;
+use fendermint_actor_recall_config_shared::RecallConfig;
+use fil_actors_runtime::ActorError;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::econ::TokenAmount;
+
+pub mod accounts;
+pub mod blobs;
+pub mod credit;
+pub mod operators;
+
+use accounts::Accounts;
+use blobs::{Blobs, DeleteBlobStateParams};
+use 
credit::Credits; +use operators::Operators; + +/// The state represents all accounts and stored blobs. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// Struct containing credit-related state. + pub credits: Credits, + /// HAMT containing all accounts keyed by actor ID address. + pub accounts: Accounts, + /// HAMT containing all blobs keyed by blob hash. + pub blobs: Blobs, + /// Registry of node operators for blob storage. + pub operators: Operators, +} + +impl State { + /// Creates a new [`State`]. + pub fn new(store: &BS) -> Result { + Ok(Self { + credits: Credits::default(), + accounts: Accounts::new(store)?, + blobs: Blobs::new(store)?, + operators: Operators::new(store)?, + }) + } + + /// Returns stats about the current actor state. + pub fn get_stats(&self, config: &RecallConfig, balance: TokenAmount) -> GetStatsReturn { + GetStatsReturn { + balance, + capacity_free: self.capacity_available(config.blob_capacity), + capacity_used: self.blobs.bytes_size(), + credit_sold: self.credits.credit_sold.clone(), + credit_committed: self.credits.credit_committed.clone(), + credit_debited: self.credits.credit_debited.clone(), + token_credit_rate: config.token_credit_rate.clone(), + num_accounts: self.accounts.len(), + num_blobs: self.blobs.len(), + num_added: self.blobs.added.len(), + bytes_added: self.blobs.added.bytes_size(), + num_resolving: self.blobs.pending.len(), + bytes_resolving: self.blobs.pending.bytes_size(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::state::blobs::{ + AddBlobStateParams, FinalizeBlobStateParams, SetPendingBlobStateParams, + }; + use fendermint_actor_blobs_shared::{ + blobs::{BlobStatus, SubscriptionId}, + bytes::B256, + credit::Credit, + }; + use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, new_subscription_id, setup_logs, + }; + use fvm_ipld_blockstore::MemoryBlockstore; + use fvm_shared::{address::Address, clock::ChainEpoch}; + use log::{debug, warn}; + use num_traits::Zero; + use rand::{seq::SliceRandom, Rng}; + use std::collections::{BTreeMap, HashMap}; + + #[allow(dead_code)] + fn test_simulate_one_day_multiple_runs() { + const NUM_RUNS: usize = 1000; + let mut successful_runs = 0; + + for _ in 0..NUM_RUNS { + // Run the test in a way that we can catch panics + let result = std::panic::catch_unwind(|| { + // Call the existing test method + test_simulate_one_day(); + }); + + match result { + Ok(_) => { + successful_runs += 1; + } + Err(_) => { + break; + } + } + } + + println!("------- Test Summary -------"); + println!("Total runs: {}", NUM_RUNS); + println!("Successful runs: {}", successful_runs); + println!("Failed runs: {}", NUM_RUNS - successful_runs); + println!( + "Success rate: {:.2}%", + (successful_runs as f64 / NUM_RUNS as f64) * 100.0 + ); + + // Fail the overall test if any run failed + assert_eq!( + successful_runs, + NUM_RUNS, + "{} out of {} test runs failed or didn't run", + NUM_RUNS - successful_runs, + NUM_RUNS + ); + } + + #[test] + fn test_simulate_one_day() { + setup_logs(); + + let config = RecallConfig { + blob_credit_debit_interval: ChainEpoch::from(10), + blob_min_ttl: ChainEpoch::from(10), + ..Default::default() + }; + + #[derive(Clone, Debug)] + struct TestBlob { + hash: B256, + metadata_hash: B256, + size: u64, + added: HashMap>, // added, expiry + } + + fn generate_test_blobs(count: i64, min_size: usize, max_size: usize) -> Vec { + let mut blobs = Vec::new(); + let mut rng = rand::thread_rng(); + + for _ in 0..count { + let size = 
rng.gen_range(min_size..=max_size);
+                let (hash, size) = new_hash(size);
+                blobs.push(TestBlob {
+                    hash,
+                    metadata_hash: new_metadata_hash(),
+                    size,
+                    added: HashMap::new(),
+                });
+            }
+            blobs
+        }
+
+        fn generate_test_users<BS: Blockstore>(
+            config: &RecallConfig,
+            store: &BS,
+            state: &mut State,
+            credit_tokens: TokenAmount,
+            count: i64,
+        ) -> Vec<Address>
{
+            let mut users = Vec::new();
+            for _ in 0..count {
+                let user = new_address();
+                state
+                    .buy_credit(&store, config, user, credit_tokens.clone(), 0)
+                    .unwrap();
+                users.push(user);
+            }
+            users
+        }
+
+        // Test params
+        let epochs: i64 = 360; // num. epochs to run test for
+        let user_pool_size: i64 = 10; // some may not be used, some will be used more than once
+        let blob_pool_size: i64 = user_pool_size; // some may not be used, some will be used more than once
+        let min_ttl = config.blob_min_ttl;
+        let max_ttl = epochs;
+        let min_size = 10;
+        let max_size = 1000;
+        let add_intervals = [1, 2, 4, 8, 10, 12, 15, 20]; // used to add at random intervals
+        let max_resolve_epochs = 30; // max num. epochs in future to resolve
+        let debit_interval: i64 = config.blob_credit_debit_interval; // interval at which to debit all accounts
+        let percent_fail_resolve = 0.1; // controls % of subscriptions that fail to resolve
+
+        // Set up store and state
+        let store = MemoryBlockstore::default();
+        let mut state = State::new(&store).unwrap();
+        let mut rng = rand::thread_rng();
+
+        // Get some users
+        let credit_tokens = TokenAmount::from_whole(100); // buy a lot
+        let user_credit: Credit = credit_tokens.clone() * &config.token_credit_rate;
+        let users = generate_test_users(&config, &store, &mut state, credit_tokens, user_pool_size);
+
+        // Get some blobs.
+        let mut blobs = generate_test_blobs(blob_pool_size, min_size, max_size);
+
+        // Map of resolve epochs to a set of blob indexes
+        #[allow(clippy::type_complexity)]
+        let mut resolves: BTreeMap<
+            ChainEpoch,
+            Vec<(Address, SubscriptionId, B256, u64, B256)>,
+        > = BTreeMap::new();
+        #[allow(clippy::type_complexity)]
+        let mut statuses: HashMap<
+            (Address, SubscriptionId, B256),
+            (BlobStatus, ChainEpoch),
+        > = HashMap::new();
+
+        // Walk epochs.
+        // We go for twice the parameterized epochs to ensure all subscriptions can expire.
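+        // Phase 1 (epoch <= epochs): blobs and subscriptions are added at random
+        // intervals. Phase 2 (epoch > epochs): nothing new is added, so the
+        // remaining subscriptions expire and are debited, draining committed
+        // credit and used capacity back to zero.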
+        let mut num_added = 0;
+        let mut num_readded = 0;
+        let mut num_resolved = 0;
+        let mut num_failed = 0;
+        for epoch in 1..=epochs * 2 {
+            if epoch <= epochs {
+                let add_interval = add_intervals.choose(&mut rng).unwrap().to_owned();
+                if epoch % add_interval == 0 {
+                    // Add a random blob with a random user
+                    let blob_index = rng.gen_range(0..blobs.len());
+                    let blob = &mut blobs[blob_index];
+                    let user_index = rng.gen_range(0..users.len());
+                    let user = users[user_index];
+                    let sub_id = new_subscription_id(7);
+                    let ttl = rng.gen_range(min_ttl..=max_ttl);
+                    let source = new_pk();
+
+                    let res = state.add_blob(
+                        &store,
+                        &config,
+                        user,
+                        None,
+                        AddBlobStateParams {
+                            hash: blob.hash,
+                            metadata_hash: blob.metadata_hash,
+                            id: sub_id.clone(),
+                            size: blob.size,
+                            ttl: Some(ttl),
+                            source,
+                            epoch,
+                            token_amount: TokenAmount::zero(),
+                        },
+                    );
+                    assert!(res.is_ok());
+
+                    if blob.added.is_empty() {
+                        num_added += 1;
+                        warn!(
+                            "added new blob {} at epoch {} with ttl {}",
+                            blob.hash, epoch, ttl
+                        );
+                    } else {
+                        num_readded += 1;
+                        warn!(
+                            "added new sub to blob {} at epoch {} with ttl {}",
+                            blob.hash, epoch, ttl
+                        );
+                    }
+
+                    // Determine if this will fail or not
+                    let fail = rng.gen_bool(percent_fail_resolve);
+                    let status = if fail {
+                        BlobStatus::Failed
+                    } else {
+                        BlobStatus::Resolved
+                    };
+                    statuses.insert((user, sub_id.clone(), blob.hash), (status.clone(), 0));
+
+                    // Track blob interval per user
+                    let expiry = epoch + ttl;
+                    let added = blob.added.entry(user).or_insert(Vec::new());
+                    added.push((sub_id.into(), epoch, expiry));
+                }
+            }
+
+            // Every `debit_interval` epochs, debit all accounts
+            if epoch % debit_interval == 0 {
+                let (deletes_from_disc, _) = state.debit_accounts(&store, &config, epoch).unwrap();
+                warn!(
+                    "deleting {} blobs at epoch {}",
+                    deletes_from_disc.len(),
+                    epoch
+                );
+            }
+
+            // Move added blobs to pending state
+            let added_blobs = state.get_added_blobs(&store, 1000).unwrap();
+            for (hash, size, sources) in added_blobs {
+                for (user, id, source) in sources {
+                    warn!(
+                        "processing added blob {} for {} at epoch {} (id: {})",
+                        hash, user, epoch, id
+                    );
+                    state
+                        .set_blob_pending(
+                            &store,
+                            user,
+                            SetPendingBlobStateParams {
+                                source,
+                                hash,
+                                size,
+                                id,
+                            },
+                        )
+                        .unwrap();
+                }
+            }
+
+            // Schedule pending blobs for finalization
+            let pending_blobs = state.get_pending_blobs(&store, 1000).unwrap();
+            for (hash, size, sources) in pending_blobs {
+                for (user, id, source) in sources {
+                    if let Some(status) = statuses.get_mut(&(user, id.clone(), hash)) {
+                        if status.1 == 0 {
+                            let resolve_epoch = rng.gen_range(1..=max_resolve_epochs) + epoch;
+
+                            warn!(
+                                "processing pending blob {} for {} at epoch {} (id: {})",
+                                hash, user, epoch, id
+                            );
+
+                            status.1 = resolve_epoch;
+                            resolves
+                                .entry(resolve_epoch)
+                                .and_modify(|entry| {
+                                    entry.push((user, id.clone(), hash, size, source));
+                                })
+                                .or_insert(vec![(user, id.clone(), hash, size, source)]);
+                        }
+                    }
+                }
+            }
+
+            // Resolve blobs
+            if let Some(entries) = resolves.get(&epoch) {
+                for (user, id, hash, size, source) in entries {
+                    let status = statuses.get_mut(&(*user, id.clone(), *hash)).unwrap();
+                    match status.0 {
+                        BlobStatus::Failed => {
+                            num_failed += 1;
+                        }
+                        BlobStatus::Resolved => {
+                            num_resolved += 1;
+                        }
+                        _ => unreachable!(),
+                    }
+                    warn!(
+                        "finalizing blob {} for {} to status {} at epoch {} (id: {})",
+                        hash, user, status.0, epoch, id
+                    );
+                    let finalized = state
+                        .finalize_blob(
+                            &store,
+                            *user,
+                            FinalizeBlobStateParams {
+                                source: *source,
+                                hash: *hash,
+                                size:
*size, + id: id.clone(), + status: status.0.clone(), + epoch, + }, + ) + .unwrap(); + if !finalized { + status.1 = 0; + } + } + } + } + + debug!("num. blobs added: {}", num_added); + debug!("num. blobs re-added: {}", num_readded); + debug!("num. blobs resolved: {}", num_resolved); + debug!("num. blobs failed: {}", num_failed); + + // Check global state. + let stats = state.get_stats(&config, TokenAmount::zero()); + debug!("stats: {:#?}", stats); + assert_eq!(stats.num_blobs, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + + // Check the account balances + let mut total_credit = Credit::zero(); + for (i, user) in users.iter().enumerate() { + let account = state.get_account(&store, *user).unwrap().unwrap(); + debug!("account {} {}: {:#?}", i, user, account); + + let mut total_user_credit = Credit::zero(); + for blob in blobs.iter() { + if let Some(added) = blob.added.get(user) { + debug!("{} subscriptions to {}", user, blob.hash); + let mut intervals = Vec::new(); + for (id, start, end) in added { + if let Some((status, resolve_epoch)) = + statuses.get(&(*user, SubscriptionId::new(id).unwrap(), blob.hash)) + { + debug!( + "id: {}, size: {}, start: {}, expiry: {}, status: {}, resolved: {}", + id, blob.size, start, end, status, resolve_epoch + ); + if status == &BlobStatus::Resolved + || (status == &BlobStatus::Failed && *resolve_epoch == 0) + { + intervals.push((*start as u64, *end as u64)); + } + } + } + let duration = get_total_duration(intervals) as ChainEpoch; + debug!("total duration: {}", duration); + let credit = state.get_storage_cost(duration, &blob.size); + total_user_credit += &credit; + } + } + debug!("total user credit: {}", total_user_credit); + + assert_eq!(account.capacity_used, 0); + assert_eq!(account.credit_free, &user_credit - &total_user_credit); + assert_eq!(account.credit_committed, Credit::zero()); + + total_credit += &total_user_credit; + } + + // Check more global state. + assert_eq!(stats.capacity_used, 0); + assert_eq!(stats.credit_committed, Credit::zero()); + assert_eq!(stats.credit_debited, total_credit); + } + + fn get_total_duration(mut intervals: Vec<(u64, u64)>) -> u64 { + if intervals.is_empty() { + return 0; + } + + // Sort intervals by start time + intervals.sort_by_key(|&(start, _)| start); + + let mut merged = Vec::new(); + let mut current = intervals[0]; + + // Merge overlapping intervals + for &(start, end) in &intervals[1..] 
{ + if start <= current.1 { + // Overlapping interval, extend if needed + current.1 = current.1.max(end); + } else { + // Non-overlapping interval + merged.push(current); + current = (start, end); + } + } + merged.push(current); + + merged.iter().map(|&(start, end)| end - start).sum() + } + + #[test] + fn test_total_non_overlapping_duration() { + assert_eq!(get_total_duration(vec![]), 0); + assert_eq!(get_total_duration(vec![(1, 5)]), 4); + assert_eq!(get_total_duration(vec![(1, 5), (10, 15)]), 9); + assert_eq!(get_total_duration(vec![(1, 5), (3, 8)]), 7); + assert_eq!(get_total_duration(vec![(1, 10), (3, 5)]), 9); + assert_eq!( + get_total_duration(vec![(1, 5), (2, 7), (6, 9), (11, 13)]), + 10 + ); + assert_eq!(get_total_duration(vec![(1, 5), (5, 10)]), 9); + assert_eq!( + get_total_duration(vec![(11, 13), (1, 5), (6, 9), (2, 7)]), + 10 + ); + assert_eq!( + get_total_duration(vec![(1, 3), (2, 6), (8, 10), (15, 18), (4, 7), (16, 17)]), + 11 + ); + } +} diff --git a/fendermint/actors/blobs/src/state/accounts.rs b/fendermint/actors/blobs/src/state/accounts.rs new file mode 100644 index 0000000000..592ed8bc2e --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts.rs @@ -0,0 +1,10 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod account; +mod methods; +#[cfg(test)] +mod tests; + +pub use account::*; diff --git a/fendermint/actors/blobs/src/state/accounts/account.rs b/fendermint/actors/blobs/src/state/accounts/account.rs new file mode 100644 index 0000000000..5cf513251c --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/account.rs @@ -0,0 +1,168 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::{self as shared, credit::Credit}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use recall_actor_sdk::util::to_delegated_address; +use recall_ipld::hamt::{self, map::TrackedFlushResult, BytesKey}; + +use crate::state::credit::Approvals; + +/// The stored representation of an account. +#[derive(Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Account { + /// Total size of all blobs managed by the account. + pub capacity_used: u64, + /// Current free credit in byte-blocks that can be used for new commitments. + pub credit_free: Credit, + /// Current committed credit in byte-blocks that will be used for debits. + pub credit_committed: Credit, + /// Optional default sponsor account address. + pub credit_sponsor: Option
, + /// The chain epoch of the last debit. + pub last_debit_epoch: ChainEpoch, + /// Credit approvals to other accounts from this account, keyed by receiver. + pub approvals_to: Approvals, + /// Credit approvals to this account from other accounts, keyed by sender. + pub approvals_from: Approvals, + /// The maximum allowed TTL for actor's blobs. + pub max_ttl: ChainEpoch, + /// The total token value an account has used to buy credits. + pub gas_allowance: TokenAmount, +} + +impl Account { + /// Returns a new [`Account`]. + pub fn new( + store: &BS, + current_epoch: ChainEpoch, + max_ttl: ChainEpoch, + ) -> Result { + Ok(Self { + capacity_used: 0, + credit_free: Credit::default(), + credit_committed: Credit::default(), + credit_sponsor: None, + last_debit_epoch: current_epoch, + approvals_to: Approvals::new(store)?, + approvals_from: Approvals::new(store)?, + max_ttl, + gas_allowance: TokenAmount::default(), + }) + } +} + +impl std::fmt::Debug for Account { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Account") + .field("capacity_used", &self.capacity_used) + .field("credit_free", &self.credit_free) + .field("credit_committed", &self.credit_committed) + .field("credit_sponsor", &self.credit_sponsor) + .field("last_debit_epoch", &self.last_debit_epoch) + .field("max_ttl", &self.max_ttl) + .field("gas_allowance", &self.gas_allowance) + .finish() + } +} + +impl Account { + /// Returns [`shared::accounts::Account`] that is safe to return from actor methods. + pub fn to_shared(&self, rt: &impl Runtime) -> Result { + let store = rt.store(); + let mut approvals_to = HashMap::new(); + self.approvals_to + .hamt(store)? + .for_each(|address, approval| { + let external_account_address = to_delegated_address(rt, address)?; + approvals_to.insert(external_account_address, approval.clone()); + Ok(()) + })?; + + let mut approvals_from = HashMap::new(); + self.approvals_from + .hamt(store)? + .for_each(|address, approval| { + let external_account_address = to_delegated_address(rt, address)?; + approvals_from.insert(external_account_address, approval.clone()); + Ok(()) + })?; + + Ok(shared::accounts::Account { + capacity_used: self.capacity_used, + credit_free: self.credit_free.clone(), + credit_committed: self.credit_committed.clone(), + credit_sponsor: self.credit_sponsor, + last_debit_epoch: self.last_debit_epoch, + approvals_to, + approvals_from, + max_ttl: self.max_ttl, + gas_allowance: self.gas_allowance.clone(), + }) + } +} + +/// HAMT wrapper for accounts state. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Accounts { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, + /// The next account to debit in the current debit cycle. + /// If this is None, we have finished the debit cycle. + next_debit_address: Option
<Address>,
+}
+
+impl Accounts {
+    /// Returns a new account collection.
+    pub fn new<BS: Blockstore>(store: &BS) -> Result<Self, ActorError> {
+        let root = hamt::Root::<Address, Account>::new(store, "accounts")?;
+        Ok(Self {
+            root,
+            size: 0,
+            next_debit_address: None,
+        })
+    }
+
+    /// Returns the underlying [`hamt::map::Hamt`].
+    pub fn hamt<'a, BS: Blockstore>(
+        &self,
+        store: BS,
+    ) -> Result<hamt::map::Hamt<'a, BS, Address, Account>, ActorError> {
+        self.root.hamt(store, self.size)
+    }
+
+    /// Saves the state from the [`TrackedFlushResult`].
+    pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult<Address, Account>) {
+        self.root = tracked_flush_result.root;
+        self.size = tracked_flush_result.size
+    }
+
+    /// Saves the start address to be used by the next debit round.
+    pub fn save_debit_progress(&mut self, next_address: Option<Address>
) { + self.next_debit_address = next_address; + } + + /// Returns the start address to be used by the next debit round. + pub fn get_debit_start_address(&self) -> Option { + self.next_debit_address + .map(|address| BytesKey::from(address.to_bytes())) + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } +} diff --git a/fendermint/actors/blobs/src/state/accounts/methods.rs b/fendermint/actors/blobs/src/state/accounts/methods.rs new file mode 100644 index 0000000000..b9a6d8b7f9 --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/methods.rs @@ -0,0 +1,157 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fendermint_actor_blobs_shared::{accounts::AccountStatus, bytes::B256}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use log::{debug, warn}; + +use super::Account; +use crate::{caller::Caller, state::DeleteBlobStateParams, State}; + +impl State { + /// Returns an [`Account`] by address. + pub fn get_account( + &self, + store: &BS, + address: Address, + ) -> Result, ActorError> { + let accounts = self.accounts.hamt(store)?; + accounts.get(&address) + } + + /// Sets an account's [`TtlStatus`] by address. + /// + /// Flushes state to the blockstore. + pub fn set_account_status( + &mut self, + store: &BS, + config: &RecallConfig, + address: Address, + status: AccountStatus, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + match status { + // We don't want to create an account for default TTL + AccountStatus::Default => { + if let Some(mut account) = accounts.get(&address)? { + account.max_ttl = status.get_max_ttl(config.blob_default_ttl); + self.accounts + .save_tracked(accounts.set_and_flush_tracked(&address, account)?); + } + } + _ => { + // Get or create a new account + let max_ttl = status.get_max_ttl(config.blob_default_ttl); + let mut account = accounts + .get_or_create(&address, || Account::new(store, current_epoch, max_ttl))?; + account.max_ttl = max_ttl; + self.accounts + .save_tracked(accounts.set_and_flush_tracked(&address, account)?); + } + } + Ok(()) + } + + /// Debits accounts for their blob usage and cleans up expired blob subscriptions. + /// + /// This method performs two main operations: + /// 1. Deletes expired blob subscriptions based on the current epoch + /// 2. Debits a batch of accounts for their ongoing blob storage usage + /// + /// The debiting process works in cycles, processing a subset of accounts in each call + /// to avoid excessive computation in a single pass. The number of accounts processed + /// in each batch is controlled by the subnet config parameter `account_debit_batch_size`. + /// Similarly, expired blob deletion is controlled by `blob_delete_batch_size`. + /// + /// Flushes state to the blockstore. + /// + /// TODO: Break this into two methods called by a `cron_tick` actor method. 
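+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// A cron-style driver could call this until the debit cycle completes; the
+    /// boolean in the returned tuple signals whether more account batches remain
+    /// (hypothetical driver code, not part of this change):
+    ///
+    /// ```ignore
+    /// let mut more = true;
+    /// while more {
+    ///     let (_deleted_blobs, has_more) = state.debit_accounts(&store, &config, epoch)?;
+    ///     more = has_more;
+    /// }
+    /// ```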
+ pub fn debit_accounts( + &mut self, + store: &BS, + config: &RecallConfig, + current_epoch: ChainEpoch, + ) -> Result<(HashSet, bool), ActorError> { + // Delete expired subscriptions + let mut delete_from_disc = HashSet::new(); + let mut num_deleted = 0; + let mut expiries = self.blobs.expiries.clone(); + let mut credit_return_groups = HashSet::new(); + expiries.foreach_up_to_epoch( + store, + current_epoch, + Some(config.blob_delete_batch_size), + |_, subscriber, key| { + let key_tuple = (subscriber, key.hash); + match self.delete_blob( + store, + subscriber, + None, + DeleteBlobStateParams { + hash: key.hash, + id: key.id.clone(), + epoch: current_epoch, + skip_credit_return: credit_return_groups.contains(&key_tuple), + }, + ) { + Ok((from_disc, _, credit_returned)) => { + num_deleted += 1; + if from_disc { + delete_from_disc.insert(key.hash); + } + if credit_returned { + credit_return_groups.insert(key_tuple); + } + } + Err(e) => { + warn!( + "failed to delete blob {} for {} (id: {}): {}", + key.hash, subscriber, key.id, e + ) + } + } + Ok(()) + }, + )?; + + debug!("deleted {} expired subscriptions", num_deleted); + debug!( + "{} blobs marked for deletion from disc", + delete_from_disc.len() + ); + + // Debit accounts for existing usage + let reader = self.accounts.hamt(store)?; + let mut writer = self.accounts.hamt(store)?; + let start_key = self.accounts.get_debit_start_address(); + let (count, next_account) = reader.for_each_ranged( + start_key.as_ref(), + Some(config.account_debit_batch_size as usize), + |address, account| { + let mut caller = + Caller::load_account(store, &reader, address, account.clone(), None)?; + self.debit_caller(&mut caller, current_epoch); + caller.save(&mut writer)?; + Ok(true) + }, + )?; + + // Save accounts + self.accounts.save_tracked(writer.flush_tracked()?); + self.accounts.save_debit_progress(next_account); + + debug!( + "finished debiting {:#?} accounts, next account: {:#?}", + count, next_account + ); + + Ok((delete_from_disc, next_account.is_some())) + } +} diff --git a/fendermint/actors/blobs/src/state/accounts/tests.rs b/fendermint/actors/blobs/src/state/accounts/tests.rs new file mode 100644 index 0000000000..141055cec9 --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/tests.rs @@ -0,0 +1,493 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::AccountStatus, + blobs::{BlobStatus, SubscriptionId}, + credit::Credit, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore}; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use crate::state::blobs::SetPendingBlobStateParams; +use crate::{ + caller::DelegationOptions, + state::blobs::{AddBlobStateParams, FinalizeBlobStateParams}, + testing::check_approval_used, + State, +}; + +#[test] +fn test_set_account_status() { + setup_logs(); + + let config = RecallConfig::default(); + + struct TestCase { + name: &'static str, + initial_ttl_status: Option, // None means don't set the initial status + new_ttl_status: AccountStatus, + expected_ttl: ChainEpoch, + } + + let test_cases = vec![ + TestCase { + name: "Setting Reduced on new account", + initial_ttl_status: None, + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + TestCase { + name: 
"Setting Default on new account", + initial_ttl_status: None, + new_ttl_status: AccountStatus::Default, + expected_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Changing from Default to Reduced", + initial_ttl_status: Some(AccountStatus::Default), + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + TestCase { + name: "Changing from Extended to Reduced", + initial_ttl_status: Some(AccountStatus::Extended), + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + TestCase { + name: "Changing from Reduced to Extended", + initial_ttl_status: Some(AccountStatus::Reduced), + new_ttl_status: AccountStatus::Extended, + expected_ttl: ChainEpoch::MAX, + }, + ]; + + for tc in test_cases { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let address = new_address(); + let current_epoch = ChainEpoch::from(1); + + // Initialize the account if needed + if tc.initial_ttl_status.is_some() { + state + .set_account_status( + &store, + &config, + address, + tc.initial_ttl_status.unwrap(), + current_epoch, + ) + .unwrap(); + } + + // Change TTL status + let res = + state.set_account_status(&store, &config, address, tc.new_ttl_status, current_epoch); + assert!( + res.is_ok(), + "Test case '{}' failed to set TTL status", + tc.name + ); + + // Verify max TTL + let max_ttl = state.get_account_max_ttl(&config, &store, address).unwrap(); + assert_eq!( + max_ttl, tc.expected_ttl, + "Test case '{}' failed: expected max TTL {}, got {}", + tc.name, tc.expected_ttl, max_ttl + ); + } +} + +#[test] +fn test_debit_accounts_delete_from_disc() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + debit_accounts_delete_from_disc( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_debit_accounts_delete_from_disc_with_approval() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + debit_accounts_delete_from_disc( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn debit_accounts_delete_from_disc( + config: &RecallConfig, + store: &BS, + mut state: State, + caller: Address, + sponsor: Option
<Address>,
+    current_epoch: ChainEpoch,
+    token_amount: TokenAmount,
+    using_approval: bool,
+) {
+    let subscriber = sponsor.unwrap_or(caller);
+    let mut credit_amount =
+        Credit::from_atto(token_amount.atto().clone()) * &config.token_credit_rate;
+
+    // Add blob with a default subscription ID
+    let (hash, size) = new_hash(1024);
+    let add1_epoch = current_epoch;
+    let id1 = SubscriptionId::default();
+    let ttl1 = ChainEpoch::from(config.blob_min_ttl);
+    let source = new_pk();
+    let res = state.add_blob(
+        &store,
+        config,
+        caller,
+        sponsor,
+        AddBlobStateParams {
+            hash,
+            metadata_hash: new_metadata_hash(),
+            id: id1.clone(),
+            size,
+            ttl: Some(ttl1),
+            source,
+            epoch: add1_epoch,
+            token_amount: TokenAmount::zero(),
+        },
+    );
+    assert!(res.is_ok());
+
+    let stats = state.get_stats(config, TokenAmount::zero());
+    // Using a credit delegation creates both the from and to account
+    let expected_num_accounts = if using_approval { 2 } else { 1 };
+    assert_eq!(stats.num_accounts, expected_num_accounts);
+    assert_eq!(stats.num_blobs, 1);
+    assert_eq!(stats.num_resolving, 0);
+    assert_eq!(stats.bytes_resolving, 0);
+    assert_eq!(stats.num_added, 1);
+    assert_eq!(stats.bytes_added, size);
+
+    // Set to status pending
+    let res = state.set_blob_pending(
+        &store,
+        subscriber,
+        SetPendingBlobStateParams {
+            hash,
+            size,
+            id: id1.clone(),
+            source,
+        },
+    );
+    assert!(res.is_ok());
+    let stats = state.get_stats(config, TokenAmount::zero());
+    assert_eq!(stats.num_blobs, 1);
+    assert_eq!(stats.num_resolving, 1);
+    assert_eq!(stats.bytes_resolving, size);
+    assert_eq!(stats.num_added, 0);
+    assert_eq!(stats.bytes_added, 0);
+
+    // Finalize as resolved
+    let finalize_epoch = ChainEpoch::from(11);
+    let res = state.finalize_blob(
+        &store,
+        subscriber,
+        FinalizeBlobStateParams {
+            source,
+            hash,
+            size,
+            id: id1.clone(),
+            status: BlobStatus::Resolved,
+            epoch: finalize_epoch,
+        },
+    );
+    assert!(res.is_ok());
+    let stats = state.get_stats(config, TokenAmount::zero());
+    assert_eq!(stats.num_blobs, 1);
+    assert_eq!(stats.num_resolving, 0);
+    assert_eq!(stats.bytes_resolving, 0);
+    assert_eq!(stats.num_added, 0);
+    assert_eq!(stats.bytes_added, 0);
+
+    // Check the account balance
+    let account = state.get_account(&store, subscriber).unwrap().unwrap();
+    assert_eq!(account.last_debit_epoch, add1_epoch);
+    assert_eq!(
+        account.credit_committed,
+        Credit::from_whole(ttl1 as u64 * size)
+    );
+    credit_amount -= &account.credit_committed;
+    assert_eq!(account.credit_free, credit_amount);
+    assert_eq!(account.capacity_used, size);
+
+    // Add the same blob, but this time use a different subscription ID
+    let add2_epoch = ChainEpoch::from(21);
+    let ttl2 = ChainEpoch::from(config.blob_min_ttl);
+    let id2 = SubscriptionId::new("foo").unwrap();
+    let source = new_pk();
+    let res = state.add_blob(
+        &store,
+        config,
+        caller,
+        sponsor,
+        AddBlobStateParams {
+            hash,
+            metadata_hash: new_metadata_hash(),
+            id: id2.clone(),
+            size,
+            ttl: Some(ttl2),
+            source,
+            epoch: add2_epoch,
+            token_amount: TokenAmount::zero(),
+        },
+    );
+    assert!(res.is_ok());
+
+    let stats = state.get_stats(config, TokenAmount::zero());
+    assert_eq!(stats.num_blobs, 1);
+    assert_eq!(stats.num_resolving, 0);
+    assert_eq!(stats.bytes_resolving, 0);
+    assert_eq!(stats.num_added, 0);
+    assert_eq!(stats.bytes_added, 0);
+
+    // Check the account balance
+    let account = state.get_account(&store, subscriber).unwrap().unwrap();
+    assert_eq!(account.last_debit_epoch, add2_epoch);
+    assert_eq!(
+        account.credit_committed, // stays the same
because we're starting over,
+        Credit::from_whole(ttl2 as u64 * size),
+    );
+    credit_amount -= Credit::from_whole((add2_epoch - add1_epoch) as u64 * size);
+    assert_eq!(account.credit_free, credit_amount);
+    assert_eq!(account.capacity_used, size); // not changed
+
+    // Check the subscription group
+    let blob = state.get_blob(&store, hash).unwrap().unwrap();
+    let subscribers = blob.subscribers.hamt(store).unwrap();
+    let group = subscribers.get(&subscriber).unwrap().unwrap();
+    assert_eq!(group.len(), 2);
+
+    // Debit all the accounts at an epoch between the two expiries (3601-3621)
+    let debit_epoch = ChainEpoch::from(config.blob_min_ttl + 11);
+    let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap();
+    assert!(deletes_from_disc.is_empty());
+
+    // Check the account balance
+    let account = state.get_account(&store, subscriber).unwrap().unwrap();
+    assert_eq!(account.last_debit_epoch, debit_epoch);
+    assert_eq!(
+        account.credit_committed, // debit reduces this
+        Credit::from_whole((ttl2 - (debit_epoch - add2_epoch)) as u64 * size),
+    );
+    assert_eq!(account.credit_free, credit_amount); // not changed
+    assert_eq!(account.capacity_used, size); // not changed
+
+    // Check the subscription group
+    let blob = state.get_blob(&store, hash).unwrap().unwrap();
+    let subscribers = blob.subscribers.hamt(&store).unwrap();
+    let group = subscribers.get(&subscriber).unwrap().unwrap();
+    assert_eq!(group.len(), 1); // the first subscription was deleted
+
+    // Debit all the accounts at an epoch greater than group expiry (3621)
+    let debit_epoch = ChainEpoch::from(config.blob_min_ttl + 31);
+    let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap();
+    assert!(!deletes_from_disc.is_empty()); // blob is marked for deletion
+
+    // Check the account balance
+    let account = state.get_account(&store, subscriber).unwrap().unwrap();
+    assert_eq!(account.last_debit_epoch, debit_epoch);
+    assert_eq!(
+        account.credit_committed, // the second debit reduces this to zero
+        Credit::from_whole(0),
+    );
+    assert_eq!(account.credit_free, credit_amount); // not changed
+    assert_eq!(account.capacity_used, 0);
+
+    // Check state
+    assert_eq!(state.credits.credit_committed, Credit::from_whole(0)); // credit was released
+    assert_eq!(
+        state.credits.credit_debited,
+        token_amount * &config.token_credit_rate - &account.credit_free
+    );
+    assert_eq!(state.blobs.bytes_size(), 0); // capacity was released
+
+    // Check indexes
+    assert_eq!(state.blobs.expiries.len(store).unwrap(), 0);
+    assert_eq!(state.blobs.added.len(), 0);
+    assert_eq!(state.blobs.pending.len(), 0);
+
+    // Check approval
+    if using_approval {
+        check_approval_used(&state, store, caller, subscriber);
+    }
+}
+
+#[test]
+fn test_paginated_debit_accounts() {
+    let config = RecallConfig {
+        account_debit_batch_size: 5, // Process 5 accounts at a time (10 accounts total)
+        ..Default::default()
+    };
+
+    let store = MemoryBlockstore::default();
+    let mut state = State::new(&store).unwrap();
+    let current_epoch = ChainEpoch::from(1);
+
+    // Create more than one batch worth of accounts (>5)
+    for i in 0..10 {
+        let address = Address::new_id(1000 + i);
+        let token_amount = TokenAmount::from_whole(10);
+
+        // Buy credits for each account
+        state
+            .buy_credit(
+                &store,
+                &config,
+                address,
+                token_amount.clone(),
+                current_epoch,
+            )
+            .unwrap();
+
+        // Add some storage usage
+        let mut accounts = state.accounts.hamt(&store).unwrap();
+        let mut account = accounts.get(&address).unwrap().unwrap();
+
account.capacity_used = 1000; + accounts.set(&address, account).unwrap(); + } + + // First batch (should process 5 accounts) + assert!(state.accounts.get_debit_start_address().is_none()); + let (deletes1, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes1.is_empty()); // No expired blobs + assert!(state.accounts.get_debit_start_address().is_some()); + + // Second batch (should process remaining 5 accounts and clear state) + let (deletes2, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes2.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // The state should be cleared after all accounts processed + + // Verify all accounts were processed + let reader = state.accounts.hamt(&store).unwrap(); + reader + .for_each(|_, account| { + assert_eq!(account.last_debit_epoch, current_epoch + 1); + Ok(()) + }) + .unwrap(); +} + +#[test] +fn test_multiple_debit_cycles() { + let config = RecallConfig { + account_debit_batch_size: 5, // Process 5 accounts at a time (10 accounts total) + ..Default::default() + }; + + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let current_epoch = ChainEpoch::from(1); + + // Create accounts + for i in 0..10 { + let address = Address::new_id(1000 + i); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + address, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + + let mut accounts = state.accounts.hamt(&store).unwrap(); + let mut account = accounts.get(&address).unwrap().unwrap(); + account.capacity_used = 1000; + accounts.set(&address, account).unwrap(); + } + + // First cycle + let (deletes1, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes1.is_empty()); + assert!(state.accounts.get_debit_start_address().is_some()); + + let (deletes2, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes2.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // First cycle complete + + // Second cycle + let (deletes3, _) = state + .debit_accounts(&store, &config, current_epoch + 2) + .unwrap(); + assert!(deletes3.is_empty()); + assert!(state.accounts.get_debit_start_address().is_some()); + + let (deletes4, _) = state + .debit_accounts(&store, &config, current_epoch + 2) + .unwrap(); + assert!(deletes4.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // Second cycle complete +} diff --git a/fendermint/actors/blobs/src/state/blobs.rs b/fendermint/actors/blobs/src/state/blobs.rs new file mode 100644 index 0000000000..5c7c90875c --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs.rs @@ -0,0 +1,20 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod blob; +mod expiries; +mod methods; +mod params; +mod queue; +mod subscribers; +mod subscriptions; +#[cfg(test)] +mod tests; + +pub use blob::*; +pub use expiries::*; +pub use params::*; +pub use queue::*; +pub use subscribers::*; +pub use subscriptions::*; diff --git a/fendermint/actors/blobs/src/state/blobs/blob.rs b/fendermint/actors/blobs/src/state/blobs/blob.rs new file mode 100644 index 0000000000..40dcd2ca5a --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/blob.rs @@ -0,0 +1,454 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, 
MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::blobs::SubscriptionId; +use fendermint_actor_blobs_shared::{ + self as shared, + blobs::{BlobStatus, Subscription}, + bytes::B256, +}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use fvm_shared::clock::ChainEpoch; +use log::debug; +use recall_ipld::hamt::{self, map::TrackedFlushResult}; + +use super::{ + AddBlobStateParams, BlobSource, Expiries, ExpiryUpdate, Queue, Subscribers, Subscriptions, +}; +use crate::caller::Caller; + +/// Represents the result of a blob upsert. +#[derive(Debug, Clone)] +pub struct UpsertBlobResult { + /// New or updated subscription. + pub subscription: Subscription, + /// New capacity used by the caller. + pub capacity_used: u64, + /// Duration for the new credit commitment. + pub commit_duration: ChainEpoch, + /// Duration for the returned credit commitment. + pub return_duration: ChainEpoch, +} + +/// The stored representation of a blob. +#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Blob { + /// The size of the content. + pub size: u64, + /// Blob metadata that contains information for blob recovery. + pub metadata_hash: B256, + /// Active subscribers (accounts) that are paying for the blob. + pub subscribers: Subscribers, + /// Blob status. + pub status: BlobStatus, +} + +impl Blob { + /// Returns a new [`Blob`]. + pub fn new( + store: &BS, + size: u64, + metadata_hash: B256, + ) -> Result { + Ok(Self { + size, + metadata_hash, + subscribers: Subscribers::new(store)?, + status: BlobStatus::Added, + }) + } + + /// Returns a [`shared::blobs::Blob`] that is safe to return from actor methods. + /// TODO: HAMTs should carry max expiry such that we don't full scan here. + pub fn to_shared(&self, rt: &impl Runtime) -> Result { + let store = rt.store(); + let mut subscribers = HashMap::new(); + self.subscribers.hamt(store)?.for_each(|_, group| { + group.hamt(store)?.for_each(|id, sub| { + subscribers.insert(id, sub.expiry); + Ok(()) + })?; + Ok(()) + })?; + Ok(shared::blobs::Blob { + size: self.size, + metadata_hash: self.metadata_hash, + subscribers, + status: self.status.clone(), + }) + } +} + +/// HAMT wrapper for blobs state. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Blobs { + /// The HAMT root. + pub root: hamt::Root, + /// Map of expiries to blob hashes. + pub expiries: Expiries, + /// Map of currently added blob hashes to account and source Iroh node IDs. + pub added: Queue, + /// Map of currently pending blob hashes to account and source Iroh node IDs. + pub pending: Queue, + /// Number of blobs in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + size: u64, + /// Number of blob bytes in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + bytes_size: u64, +} + +/// Return type used when getting and hydrating a blob. +#[derive(Debug)] +pub struct GetBlobResult { + /// The blob that was retrieved. + pub blob: Blob, + /// The blob's subscriber subscriptions. + pub subscriptions: Subscriptions, + /// The blob subscription. + pub subscription: Subscription, +} + +impl Blobs { + /// Returns a blob collection. 
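+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// A minimal construction sketch; `MemoryBlockstore` is the in-memory store
+    /// used by the tests in this change:
+    ///
+    /// ```ignore
+    /// let store = MemoryBlockstore::default();
+    /// let blobs = Blobs::new(&store)?;
+    /// assert!(blobs.is_empty());
+    /// assert_eq!(blobs.bytes_size(), 0);
+    /// ```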
+ pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "blobs")?; + Ok(Self { + root, + expiries: Expiries::new(store)?, + added: Queue::new(store, "added blobs queue")?, + pending: Queue::new(store, "pending blobs queue")?, + size: 0, + bytes_size: 0, + }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Number of blobs in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Returns the number of blob bytes in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + pub fn bytes_size(&self) -> u64 { + self.bytes_size + } + + /// Sets subnet bytes capacity. + pub fn set_capacity(&mut self, size: u64) { + self.bytes_size = size; + } + + /// Releases subnet bytes capacity. + pub fn release_capacity(&mut self, size: u64) { + self.bytes_size = self.bytes_size.saturating_sub(size); + + debug!("released {} bytes to subnet", size); + } + + /// Retrieves a blob and subscription information for a given subscriber, blob hash, + /// and subscription ID. + /// + /// This function performs a series of lookups to locate both the requested blob and the + /// specific subscription to that blob for the subscriber: + /// 1. Retrieve the blob using its hash + /// 2. Confirm the subscriber is a valid subscriber to blob + /// 3. Locate the specific subscription by its ID + pub fn get_and_hydrate( + &self, + store: &BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + ) -> Result, ActorError> { + let blobs_hamt = self.hamt(store)?; + + // Early return if the blob doesn't exist + let blob = match blobs_hamt.get(&hash)? { + Some(blob) => blob, + None => return Ok(None), + }; + + // Get subscriber's subscriptions + let subscribers_hamt = blob.subscribers.hamt(store)?; + let subscriptions = match subscribers_hamt.get(&subscriber)? { + Some(subscriptions) => subscriptions, + None => { + return Err(ActorError::forbidden(format!( + "subscriber {} is not subscribed to blob {}", + subscriber, hash + ))); + } + }; + + // Get the subscription by ID + let subscriptions_hamt = subscriptions.hamt(store)?; + let subscription = match subscriptions_hamt.get(id)? { + Some(subscription) => subscription, + None => { + return Err(ActorError::not_found(format!( + "subscription id {} not found", + id + ))); + } + }; + + Ok(Some(GetBlobResult { + blob, + subscriptions, + subscription, + })) + } + + /// Creates or updates a blob and subscription, managing all related state changes. + /// + /// This function performs several operations: + /// 1. Check if the blob exists and create it if not + /// 2. Add or update the caller's subscription to blob + /// 3. Update the blob's status to "Added" if it's not already resolved + /// 4. Update the blob source in the "added" queue + /// 5. Update expiry indexes for subscription + /// 6. 
Save all changes to storage + /// + /// The function handles both the creation of new blobs and updates to existing ones, + /// as well as managing subscriptions, expiries, and status tracking. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + expiry: ChainEpoch, + ) -> Result { + let mut blobs = self.hamt(store)?; + let (mut blob, blob_added) = if let Some(blob) = blobs.get(¶ms.hash)? { + (blob, false) + } else { + (Blob::new(store, params.size, params.metadata_hash)?, true) + }; + + // Add/update subscriber and the subscription + let result = blob.subscribers.upsert(store, caller, params, expiry)?; + + // Update blob status and added index if the blob is not already resolved + if !matches!(blob.status, BlobStatus::Resolved) { + // If failed, reset to added state + if matches!(blob.status, BlobStatus::Failed) { + blob.status = BlobStatus::Added; + } + + // Add to or update the source in the added queue + self.added.upsert( + store, + params.hash, + BlobSource::new( + caller.subscriber_address(), + params.id.clone(), + params.source, + ), + blob.size, + )?; + } + + // Update expiry index + let mut expiry_updates = vec![]; + if let Some(previous_expiry) = result.previous_subscription_expiry { + if previous_expiry != expiry { + expiry_updates.push(ExpiryUpdate::Remove(previous_expiry)); + expiry_updates.push(ExpiryUpdate::Add(expiry)); + } + } else { + expiry_updates.push(ExpiryUpdate::Add(expiry)); + } + self.expiries.update( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + expiry_updates, + )?; + + self.save_tracked(blobs.set_and_flush_tracked(¶ms.hash, blob)?); + + // Update global state + if blob_added { + self.bytes_size = self.bytes_size.saturating_add(params.size); + + debug!("used {} bytes from subnet", params.size); + debug!("created new blob {}", params.hash); + } else { + debug!("used 0 bytes from subnet"); + } + + Ok(UpsertBlobResult { + subscription: result.subscription, + capacity_used: if result.subscriber_added { + params.size + } else { + 0 + }, + commit_duration: result.commit_duration, + return_duration: result.return_duration, + }) + } + + /// Saves all state changes from a blob retrieval operation. + /// + /// This function updates multiple related data structures after a blob has been retrieved: + /// 1. Update the subscription state in subscriptions collection + /// 2. Update the subscription list for subscriber + /// 3. Update the blob entry in the blobs HAMT + /// + /// This function ensures that all state changes from a blob retrieval operation are + /// saved atomically, maintaining data consistency across the different collections. + pub fn save_result( + &mut self, + store: &BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + blob: &mut GetBlobResult, + ) -> Result<(), ActorError> { + blob.subscriptions + .save_subscription(store, id, blob.subscription.clone())?; + + blob.blob + .subscribers + .save_subscriptions(store, subscriber, blob.subscriptions.clone())?; + + let mut blobs = self.hamt(store)?; + self.save_tracked(blobs.set_and_flush_tracked(&hash, blob.blob.clone())?); + + Ok(()) + } + + /// Deletes a subscription to a blob for a specific caller and returns whether the blob was + /// also deleted. + /// + /// This function removes a specific subscription identified by `id` for the given `caller` to + /// the blob identified by `hash`. It performs multiple cleanup operations: + /// 1. Update the expiry index by removing the subscription's expiry entry + /// 2. 
Remove the blob source from the "added" queue + /// 3. Remove the blob source from the "pending" queue + /// 4. Delete the subscription from the subscriber's subscriptions + /// 5. If the subscriber has no remaining subscriptions to the blob, remove subscriber + /// 6. If no subscribers remain for the blob, delete the blob entirely + pub fn delete_subscription( + &mut self, + store: &BS, + caller: &Caller, + hash: B256, + id: SubscriptionId, + blob_result: &mut GetBlobResult, + ) -> Result { + // Update expiry index + self.expiries.update( + store, + caller.subscriber_address(), + hash, + &id, + vec![ExpiryUpdate::Remove(blob_result.subscription.expiry)], + )?; + + // Remove the source from the added queue + self.added.remove_source( + store, + &hash, + blob_result.blob.size, + BlobSource::new( + caller.subscriber_address(), + id.clone(), + blob_result.subscription.source, + ), + )?; + + // Remove the source from the pending queue + self.pending.remove_source( + store, + &hash, + blob_result.blob.size, + BlobSource::new( + caller.subscriber_address(), + id.clone(), + blob_result.subscription.source, + ), + )?; + + // Delete subscription + let mut subscriptions_hamt = blob_result.subscriptions.hamt(store)?; + blob_result + .subscriptions + .save_tracked(subscriptions_hamt.delete_and_flush_tracked(&id)?.0); + debug!( + "deleted subscription to blob {} for {} (key: {})", + hash, + caller.subscriber_address(), + id + ); + + // Delete the group if empty + let mut blobs_hamt = self.hamt(store)?; + let mut subscribers_hamt = blob_result.blob.subscribers.hamt(store)?; + let blob_deleted = if blob_result.subscriptions.is_empty() { + blob_result.blob.subscribers.save_tracked( + subscribers_hamt + .delete_and_flush_tracked(&caller.subscriber_address())? + .0, + ); + debug!( + "deleted subscriber {} to blob {}", + caller.subscriber_address(), + hash + ); + + // Delete or update blob + let blob_deleted = blob_result.blob.subscribers.is_empty(); + if blob_deleted { + self.save_tracked(blobs_hamt.delete_and_flush_tracked(&hash)?.0); + debug!("deleted blob {}", hash); + } else { + self.save_tracked( + blobs_hamt.set_and_flush_tracked(&hash, blob_result.blob.clone())?, + ); + } + blob_deleted + } else { + blob_result + .blob + .subscribers + .save_tracked(subscribers_hamt.set_and_flush_tracked( + &caller.subscriber_address(), + blob_result.subscriptions.clone(), + )?); + self.save_tracked(blobs_hamt.set_and_flush_tracked(&hash, blob_result.blob.clone())?); + false + }; + + Ok(blob_deleted) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/expiries.rs b/fendermint/actors/blobs/src/state/blobs/expiries.rs new file mode 100644 index 0000000000..adb0caedde --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/expiries.rs @@ -0,0 +1,572 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Display; + +use fendermint_actor_blobs_shared::{blobs::SubscriptionId, bytes::B256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{tuple::*, RawBytes}; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use log::debug; +use recall_ipld::{ + amt::{self, vec::TrackedFlushResult}, + hamt::{self, MapKey}, +}; + +/// Key used to namespace subscriptions in the expiry index. +#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ExpiryKey { + /// Key hash. + pub hash: B256, + /// Key subscription ID. 
+ pub id: SubscriptionId, +} + +impl Display for ExpiryKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ExpiryKey(hash: {}, id: {})", self.hash, self.id) + } +} + +impl MapKey for ExpiryKey { + fn from_bytes(b: &[u8]) -> Result { + let raw_bytes = RawBytes::from(b.to_vec()); + fil_actors_runtime::cbor::deserialize(&raw_bytes, "ExpiryKey") + .map_err(|e| format!("Failed to deserialize ExpiryKey {}", e)) + } + + fn to_bytes(&self) -> Result, String> { + let raw_bytes = fil_actors_runtime::cbor::serialize(self, "ExpiryKey") + .map_err(|e| format!("Failed to serialize ExpiryKey {}", e))?; + Ok(raw_bytes.to_vec()) + } +} + +impl ExpiryKey { + /// Create a new expiry key. + pub fn new(hash: B256, id: &SubscriptionId) -> Self { + Self { + hash, + id: id.clone(), + } + } +} + +/// Type used as the root of [`Expiries`]. +type ExpiriesRoot = hamt::Root>; + +/// AMT wrapper for expiry index state. +#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct Expiries { + /// The AMT root. + pub root: amt::Root, + /// Index marker for pagination. + /// When present, iteration starts from this index. + /// Otherwise, iteration begins from the first entry. + /// Used for efficient traversal during blob expiration. + next_index: Option, +} + +impl Expiries { + /// Returns a new expiry collection. + pub fn new(store: &BS) -> Result { + let root = amt::Root::::new(store)?; + Ok(Self { + root, + next_index: None, + }) + } + + /// Returns the underlying [`amt::vec::Amt`]. + pub fn amt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.amt(store) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + } + + /// The size of the collection. + pub fn len(&self, store: BS) -> Result { + Ok(self.root.amt(store)?.count()) + } + + /// Iterates the collection up to the given epoch. + pub fn foreach_up_to_epoch( + &mut self, + store: BS, + epoch: ChainEpoch, + batch_size: Option, + mut f: F, + ) -> Result<(), ActorError> + where + F: FnMut(ChainEpoch, Address, ExpiryKey) -> Result<(), ActorError>, + { + let expiries = self.amt(&store)?; + + debug!( + "walking blobs up to epoch {} (next_index: {:?})", + epoch, self.next_index + ); + + let (_, next_idx) = expiries.for_each_while_ranged( + self.next_index, + batch_size, + |index, per_chain_epoch_root| { + if index > epoch as u64 { + return Ok(false); + } + let per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 0)?; + per_chain_epoch_hamt.for_each(|address, per_address_root| { + let per_address_hamt = per_address_root.hamt(&store, 0)?; + per_address_hamt.for_each(|expiry_key, _| f(index as i64, address, expiry_key)) + })?; + Ok(true) + }, + )?; + self.next_index = next_idx.filter(|&idx| idx <= epoch as u64); + + debug!("walked blobs (next_index: {:?})", self.next_index,); + + Ok(()) + } + + /// Updates the collection by applying the list of [`ExpiryUpdate`]s. + pub fn update( + &mut self, + store: BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + updates: Vec, + ) -> Result<(), ActorError> { + if updates.is_empty() { + return Ok(()); + } + + let mut expiries = self.amt(&store)?; + for update in updates { + match update { + ExpiryUpdate::Add(chain_epoch) => { + // You cannot do get_or_create here: it expects value, we give it Result> + let per_chain_epoch_root = + if let Some(per_chain_epoch_root) = expiries.get(chain_epoch as u64)? 
{ + per_chain_epoch_root + } else { + hamt::Root::>::new( + &store, + &Expiries::store_name_per_root(chain_epoch), + )? + }; + // The size does not matter + let mut per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 1)?; + // You cannot do get_or_create here: it expects value, we give it Result> + let per_address_root = + if let Some(per_address_root) = per_chain_epoch_hamt.get(&subscriber)? { + per_address_root + } else { + hamt::Root::::new( + &store, + &Expiries::store_name_per_address(chain_epoch, &subscriber), + )? + }; + let mut per_address_hamt = per_address_root.hamt(&store, 1)?; // The size does not matter here + let expiry_key = ExpiryKey::new(hash, id); + let per_address_root = per_address_hamt.set_and_flush(&expiry_key, ())?; + let per_chain_epoch_root = + per_chain_epoch_hamt.set_and_flush(&subscriber, per_address_root)?; + self.save_tracked( + expiries.set_and_flush_tracked(chain_epoch as u64, per_chain_epoch_root)?, + ); + } + ExpiryUpdate::Remove(chain_epoch) => { + if let Some(mut per_chain_epoch_root) = expiries.get(chain_epoch as u64)? { + let mut per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 1)?; // The size does not matter here + if let Some(mut per_address_root) = per_chain_epoch_hamt.get(&subscriber)? { + let mut per_address_hamt = per_address_root.hamt(&store, 1)?; // The size does not matter here + let expiry_key = ExpiryKey::new(hash, id); + (per_address_root, _) = + per_address_hamt.delete_and_flush(&expiry_key)?; + if per_address_hamt.is_empty() { + (per_chain_epoch_root, _) = + per_chain_epoch_hamt.delete_and_flush(&subscriber)?; + } else { + per_chain_epoch_root = per_chain_epoch_hamt + .set_and_flush(&subscriber, per_address_root)?; + } + } + if per_chain_epoch_hamt.is_empty() { + self.save_tracked( + expiries.delete_and_flush_tracked(chain_epoch as u64)?, + ); + } else { + self.save_tracked( + expiries.set_and_flush_tracked( + chain_epoch as u64, + per_chain_epoch_root, + )?, + ); + } + } + } + } + } + Ok(()) + } + + /// Returns the store display name. + fn store_name() -> String { + "expiries".to_string() + } + + /// Returns the store display name for a root. + fn store_name_per_root(chain_epoch: ChainEpoch) -> String { + format!("{}.{}", Expiries::store_name(), chain_epoch) + } + + /// Returns the store display name for an address. + fn store_name_per_address(chain_epoch: ChainEpoch, address: &Address) -> String { + format!("{}.{}", Expiries::store_name_per_root(chain_epoch), address) + } +} + +/// Helper enum for expiry updates. +pub enum ExpiryUpdate { + /// Entry to add. + Add(ChainEpoch), + /// Entry to remove. 
+ Remove(ChainEpoch), +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_blobs_testing::{new_address, new_hash}; + use fvm_ipld_blockstore::MemoryBlockstore; + + #[test] + fn test_expiries_foreach_up_to_epoch() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + + let addr = new_address(); + let mut hashes = vec![]; + for i in 1..=100 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(i)], + ) + .unwrap(); + hashes.push(hash); + } + assert_eq!(state.len(&store).unwrap(), 100); + + let mut range = vec![]; + state + .foreach_up_to_epoch(&store, 10, None, |chain_epoch, _, _| { + range.push(chain_epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(range.len(), 10); + + // Remove an element to test against a sparse state + let remove_epoch = 5; + let hash = hashes[remove_epoch - 1]; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Remove(remove_epoch as ChainEpoch)], + ) + .unwrap(); + assert_eq!(state.len(&store).unwrap(), 99); + + let mut range = vec![]; + state + .foreach_up_to_epoch(&store, 10, None, |chain_epoch, _, _| { + range.push(chain_epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(range.len(), 9); + } + + #[test] + fn test_expiries_pagination() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + + // Create expiries at epochs 1,2,4,7,8,10 + for i in &[1, 2, 4, 7, 8, 10] { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(*i as ChainEpoch)], + ) + .unwrap(); + } + + // Process with batch size 2 + let mut processed = vec![]; + let mut done = false; + while !done { + state + .foreach_up_to_epoch(&store, 10, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + done = state.next_index.is_none(); + } + + // Should get all epochs in order, despite gaps + assert_eq!(processed, vec![1, 2, 4, 7, 8, 10]); + } + + #[test] + fn test_expiries_pagination_with_mutations() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + let current_epoch = 100; + + // Initial set: 110,120,130,140,150 + let mut hashes = vec![]; + for ttl in (10..=50).step_by(10) { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(current_epoch + ttl)], + ) + .unwrap(); + hashes.push(hash); + } + + let mut processed = vec![]; + + // Process first batch (110,120) + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(processed, vec![110, 120]); + + // Add new expiry at 135 + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(current_epoch + 35)], + ) + .unwrap(); + + // Remove expiry at 140 + let hash = hashes[3]; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Remove(current_epoch + 40)], + ) + .unwrap(); + + // Process remaining epochs + while state.next_index.is_some() { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + } + + // Should get all expiries in order, with 140 removed and 135 added + assert_eq!(processed, vec![110, 
120, 130, 135, 150]); + } + + #[test] + fn test_expiries_pagination_with_expiry_update() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + let current_epoch = 100; + + // Initial set: add blobs with ttl 10,20,30,40,50 + let mut hashes = vec![]; + for ttl in (10..=50).step_by(10) { + let (hash, _) = new_hash(1024); + let expiry = current_epoch + ttl; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(expiry)], + ) + .unwrap(); + hashes.push(hash); + } + + let mut processed = vec![]; + + // Process the first two expiries (110,120) + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(processed, vec![110, 120]); + + // Extend the expiry of the blob at 130 to 145 (can only extend, not reduce) + let hash = hashes[2]; // blob with ttl 30 + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ + ExpiryUpdate::Remove(current_epoch + 30), // remove 130 + ExpiryUpdate::Add(current_epoch + 45), // add 145 (extended) + ], + ) + .unwrap(); + + // Process remaining epochs - should see updated expiry + while state.next_index.is_some() { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + } + + // Should get all expiries in chronological order, with 130 replaced by 145 + assert_eq!(processed, vec![110, 120, 140, 145, 150]); + } + + #[test] + fn test_expiries_pagination_with_multiple_subscribers() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr1 = new_address(); + let addr2 = new_address(); + + // Add multiple blobs expiring at the same epochs + // addr1: two blobs expiring at 110, one at 120 + // addr2: one blob expiring at 110, two at 130 + let mut entries = vec![]; + + // addr1's blobs + for _ in 0..2 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr1, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(110)], + ) + .unwrap(); + entries.push((110, addr1, hash)); + } + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr1, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(120)], + ) + .unwrap(); + entries.push((120, addr1, hash)); + + // addr2's blobs + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr2, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(110)], + ) + .unwrap(); + entries.push((110, addr2, hash)); + + for _ in 0..2 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr2, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(130)], + ) + .unwrap(); + entries.push((130, addr2, hash)); + } + + let mut processed = vec![]; + let mut done = false; + + // Process all entries with batch size 2 + while !done { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, subscriber, key| { + processed.push((epoch, subscriber, key.hash)); + Ok(()) + }) + .unwrap(); + done = state.next_index.is_none(); + } + + // Should get all entries, with multiple entries per epoch + assert_eq!(processed.len(), 6); // Total number of blob expiries + + // Verify we got all entries at epoch 110 + let epoch_110 = processed.iter().filter(|(e, _, _)| *e == 110).count(); + assert_eq!(epoch_110, 3); // 2 from addr1, 1 from addr2 + + // Verify we got all entries at epoch 130 + let epoch_130 = processed.iter().filter(|(e, _, _)| *e == 130).count(); + 
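+        // Entries at the same epoch are keyed per subscriber in a nested HAMT,
+        // so both of addr2's blobs live under a single per-address root.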
assert_eq!(epoch_130, 2); // Both from addr2 + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/methods.rs b/fendermint/actors/blobs/src/state/blobs/methods.rs new file mode 100644 index 0000000000..4fea6428ca --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/methods.rs @@ -0,0 +1,752 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::error::Error; +use std::str::from_utf8; + +use fendermint_actor_blobs_shared::{ + blobs::{BlobRequest, BlobStatus, Subscription, SubscriptionId}, + bytes::B256, + credit::Credit, +}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{ + address::Address, bigint::BigInt, clock::ChainEpoch, econ::TokenAmount, error::ExitCode, +}; +use log::debug; +use num_traits::Zero; +use recall_ipld::hamt::BytesKey; + +use super::{ + AddBlobStateParams, Blob, BlobSource, DeleteBlobStateParams, FinalizeBlobStateParams, + SetPendingBlobStateParams, +}; +use crate::{caller::Caller, state::credit::CommitCapacityParams, State}; + +/// Return type for blob queues. +type BlobSourcesResult = Result, ActorError>; + +impl State { + /// Adds or updates a blob subscription. + /// + /// This method handles the entire process of adding a new blob or updating an existing + /// blob subscription, including + /// - Managing subscriber and sponsorship relationships + /// - Handling blob creation or update + /// - Processing subscription groups and expiry tracking + /// - Managing capacity accounting and credit commitments + /// - Updating blob status and indexing + /// + /// Flushes state to the blockstore. + pub fn add_blob( + &mut self, + store: &BS, + config: &RecallConfig, + caller: Address, + sponsor: Option
, + params: AddBlobStateParams, + ) -> Result<(Subscription, TokenAmount), ActorError> { + self.ensure_capacity(config.blob_capacity)?; + + // Get or create a new account + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load_or_create( + store, + &accounts, + caller, + sponsor, + params.epoch, + config.blob_default_ttl, + )?; + + // Validate the TTL + let ttl = caller.validate_ttl_usage(config, params.ttl)?; + let expiry = params.epoch.saturating_add(ttl); + + // Get or create a new blob + let result = self.blobs.upsert(store, &caller, ¶ms, expiry)?; + + // Determine credit commitments + let credit_return = self.get_storage_cost(result.return_duration, ¶ms.size); + if credit_return.is_positive() { + self.return_committed_credit_for_caller(&mut caller, &credit_return); + } + let credit_required = self.get_storage_cost(result.commit_duration, ¶ms.size); + + // Account capacity is changing, debit for existing usage + self.debit_caller(&mut caller, params.epoch); + + // Account for new size and commit credit + let token_rebate = if credit_required.is_positive() { + self.commit_capacity_for_caller( + &mut caller, + config, + CommitCapacityParams { + size: result.capacity_used, + cost: credit_required, + value: params.token_amount, + epoch: params.epoch, + }, + )? + } else if credit_required.is_negative() { + self.release_capacity_for_caller(&mut caller, 0, &-credit_required); + params.token_amount + } else { + params.token_amount + }; + + // Save caller + self.save_caller(&mut caller, &mut accounts)?; + + Ok((result.subscription, token_rebate)) + } + + /// Retuns a [`Blob`] by hash. + pub fn get_blob( + &self, + store: &BS, + hash: B256, + ) -> Result, ActorError> { + let blobs = self.blobs.hamt(store)?; + blobs.get(&hash) + } + + /// Returns [`BlobStatus`] by hash. + pub fn get_blob_status( + &self, + store: &BS, + subscriber: Address, + hash: B256, + id: SubscriptionId, + ) -> Result, ActorError> { + let blob = if let Some(blob) = self + .blobs + .hamt(store) + .ok() + .and_then(|blobs| blobs.get(&hash).ok()) + .flatten() + { + blob + } else { + return Ok(None); + }; + + let subscribers = blob.subscribers.hamt(store)?; + if subscribers.contains_key(&subscriber)? { + match blob.status { + BlobStatus::Added => Ok(Some(BlobStatus::Added)), + BlobStatus::Pending => Ok(Some(BlobStatus::Pending)), + BlobStatus::Resolved => Ok(Some(BlobStatus::Resolved)), + BlobStatus::Failed => { + // The blob state's status may have been finalized as failed by another + // subscription. + // We need to see if this specific subscription failed. + let subscriptions = subscribers.get(&subscriber)?.unwrap(); // safe here + if let Some(sub) = subscriptions.hamt(store)?.get(&id)? { + if sub.failed { + Ok(Some(BlobStatus::Failed)) + } else { + Ok(Some(BlobStatus::Pending)) + } + } else { + Ok(None) + } + } + } + } else { + Ok(None) + } + } + + /// Retrieves a page of newly added blobs that need to be resolved. + /// + /// This method fetches blobs from the "added" queue, which contains blobs that have been + /// added to the system but haven't yet been successfully resolved and stored. + pub fn get_added_blobs(&self, store: &BS, size: u32) -> BlobSourcesResult { + let blobs = self.blobs.hamt(store)?; + self.blobs + .added + .take_page(store, size)? + .into_iter() + .map(|(hash, sources)| { + let blob = blobs + .get(&hash)? 
+ .ok_or_else(|| ActorError::not_found(format!("blob {} not found", hash)))?; + Ok((hash, blob.size, sources)) + }) + .collect() + } + + /// Retrieves a page of blobs that are pending resolve. + /// + /// This method fetches blobs from the "pending" queue, which contains blobs that are + /// actively being resolved but are still in a pending state. + pub fn get_pending_blobs(&self, store: &BS, size: u32) -> BlobSourcesResult { + let blobs = self.blobs.hamt(store)?; + self.blobs + .pending + .take_page(store, size)? + .into_iter() + .map(|(hash, sources)| { + let blob = blobs + .get(&hash)? + .ok_or_else(|| ActorError::not_found(format!("blob {} not found", hash)))?; + Ok((hash, blob.size, sources)) + }) + .collect() + } + + /// Marks a blob as being in the pending resolution state. + /// + /// This method transitions a blob from 'added' to 'pending' state, indicating that its + /// resolution process has started. It updates the blob's status and moves it from the + /// 'added' queue to the 'pending' queue. + /// + /// Flushes state to the blockstore. + pub fn set_blob_pending( + &mut self, + store: &BS, + subscriber: Address, + params: SetPendingBlobStateParams, + ) -> Result<(), ActorError> { + // Get the blob + let mut blob = match self + .blobs + .get_and_hydrate(store, subscriber, params.hash, ¶ms.id) + { + Ok(Some(result)) => result, + Ok(None) => { + // Blob might have been deleted already + // Remove the entire blob entry from the added queue + self.blobs + .added + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(()); + } + Err(err) + if err.exit_code() == ExitCode::USR_FORBIDDEN + || err.exit_code() == ExitCode::USR_NOT_FOUND => + { + // Blob might not be accessible (forbidden or not found) + // Remove the source from the added queue + self.blobs.added.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + return Ok(()); + } + Err(err) => return Err(err), + }; + + // Check the current status + match blob.blob.status { + BlobStatus::Resolved => { + // Blob is already finalized as resolved. + // Remove the entire blob entry from the added queue + self.blobs + .added + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(()); + } + BlobStatus::Failed => { + return Err(ActorError::illegal_state(format!( + "blob {} cannot be set to pending from status failed", + params.hash + ))); + } + _ => {} + } + + // Check if the blob's size matches the size provided when it was added + if blob.blob.size != params.size { + return Err(ActorError::assertion_failed(format!( + "blob {} size mismatch (expected: {}; actual: {})", + params.hash, params.size, blob.blob.size + ))); + } + + // Update status + blob.blob.status = BlobStatus::Pending; + + // Add the source to the pending queue + self.blobs.pending.upsert( + store, + params.hash, + BlobSource::new(subscriber, params.id.clone(), params.source), + params.size, + )?; + + // Remove the source from the added queue + self.blobs.added.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + + // Save blob + self.blobs + .save_result(store, subscriber, params.hash, ¶ms.id, &mut blob)?; + + debug!("set blob {} to pending", params.hash); + + Ok(()) + } + + /// Finalizes a blob's resolution process with a success or failure status. + /// + /// This method completes the blob resolution process by setting its final status + /// (resolved or failed). 
For failed blobs, it handles refunding of credits and capacity + /// reclamation as needed. The method also removes the blob from the pending queue. + /// + /// Flushes state to the blockstore. + pub fn finalize_blob( + &mut self, + store: &BS, + subscriber: Address, + params: FinalizeBlobStateParams, + ) -> Result { + // Validate incoming status + if matches!(params.status, BlobStatus::Added | BlobStatus::Pending) { + return Err(ActorError::illegal_state(format!( + "cannot finalize blob {} as added or pending", + params.hash + ))); + } + + // Get the blob + let mut blob = match self + .blobs + .get_and_hydrate(store, subscriber, params.hash, ¶ms.id) + { + Ok(Some(result)) => result, + Ok(None) => { + debug!("blob not found {} (id: {})", params.hash, params.id); + // Blob might have been deleted already + // Remove the entire blob entry from the pending queue + self.blobs + .pending + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(false); + } + Err(err) + if err.exit_code() == ExitCode::USR_FORBIDDEN + || err.exit_code() == ExitCode::USR_NOT_FOUND => + { + debug!("blob error {} {} (id: {})", params.hash, err, params.id); + // Blob might not be accessible (forbidden or not found) + // Remove the entire blob entry from the pending queue + self.blobs.pending.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + return Ok(false); + } + Err(err) => return Err(err), + }; + + // Check the current status + match blob.blob.status { + BlobStatus::Resolved => { + debug!("blob already resolved {} (id: {})", params.hash, params.id); + // Blob is already finalized as resolved. + // We can ignore later finalizations, even if they are failed. + // Remove from any queue it might be in + self.blobs + .added + .remove_entry(store, ¶ms.hash, blob.blob.size)?; + self.blobs + .pending + .remove_entry(store, ¶ms.hash, blob.blob.size)?; + return Ok(false); + } + _ => {} + } + + // Check if the blob's size matches the size provided when it was added + if blob.blob.size != params.size { + return Err(ActorError::assertion_failed(format!( + "blob {} size mismatch (expected: {}; actual: {})", + params.hash, params.size, blob.blob.size + ))); + } + + // Load the caller account and delegation. + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load( + store, + &accounts, + blob.subscription.delegate.unwrap_or(subscriber), + blob.subscription.delegate.map(|_| subscriber), + )?; + + // Update blob status + blob.blob.status = params.status.clone(); + // if matches!(blob.blob.status, BlobStatus::Failed) && !blob.subscription.failed { + // // Mark the subscription as failed + // blob.subscription.failed = true; + + // // We're not going to make a debit, but we need to refund any spent credits that may + // // have been used on this group in the event the last debit is later than the + // // added epoch. + // let (group_expiry, new_group_expiry) = + // blob.subscriptions + // .max_expiries(store, ¶ms.id, Some(0))?; + // let (sub_is_min_added, next_min_added) = + // blob.subscriptions.is_min_added(store, ¶ms.id)?; + // let last_debit_epoch = caller.subscriber().last_debit_epoch; + // if last_debit_epoch > blob.subscription.added && sub_is_min_added { + // // The refund extends up to either the next minimum added epoch that is less + // // than the last debit epoch, or the last debit epoch. 
+ // let refund_end = if let Some(next_min_added) = next_min_added { + // next_min_added.min(blob.subscription.expiry) + // } else { + // last_debit_epoch + // }; + // let refund_credits = self.get_storage_cost( + // refund_end - (blob.subscription.added - blob.subscription.overlap), + // &blob.blob.size, + // ); + // let group_expiry = group_expiry.unwrap(); // safe here + // let correction_credits = if refund_end > group_expiry { + // self.get_storage_cost(refund_end - group_expiry, &blob.blob.size) + // } else { + // Credit::zero() + // }; + // self.refund_caller(&mut caller, &refund_credits, &correction_credits); + // } + + // // Account for reclaimed size and move committed credit to free credit + // self.release_capacity_for_subnet_and_caller( + // &mut caller, + // group_expiry, + // new_group_expiry, + // blob.blob.size, + // blob.blob.subscribers.len(), + // ); + // } + + // Remove the source from both added and pending queues + // (blob may be finalized directly from added status without going through pending) + // Use blob.subscription.source (what was stored) not params.source (what gateway sends) + self.blobs.added.remove_source( + store, + ¶ms.hash, + blob.blob.size, + BlobSource::new(subscriber, params.id.clone(), blob.subscription.source), + )?; + self.blobs.pending.remove_source( + store, + ¶ms.hash, + blob.blob.size, + BlobSource::new(subscriber, params.id.clone(), blob.subscription.source), + )?; + + // Save blob + self.blobs.save_result( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + &mut blob, + )?; + + // Save accounts + self.save_caller(&mut caller, &mut accounts)?; + + debug!("finalized blob {} to status {}", params.hash, params.status); + + Ok(true) + } + + /// Deletes a blob subscription or the entire blob if it has no remaining subscriptions. + /// + /// This method handles the process of deleting a blob subscription for a specific caller, + /// which may include: + /// - Removing the caller's subscription from the blob's subscriber list + /// - Refunding unused storage credits to the subscriber + /// - Releasing committed capacity from the subscriber's account + /// - Removing the blob entirely if no subscriptions remain + /// - Cleaning up related queue entries and indexes + /// + /// Flushes state to the blockstore. + pub fn delete_blob( + &mut self, + store: &BS, + caller: Address, + sponsor: Option
, + params: DeleteBlobStateParams, + ) -> Result<(bool, u64, bool), ActorError> { + // Load the caller account and delegation. + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, caller, sponsor)?; + caller.validate_delegate_expiration(params.epoch)?; + + // Get the blob + let mut blob = match self.blobs.get_and_hydrate( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + )? { + Some(result) => result, + None => { + // We could error here, but since this method is called from other actors, + // they would need to be able to identify this specific case. + // For example, the bucket actor may need to delete a blob while overwriting + // an existing key. + // However, the system may have already deleted the blob due to expiration or + // insufficient funds. + // We could use a custom error code, but this is easier. + return Ok((false, 0, false)); + } + }; + + // Do not allow deletion if the status is added or pending. + // This would cause issues with deletion from disc. + if matches!(blob.blob.status, BlobStatus::Added) + || matches!(blob.blob.status, BlobStatus::Pending) + { + return Err(ActorError::forbidden(format!( + "blob {} pending finalization; please wait", + params.hash + ))); + } + + // Since the charge will be for all the account's blobs, we can only + // account for capacity up to this blob's expiry if it is less than + // the current epoch. + // If the subscription is failed, there may be no group expiry. + let mut return_duration = 0; + if !blob.subscription.failed { + let (group_expiry, new_group_expiry) = + blob.subscriptions + .max_expiries(store, ¶ms.id, Some(0))?; + if let Some(group_expiry) = group_expiry { + let debit_epoch = group_expiry.min(params.epoch); + // Account capacity is changing, debit for existing usage. + // It could be possible that the debit epoch is less than the last debit, + // in which case we need to refund for that duration. + let last_debit_epoch = caller.subscriber().last_debit_epoch; + if last_debit_epoch < debit_epoch { + self.debit_caller(&mut caller, debit_epoch); + } else if last_debit_epoch != debit_epoch && !params.skip_credit_return { + // The account was debited after this blob's expiry + // Return over-debited credit + return_duration = last_debit_epoch - group_expiry; + let return_credits = self.get_storage_cost(return_duration, &blob.blob.size); + self.return_committed_credit_for_caller(&mut caller, &return_credits); + } + } + + // Account for reclaimed size and move committed credit to free credit + self.release_capacity_for_subnet_and_caller( + &mut caller, + group_expiry, + new_group_expiry, + blob.blob.size, + blob.blob.subscribers.len(), + ); + } + + let blob_deleted = self.blobs.delete_subscription( + store, + &caller, + params.hash, + params.id.clone(), + &mut blob, + )?; + + if blob.subscription.failed && blob_deleted { + self.blobs.release_capacity(blob.blob.size); + } + + // Save accounts + self.save_caller(&mut caller, &mut accounts)?; + + Ok((blob_deleted, blob.blob.size, return_duration > 0)) + } + + /// Adjusts all subscriptions for `account` according to its max TTL. + /// + /// Returns the number of subscriptions processed and the next key to continue iteration. + /// If `starting_hash` is `None`, iteration starts from the beginning. + /// If `limit` is `None`, all subscriptions are processed. + /// If `limit` is not `None`, iteration stops after examining `limit` blobs. + /// + /// Flushes state to the blockstore. 
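+    ///
+    /// A hypothetical pagination sketch (the `state`, `config`, `store`, and
+    /// `subscriber` bindings are assumed, as in this crate's tests):
+    /// ```ignore
+    /// let mut start = None;
+    /// loop {
+    ///     let (_count, next, _deleted) = state.trim_blob_expiries(
+    ///         &config, &store, subscriber, current_epoch, start, Some(50),
+    ///     )?;
+    ///     start = next;
+    ///     if start.is_none() {
+    ///         break;
+    ///     }
+    /// }
+    /// ```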
+ pub fn trim_blob_expiries( + &mut self, + config: &RecallConfig, + store: &BS, + subscriber: Address, + current_epoch: ChainEpoch, + starting_hash: Option, + limit: Option, + ) -> Result<(u32, Option, Vec), ActorError> { + let new_ttl = self.get_account_max_ttl(config, store, subscriber)?; + let mut deleted_blobs = Vec::new(); + let mut processed = 0; + let blobs = self.blobs.hamt(store)?; + let starting_key = starting_hash.map(|h| BytesKey::from(h.0.as_slice())); + + fn err_map(e: E) -> ActorError + where + E: Error, + { + ActorError::illegal_state(format!( + "subscriptions group cannot be iterated over: {}", + e + )) + } + + // Walk blobs + let (_, next_key) = blobs.for_each_ranged( + starting_key.as_ref(), + limit.map(|l| l as usize), + |hash, blob| -> Result { + let subscribers = blob.subscribers.hamt(store)?; + if let Some(subscriptions) = subscribers.get(&subscriber)? { + let subscriptions_hamt = subscriptions.hamt(store)?; + for val in subscriptions_hamt.iter() { + let (id_bytes, subscription) = val.map_err(err_map)?; + let id = from_utf8(id_bytes).map_err(err_map)?; + + // Skip expired subscriptions, they will be handled by cron tick + let expired = subscription.expiry <= current_epoch; + if !expired && subscription.expiry - subscription.added > new_ttl { + if new_ttl == 0 { + // Delete subscription + let (from_disc, _, _) = self.delete_blob( + store, + subscriber, + None, + DeleteBlobStateParams { + epoch: current_epoch, + hash, + id: SubscriptionId::new(id)?, + skip_credit_return: false, + }, + )?; + if from_disc { + deleted_blobs.push(hash); + }; + } else { + // Reduce subscription TTL + self.add_blob( + store, + config, + subscriber, + None, + AddBlobStateParams { + hash, + metadata_hash: blob.metadata_hash, + id: SubscriptionId::new(id)?, + size: blob.size, + ttl: Some(new_ttl), + source: subscription.source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + )?; + } + processed += 1; + } + } + } + Ok(true) + }, + )?; + + Ok((processed, next_key, deleted_blobs)) + } + + /// Returns an error if the subnet storage is at capacity. + pub(crate) fn ensure_capacity(&self, capacity: u64) -> Result<(), ActorError> { + if self.capacity_available(capacity).is_zero() { + return Err(ActorError::forbidden( + "subnet has reached storage capacity".into(), + )); + } + Ok(()) + } + + /// Return available capacity as a difference between `blob_capacity_total` and `capacity_used`. + pub(crate) fn capacity_available(&self, blob_capacity_total: u64) -> u64 { + // Prevent underflow. We only care if free capacity is > 0 anyway. + blob_capacity_total.saturating_sub(self.blobs.bytes_size()) + } + + /// Returns the [`Credit`] storage cost for the given duration and size. + pub(crate) fn get_storage_cost(&self, duration: i64, size: &u64) -> Credit { + Credit::from_whole(duration * BigInt::from(*size)) + } + + /// Returns the current [`Credit`] debit amount based on the caller's current capacity used + /// and the given duration. + pub(crate) fn get_debit_for_caller( + &self, + caller: &Caller, + epoch: ChainEpoch, + ) -> Credit { + let debit_duration = epoch.saturating_sub(caller.subscriber().last_debit_epoch); + Credit::from_whole(BigInt::from(caller.subscriber().capacity_used) * debit_duration) + } + + /// Returns an account's current max allowed blob TTL by address. + pub(crate) fn get_account_max_ttl( + &self, + config: &RecallConfig, + store: &BS, + address: Address, + ) -> Result { + let accounts = self.accounts.hamt(store)?; + Ok(accounts + .get(&address)? 
+ .map_or(config.blob_default_ttl, |account| account.max_ttl)) + } + + /// Releases capacity for the subnet and caller. + /// Does NOT flush the state to the blockstore. + fn release_capacity_for_subnet_and_caller( + &mut self, + caller: &mut Caller, + group_expiry: Option, + new_group_expiry: Option, + size: u64, + num_subscribers: u64, + ) { + // If there's no new group expiry, we can reclaim capacity. + let reclaim_capacity = if new_group_expiry.is_none() { size } else { 0 }; + + // Only reclaim subnet capacity if this was the last subscriber + if num_subscribers == 1 { + self.blobs.release_capacity(reclaim_capacity); + } + + // We can release credits if the new group expiry is in the future, + // considering other subscriptions may still be active. + let reclaim_credits = group_expiry + .map(|group_expiry| { + let last_debit_epoch = caller.subscriber().last_debit_epoch; + if last_debit_epoch < group_expiry { + // let reclaim_start = new_group_expiry.unwrap_or(last_debit_epoch); + let reclaim_start = + new_group_expiry.map_or(last_debit_epoch, |e| e.max(last_debit_epoch)); + self.get_storage_cost(group_expiry - reclaim_start, &size) + } else { + Credit::zero() + } + }) + .unwrap_or_default(); + + self.release_capacity_for_caller(caller, reclaim_capacity, &reclaim_credits); + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/params.rs b/fendermint/actors/blobs/src/state/blobs/params.rs new file mode 100644 index 0000000000..5d55fcf87f --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/params.rs @@ -0,0 +1,138 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::{BlobStatus, SubscriptionId}, + bytes::B256, +}; +use fvm_shared::{clock::ChainEpoch, econ::TokenAmount}; + +/// Params for adding a blob. +#[derive(Clone, Debug)] +pub struct AddBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for blob recovery. + pub metadata_hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Blob size. + pub size: u64, + /// Blob time-to-live epochs. + /// If not specified, the current default TTL from the config actor is used. + pub ttl: Option, + /// Chain epoch. + pub epoch: ChainEpoch, + /// Token amount sent with the transaction. + pub token_amount: TokenAmount, +} + +impl AddBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::AddBlobParams, + epoch: ChainEpoch, + token_amount: TokenAmount, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + metadata_hash: params.metadata_hash, + id: params.id, + size: params.size, + ttl: params.ttl, + epoch, + token_amount, + } + } +} + +/// Params for deleting a blob. +#[derive(Clone, Debug)] +pub struct DeleteBlobStateParams { + /// Blob blake3 hash. + pub hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Chain epoch. + pub epoch: ChainEpoch, + /// Whether to skip returning credit for an over-debit. + /// This is needed to handle cases where multiple subscriptions are being expired in the same + /// epoch for the same subscriber. 
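+    /// Skipping prevents the same over-debit from being returned more than once.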
+ pub skip_credit_return: bool, +} + +impl DeleteBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::DeleteBlobParams, + epoch: ChainEpoch, + ) -> Self { + Self { + hash: params.hash, + id: params.id, + epoch, + skip_credit_return: false, + } + } +} + +/// Params for setting a blob to pending state. +#[derive(Clone, Debug)] +pub struct SetPendingBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, +} + +impl SetPendingBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::SetBlobPendingParams, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + size: params.size, + id: params.id, + } + } +} + +/// Params for finalizing a blob. +#[derive(Clone, Debug)] +pub struct FinalizeBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Finalized status. + pub status: BlobStatus, + /// Chain epoch. + pub epoch: ChainEpoch, +} + +impl FinalizeBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::FinalizeBlobParams, + epoch: ChainEpoch, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + size: params.size, + id: params.id, + status: params.status, + epoch, + } + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/queue.rs b/fendermint/actors/blobs/src/state/blobs/queue.rs new file mode 100644 index 0000000000..54be2749a5 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/queue.rs @@ -0,0 +1,210 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fendermint_actor_blobs_shared::{self as shared, blobs::SubscriptionId, bytes::B256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{tuple::*, RawBytes}; +use fvm_shared::address::Address; +use recall_ipld::hamt::{self, map::TrackedFlushResult, MapKey}; + +/// Key used to namespace a blob source set. +#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct BlobSource { + /// Blob subscriber. + pub subscriber: Address, + /// Subscription ID. + pub id: SubscriptionId, + /// Source Iroh node ID. + pub source: B256, +} + +impl BlobSource { + /// Create a new blob source. 
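+    ///
+    /// Hypothetical usage (the subscriber address and Iroh node ID are placeholders):
+    /// ```ignore
+    /// let source = BlobSource::new(subscriber, SubscriptionId::default(), iroh_node_id);
+    /// ```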
+ pub fn new(subscriber: Address, id: SubscriptionId, source: B256) -> Self { + Self { + subscriber, + id, + source, + } + } +} + +impl std::fmt::Display for BlobSource { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "BlobSource(subscriber: {}, id: {}, source: {})", + self.subscriber, self.id, self.source + ) + } +} + +impl MapKey for BlobSource { + fn from_bytes(b: &[u8]) -> Result { + let raw_bytes = RawBytes::from(b.to_vec()); + fil_actors_runtime::cbor::deserialize(&raw_bytes, "BlobSource") + .map_err(|e| format!("Failed to deserialize BlobSource {}", e)) + } + + fn to_bytes(&self) -> Result, String> { + let raw_bytes = fil_actors_runtime::cbor::serialize(self, "BlobSource") + .map_err(|e| format!("Failed to serialize BlobSource {}", e))?; + Ok(raw_bytes.to_vec()) + } +} + +/// A set of [`shared::blobs::BlobSource`]s. +/// A blob in the collection may have multiple sources. +type BlobSourceSet = HashSet; + +/// A collection of blobs used for progress queues. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Queue { + /// The HAMT root. + pub root: hamt::Root>, + /// Number of sources in the collection. + size: u64, + /// Number of blob bytes in the collection. + /// A blob with multiple sources is only counted once. + bytes_size: u64, +} + +impl Queue { + /// Returns a new progress collection. + pub fn new(store: &BS, name: &str) -> Result { + let root = hamt::Root::>::new(store, name)?; + Ok(Self { + root, + size: 0, + bytes_size: 0, + }) + } + + /// Returns a store name for the inner root. + fn store_name_per_hash(&self, hash: B256) -> String { + format!("{}.{}", self.root.name(), hash) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result>, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult>, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Number of sources in the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Returns the number of blob bytes in the collection. + /// A blob with multiple sources is only counted once. + pub fn bytes_size(&self) -> u64 { + self.bytes_size + } + + /// Adds/updates an entry in the collection. + pub fn upsert( + &mut self, + store: BS, + hash: B256, + source: BlobSource, + blob_size: u64, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + let sources_root = if let Some(sources_root) = collection.get(&hash)? { + // Modify the existing entry + let mut sources = sources_root.hamt(&store, 0)?; + sources.set_and_flush(&source, ())? + } else { + // Entry did not exist, add and increase tracked bytes size + let sources_root = + hamt::Root::::new(&store, &self.store_name_per_hash(hash))?; + let mut sources = sources_root.hamt(&store, 0)?; + self.bytes_size = self.bytes_size.saturating_add(blob_size); + sources.set_and_flush(&source, ())? + }; + self.save_tracked(collection.set_and_flush_tracked(&hash, sources_root)?); + Ok(()) + } + + /// Returns a page of entries from the collection. 
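+    ///
+    /// Sketch of a consumer loop (the `queue` and `store` bindings are assumed):
+    /// ```ignore
+    /// for (hash, sources) in queue.take_page(&store, 32)? {
+    ///     // resolve `hash` from one of `sources`
+    /// }
+    /// ```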
+ pub fn take_page( + &self, + store: BS, + size: u32, + ) -> Result, ActorError> { + let collection = self.hamt(&store)?; + let mut page = Vec::with_capacity(size as usize); + collection.for_each_ranged(None, Some(size as usize), |hash, sources_root| { + let sources = sources_root.hamt(&store, 0)?; + let mut set = HashSet::new(); + sources.for_each(|source, _| { + set.insert((source.subscriber, source.id, source.source)); + Ok(()) + })?; + page.push((hash, set)); + Ok(true) + })?; + page.shrink_to_fit(); + Ok(page) + } + + /// Removes a source from an entry in the collection. + /// If the entry is empty after removing the source, the entry is also removed. + pub fn remove_source( + &mut self, + store: BS, + hash: &B256, + size: u64, + source: BlobSource, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + if let Some(mut source_root) = collection.get(hash)? { + let mut sources = source_root.hamt(&store, 1)?; + (source_root, _) = sources.delete_and_flush(&source)?; + if sources.is_empty() { + self.save_tracked(collection.delete_and_flush_tracked(hash)?.0); + self.bytes_size = self.bytes_size.saturating_sub(size); + } else { + self.save_tracked(collection.set_and_flush_tracked(hash, source_root)?); + } + } + Ok(()) + } + + /// Removes an entry from the collection. + pub fn remove_entry( + &mut self, + store: BS, + hash: &B256, + size: u64, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + let (res, deleted) = collection.delete_and_flush_tracked(hash)?; + self.save_tracked(res); + if deleted.is_some() { + self.bytes_size = self.bytes_size.saturating_sub(size); + } + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/subscribers.rs b/fendermint/actors/blobs/src/state/blobs/subscribers.rs new file mode 100644 index 0000000000..bd8646ae9b --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/subscribers.rs @@ -0,0 +1,142 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::blobs::Subscription; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use recall_ipld::{hamt, hamt::map::TrackedFlushResult}; + +use super::{AddBlobStateParams, Subscriptions}; +use crate::caller::Caller; + +/// Represents the result of a subscriber upsert. +#[derive(Debug, Clone)] +pub struct UpsertSubscriberResult { + /// New or updated subscription. + pub subscription: Subscription, + /// Whether the subscriber was added or updated. + pub subscriber_added: bool, + /// Previous subscription expiry if the subscription was updated. + pub previous_subscription_expiry: Option, + /// Duration for the new credit commitment. + pub commit_duration: ChainEpoch, + /// Duration for the returned credit commitment. + pub return_duration: ChainEpoch, +} + +/// HAMT wrapper tracking blob [`Subscriptions`]s by subscriber address. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Subscribers { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, +} + +impl Subscribers { + /// Returns a subscriber collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "blob_subscribers")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. 
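+    ///
+    /// Read-only traversal sketch (bindings assumed):
+    /// ```ignore
+    /// let map = subscribers.hamt(&store)?;
+    /// let group = map.get(&subscriber_addr)?;
+    /// ```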
+ pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Creates or updates a subscriber's subscription to a blob, managing all related state + /// changes. + /// + /// This function handles both the creation of new subscribers and updating existing + /// subscribers' subscriptions. It calculates credit commitment and return durations based on + /// the subscription's expiry and the group's maximum expiry. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + expiry: ChainEpoch, + ) -> Result { + let mut subscribers = self.hamt(store)?; + let mut subscriptions = + if let Some(subscriptions) = subscribers.get(&caller.subscriber_address())? { + subscriptions + } else { + Subscriptions::new(store)? + }; + + // If the subscriber has been debited after the group's max expiry, we need to + // determine the duration for which credits will be returned. + // The return duration can only extend up to the current epoch. + let (group_expiry, new_group_expiry) = + subscriptions.max_expiries(store, ¶ms.id, Some(expiry))?; + let return_duration = group_expiry + .filter(|&expiry| params.epoch > expiry) + .map_or(0, |expiry| params.epoch - expiry); + + // Determine the duration for which credits will be committed, considering the subscription + // group may have expiries that cover a portion of the added duration. + // Duration can be negative if the subscriber is reducing expiry. + let new_group_expiry = new_group_expiry.unwrap(); // safe here + let commit_start = group_expiry.map_or(params.epoch, |e| e.max(params.epoch)); + let commit_duration = new_group_expiry - commit_start; + let overlap = commit_start - group_expiry.unwrap_or(params.epoch); + + // Add/update subscription + let result = subscriptions.upsert(store, caller, params, overlap, expiry)?; + + self.save_tracked( + subscribers.set_and_flush_tracked(&caller.subscriber_address(), subscriptions)?, + ); + + Ok(UpsertSubscriberResult { + subscription: result.subscription, + subscriber_added: group_expiry.is_none(), + previous_subscription_expiry: result.previous_expiry, + commit_duration, + return_duration, + }) + } + + /// Saves a subscriber's subscriptions to the blockstore. + /// + /// This is a helper function that simplifies the process of saving a subscriber's subscription + /// data by handling the HAMT operations internally. It creates or updates the subscriber entry + /// in the HAMT and saves the changes to the blockstore. 
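+    ///
+    /// Sketch (assumes a loaded `subscriptions` group for `subscriber_addr`):
+    /// ```ignore
+    /// subscribers.save_subscriptions(&store, subscriber_addr, subscriptions)?;
+    /// ```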
+ pub fn save_subscriptions( + &mut self, + store: &BS, + subscriber: Address, + subscriptions: Subscriptions, + ) -> Result<(), ActorError> { + let mut subscribers = self.hamt(store)?; + self.save_tracked(subscribers.set_and_flush_tracked(&subscriber, subscriptions)?); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/subscriptions.rs b/fendermint/actors/blobs/src/state/blobs/subscriptions.rs new file mode 100644 index 0000000000..83a2393f20 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/subscriptions.rs @@ -0,0 +1,697 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::str::from_utf8; + +use fendermint_actor_blobs_shared::blobs::{Subscription, SubscriptionId}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::clock::ChainEpoch; +use log::debug; +use recall_ipld::{hamt, hamt::map::TrackedFlushResult}; + +use super::AddBlobStateParams; +use crate::caller::Caller; + +/// Represents the result of a subscription upsert. +#[derive(Debug, Clone)] +pub struct UpsertSubscriptionResult { + /// New or updated subscription. + pub subscription: Subscription, + /// Previous subscription expiry if the subscription was updated. + pub previous_expiry: Option, +} + +/// HAMT wrapper tracking blob [`Subscription`]s by subscription ID. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Subscriptions { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, +} + +impl Subscriptions { + /// Returns a subscription collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "subscription_group")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Calculates the current maximum expiry and the new maximum expiry after a potential update. + /// + /// This function serves two purposes: + /// 1. It finds the current maximum expiry among all non-failed subscriptions + /// 2. It calculates what the new maximum expiry would be if the subscription with `target_id` + /// had its expiry updated to `new_value` + /// + /// This is particularly useful for determining if group expiry boundaries need to be updated + /// when a single subscription's expiry changes. 
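+    ///
+    /// Sketch: what would the group's maximum expiry become if `id` were moved to
+    /// epoch 120? (Bindings are assumed; the unit tests below cover concrete cases.)
+    /// ```ignore
+    /// let (current_max, new_max) = subscriptions.max_expiries(&store, &id, Some(120))?;
+    /// ```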
+ pub fn max_expiries( + &self, + store: &BS, + target_id: &SubscriptionId, + new_value: Option, + ) -> Result<(Option, Option), ActorError> { + let mut max = None; + let mut new_max = None; + let subscriptions = self.hamt(store)?; + for val in subscriptions.iter() { + let (id, sub) = deserialize_iter_sub(val)?; + if sub.failed { + continue; + } + if sub.expiry > max.unwrap_or(0) { + max = Some(sub.expiry); + } + let new_value = if &id == target_id { + new_value.unwrap_or_default() + } else { + sub.expiry + }; + if new_value > new_max.unwrap_or(0) { + new_max = Some(new_value); + } + } + // Target ID may not be in the current group + if let Some(new_value) = new_value { + if new_value > new_max.unwrap_or(0) { + new_max = Some(new_value); + } + } + Ok((max, new_max)) + } + + /// Determines if a subscription has the earliest added timestamp and finds the next earliest + /// timestamp. + /// + /// This function checks if the subscription identified by `trim_id` has the earliest "added" + /// timestamp among all active, non-failed subscriptions. It also identifies what would be the + /// new earliest timestamp if this subscription were removed. + /// + /// This is typically used when deciding if a subscription can be safely removed without + /// affecting the overall data retention requirements of the system. + pub fn is_min_added( + &self, + store: &BS, + trim_id: &SubscriptionId, + ) -> Result<(bool, Option), ActorError> { + let subscriptions = self.hamt(store)?; + let trim = subscriptions + .get(trim_id)? + .ok_or(ActorError::not_found(format!( + "subscription id {} not found", + trim_id + )))?; + + let mut next_min = None; + for val in subscriptions.iter() { + let (id, sub) = deserialize_iter_sub(val)?; + if sub.failed || &id == trim_id { + continue; + } + if sub.added < trim.added { + return Ok((false, None)); + } + if sub.added < next_min.unwrap_or(ChainEpoch::MAX) { + next_min = Some(sub.added); + } + } + Ok((true, next_min)) + } + + /// Creates a new subscription or updates an existing one with the provided parameters. + /// + /// This function handles both the creation and update cases for blob subscriptions: + /// - If a subscription with the given ID already exists, it updates its properties + /// - If no subscription exists with the ID, it creates a new one + /// + /// When updating an existing subscription, it preserves the original subscription's + /// added timestamp but updates the expiry, source, delegate, and resets the failed flag. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + overlap: ChainEpoch, + expiry: ChainEpoch, + ) -> Result { + let mut subscriptions = self.hamt(store)?; + if let Some(mut subscription) = subscriptions.get(¶ms.id)? 
{ + let previous_expiry = subscription.expiry; + subscription.expiry = expiry; + subscription.source = params.source; // subscriber can retry from a different source + subscription.delegate = caller.delegate_address(); + subscription.failed = false; + + self.save_tracked( + subscriptions.set_and_flush_tracked(¶ms.id, subscription.clone())?, + ); + + debug!( + "updated subscription to blob {} for {} (key: {})", + params.hash, + caller.subscriber_address(), + params.id + ); + + Ok(UpsertSubscriptionResult { + subscription, + previous_expiry: Some(previous_expiry), + }) + } else { + let subscription = Subscription { + added: params.epoch, + overlap, + expiry, + source: params.source, + delegate: caller.delegate_address(), + failed: false, + }; + + self.save_tracked( + subscriptions.set_and_flush_tracked(¶ms.id, subscription.clone())?, + ); + + debug!( + "created new subscription to blob {} for {} (key: {})", + params.hash, + caller.subscriber_address(), + params.id + ); + + Ok(UpsertSubscriptionResult { + subscription, + previous_expiry: None, + }) + } + } + + /// Saves a subscription with the given ID to the blockstore. + /// + /// This is a helper function that simplifies the process of saving a subscription + /// by handling the HAMT operations internally. It creates or updates the subscription + /// in the HAMT and saves the changes to the blockstore. + pub fn save_subscription( + &mut self, + store: &BS, + id: &SubscriptionId, + subscription: Subscription, + ) -> Result<(), ActorError> { + let mut subscriptions = self.hamt(store)?; + self.save_tracked(subscriptions.set_and_flush_tracked(id, subscription)?); + Ok(()) + } +} + +fn deserialize_iter_sub<'a>( + val: Result<(&hamt::BytesKey, &'a Subscription), hamt::Error>, +) -> Result<(SubscriptionId, &'a Subscription), ActorError> { + let (id_bytes, sub) = val.map_err(|e| { + ActorError::illegal_state(format!( + "failed to deserialize subscription from iter: {}", + e + )) + })?; + let id = from_utf8(id_bytes).map_err(|e| { + ActorError::illegal_state(format!( + "failed to deserialize subscription ID from iter: {}", + e + )) + })?; + let subscription_id = SubscriptionId::new(id).map_err(|e| { + ActorError::illegal_state(format!("failed to decode subscription ID from iter: {}", e)) + })?; + Ok((subscription_id, sub)) +} + +#[cfg(test)] +mod tests { + use super::*; + use fendermint_actor_blobs_shared::blobs::{Subscription, SubscriptionId}; + use fendermint_actor_blobs_testing::new_pk; + use fvm_ipld_blockstore::MemoryBlockstore; + use fvm_shared::clock::ChainEpoch; + + fn create_test_subscription( + id: &str, + added: ChainEpoch, + expiry: ChainEpoch, + failed: bool, + ) -> (SubscriptionId, Subscription) { + let subscription_id = SubscriptionId::new(id).unwrap(); + let subscription = Subscription { + added, + overlap: 0, + expiry, + source: new_pk(), + delegate: None, + failed, + }; + (subscription_id, subscription) + } + + #[test] + fn test_max_expiries_empty_group() { + let store = MemoryBlockstore::default(); + let subscriptions = Subscriptions::new(&store).unwrap(); + + let target_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &target_id, Some(100)) + .unwrap(); + + assert_eq!(max, None, "Max expiry should be None for empty group"); + assert_eq!( + new_max, + Some(100), + "New max should be the new value when group is empty" + ); + } + + #[test] + fn test_max_expiries_single_subscription() { + let store = MemoryBlockstore::default(); + let mut subscriptions = 
Subscriptions::new(&store).unwrap(); + + // Add a single subscription + let (id, subscription) = create_test_subscription("test1", 0, 50, false); + subscriptions + .save_subscription(&store, &id, subscription) + .unwrap(); + + // Test with existing ID + let (max, new_max) = subscriptions.max_expiries(&store, &id, Some(100)).unwrap(); + assert_eq!( + max, + Some(50), + "Max should be the existing subscription's expiry" + ); + assert_eq!(new_max, Some(100), "New max should be the new value"); + + // Test with non-existing ID + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, Some(80)) + .unwrap(); + assert_eq!( + max, + Some(50), + "Max should be the existing subscription's expiry" + ); + assert_eq!( + new_max, + Some(80), + "New max should be the new value for non-existing ID" + ); + } + + #[test] + fn test_max_expiries_multiple_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with different expiries + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + let (id3, sub3) = create_test_subscription("test3", 0, 30, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Test updating the middle expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(60)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should still be 70 after update to 60" + ); + + // Test updating to the new highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(100)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!(new_max, Some(100), "New max should be 100 after update"); + + // Test with non-existing ID + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, Some(120)) + .unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(120), + "New max should be 120 for non-existing ID" + ); + } + + #[test] + fn test_max_expiries_with_failed_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add a mix of failed and non-failed subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, true); // Failed + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); // Not failed + let (id3, sub3) = create_test_subscription("test3", 0, 90, true); // Failed (highest) + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Failed subscriptions should be ignored in max calculation + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(60)).unwrap(); + assert_eq!( + max, + Some(70), + "Max should only consider non-failed subscriptions (70)" + ); + assert_eq!(new_max, Some(60), "New max should be 60 after update"); + + // Test updating a failed subscription + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(100)).unwrap(); 
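+        // id1 is failed, so it never counts toward `max`, but the proposed new
+        // value still counts toward `new_max`.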
+ assert_eq!( + max, + Some(70), + "Max should only consider non-failed subscriptions (70)" + ); + assert_eq!( + new_max, + Some(100), + "New max should be 100 after updating a failed subscription" + ); + } + + #[test] + fn test_max_expiries_with_none_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with None as new_value - should calculate without modifying + let (max, new_max) = subscriptions.max_expiries(&store, &id1, None).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should remain 70 when target expiry is None" + ); + + // Test with target_id that doesn't exist and None as new_value + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, None) + .unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should remain 70 for non-existing ID with None value" + ); + } + + #[test] + fn test_max_expiries_with_zero_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with zero as new_value for the highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(50), + "New max should be 50 after setting highest to 0" + ); + + // Test with zero as new_value for the lowest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should be the highest expiry (70)" + ); + } + + #[test] + fn test_max_expiries_with_one_zero_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, true); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with zero as new_value for the highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, None, + "New max should be None after setting highest to 0" + ); + + // Test with zero as new_value for the lowest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should be the highest expiry (70)" + ); + } + + #[test] + fn 
test_is_min_added_empty_group() { + let store = MemoryBlockstore::default(); + let subscriptions = Subscriptions::new(&store).unwrap(); + + let target_id = SubscriptionId::new("nonexistent").unwrap(); + let result = subscriptions.is_min_added(&store, &target_id); + + // This should return not found error since no subscription exists + assert!(result.is_err()); + + // Verify it's the expected error type + match result { + Err(e) => { + assert!(e.to_string().contains("not found")); + assert!(e.to_string().contains("nonexistent")); + } + _ => panic!("Expected not found error"), + } + } + + #[test] + fn test_is_min_added_single_subscription() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add a single subscription + let (id, subscription) = create_test_subscription("test1", 100, 200, false); + subscriptions + .save_subscription(&store, &id, subscription) + .unwrap(); + + // Check if it's the minimum (it should be since it's the only one) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id).unwrap(); + assert!(is_min, "Single subscription should be minimum"); + assert_eq!(next_min, None, "No next minimum should exist"); + } + + #[test] + fn test_is_min_added_multiple_subscriptions_is_min() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with the first having the earliest added timestamp + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + let (id3, sub3) = create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id1 is the minimum (it should be) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!( + is_min, + "Subscription with earliest added timestamp should be minimum" + ); + assert_eq!(next_min, Some(150), "Next minimum should be 150 (from id2)"); + } + + #[test] + fn test_is_min_added_multiple_subscriptions_not_min() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with the second one not being the earliest + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + let (id3, sub3) = create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id2 is the minimum (it shouldn't be) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + !is_min, + "Subscription with later added timestamp should not be minimum" + ); + assert_eq!( + next_min, None, + "Next minimum should be None when not the minimum" + ); + } + + #[test] + fn test_is_min_added_equal_timestamps() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with equal earliest timestamps + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 100, 250, false); + let (id3, sub3) 
= create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check id1 - both id1 and id2 have the same timestamp + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!( + is_min, + "Subscription with equal earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id2)"); + + // Check id2 - both id1 and id2 have the same timestamp + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + is_min, + "Subscription with equal earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id1)"); + } + + #[test] + fn test_is_min_added_with_failed_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with failed ones having earlier timestamps + let (id1, sub1) = create_test_subscription("test1", 50, 150, true); // Failed (earliest) + let (id2, sub2) = create_test_subscription("test2", 100, 200, false); // Not failed (should be min) + let (id3, sub3) = create_test_subscription("test3", 75, 175, true); // Failed (between id1 and id2) + let (id4, sub4) = create_test_subscription("test4", 150, 250, false); // Not failed (later) + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + subscriptions.save_subscription(&store, &id4, sub4).unwrap(); + + // Check if id2 is the minimum (it should be since failed ones are ignored) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + is_min, + "Non-failed subscription with earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(150), "Next minimum should be 150 (from id4)"); + + // Check a failed subscription + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!(is_min, "Failed subscription is checked against itself"); // This is somewhat counterintuitive + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id2)"); + } + + #[test] + fn test_is_min_added_all_other_subscriptions_are_failed() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions where all others are failed + let (id1, sub1) = create_test_subscription("test1", 100, 200, true); // Failed + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); // Only non-failed subscription + let (id3, sub3) = create_test_subscription("test3", 50, 150, true); // Failed, earliest + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id2 is the minimum (it should be since all others are failed) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!(is_min, "Only non-failed subscription should be minimum"); + assert_eq!( + next_min, None, + "No next minimum should exist when all others are failed" + ); + } + + #[test] + fn test_is_min_added_with_nonexistent_id() { + let store = MemoryBlockstore::default(); + let mut subscriptions = 
Subscriptions::new(&store).unwrap(); + + // Add some subscriptions + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Check with nonexistent ID + let nonexistent_id = SubscriptionId::new("nonexistent").unwrap(); + let result = subscriptions.is_min_added(&store, &nonexistent_id); + + // Should return a "not found" error + assert!(result.is_err()); + match result { + Err(e) => { + assert!(e.to_string().contains("not found")); + assert!(e.to_string().contains("nonexistent")); + } + _ => panic!("Expected not found error"), + } + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/tests.rs b/fendermint/actors/blobs/src/state/blobs/tests.rs new file mode 100644 index 0000000000..bd3b35b04a --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/tests.rs @@ -0,0 +1,2118 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::AccountStatus, + blobs::{BlobStatus, SubscriptionId}, + credit::Credit, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore}; +use fvm_shared::{address::Address, bigint::BigInt, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use super::{ + AddBlobStateParams, DeleteBlobStateParams, FinalizeBlobStateParams, SetPendingBlobStateParams, +}; +use crate::{caller::DelegationOptions, testing::check_approval_used, State}; + +#[test] +fn test_add_blob_refund() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + add_blob_refund( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_add_blob_refund_with_approval() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + add_blob_refund( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn add_blob_refund<BS: Blockstore>( + config: &RecallConfig, + store: &BS, + mut state: State<BS>, + caller: Address, + sponsor: Option<Address>
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let token_credit_rate = BigInt::from(1_000_000_000_000_000_000u64); + let mut credit_amount = token_amount.clone() * &config.token_credit_rate; + + // Add a blob with a default subscription ID + let (hash1, size1) = new_hash(1024); + let add1_epoch = current_epoch; + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash1, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size: size1, + ttl: Some(config.blob_min_ttl), + source, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size1); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size1), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1); + + assert!(state + .set_account_status( + &store, + config, + subscriber, + AccountStatus::Extended, + current_epoch + ) + .is_ok()); + + // Add another blob past the first blob's expiry + let (hash2, size2) = new_hash(2048); + let add2_epoch = ChainEpoch::from(config.blob_min_ttl + 11); + let id2 = SubscriptionId::new("foo").unwrap(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash2, + metadata_hash: new_metadata_hash(), + id: id2.clone(), + size: size2, + ttl: Some(config.blob_min_ttl), + source, + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 2); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 2); + assert_eq!(stats.bytes_added, size1 + size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + let blob1_expiry = ChainEpoch::from(config.blob_min_ttl + add1_epoch); + let overcharge = BigInt::from((add2_epoch - blob1_expiry) as u64 * size1); + assert_eq!( + account.credit_committed, // this includes an overcharge that needs to be refunded + Credit::from_whole(config.blob_min_ttl as u64 * size2 - overcharge), + ); + credit_amount -= Credit::from_whole(config.blob_min_ttl as u64 * size2); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1 + size2); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + (token_amount.clone() * &token_credit_rate) + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 2); + assert_eq!(state.blobs.pending.len(), 0); + + // Add the first 
(now expired) blob again + let add3_epoch = ChainEpoch::from(config.blob_min_ttl + 21); + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash1, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size: size1, + ttl: Some(config.blob_min_ttl), + source, + epoch: add3_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 2); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 2); + assert_eq!(stats.bytes_added, size1 + size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add3_epoch); + assert_eq!( + account.credit_committed, // should not include overcharge due to refund + Credit::from_whole( + (config.blob_min_ttl - (add3_epoch - add2_epoch)) as u64 * size2 + + config.blob_min_ttl as u64 * size1 + ), + ); + credit_amount -= Credit::from_whole(config.blob_min_ttl as u64 * size1); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1 + size2); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + token_amount.clone() * &token_credit_rate + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 2); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_add_blob_same_hash_same_account() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + add_blob_same_hash_same_account( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_add_blob_same_hash_same_account_with_approval() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + add_blob_same_hash_same_account( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn add_blob_same_hash_same_account<BS: Blockstore>( + config: &RecallConfig, + store: &BS, + mut state: State<BS>, + caller: Address, + sponsor: Option<Address>
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let mut credit_amount = + Credit::from_atto(token_amount.atto().clone()) * &config.token_credit_rate; + + assert!(state + .set_account_status( + &store, + config, + subscriber, + AccountStatus::Extended, + current_epoch + ) + .is_ok()); + + // Add a blob with a default subscription ID + let (hash, size) = new_hash(1024); + let add1_epoch = current_epoch; + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add1_epoch); + assert_eq!(sub.expiry, add1_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size); + + // Check the blob status + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Added) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); + assert_eq!(blob.status, BlobStatus::Added); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); + let got_sub = group_hamt.get(&id1.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); + + // Set to status pending + let res = state.set_blob_pending( + &store, + subscriber, + SetPendingBlobStateParams { + hash, + size, + id: id1.clone(), + source, + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 1); + assert_eq!(stats.bytes_resolving, size); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Finalize as resolved + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + subscriber, + FinalizeBlobStateParams { + source, + hash, + size, + id: id1.clone(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + 
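+ // Once resolved, the blob drops out of the "added" queue and nothing is awaiting resolution, so the added counters below should read zero.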
assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Add the same blob again with a default subscription ID + let add2_epoch = ChainEpoch::from(21); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add1_epoch); // added should not change + assert_eq!(sub.expiry, add2_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check the blob status + // Should already be resolved + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); + assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); // Still only one subscription + let got_sub = group_hamt.get(&id1.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + assert_eq!( + account.credit_committed, // stays the same because we're starting over + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= Credit::from_whole((add2_epoch - add1_epoch) as u64 * size); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); // not changed + + assert_eq!(state.blobs.expiries.len(store).unwrap(), 1); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Add the same blob again but use a different subscription ID + let add3_epoch = ChainEpoch::from(31); + let id2 = SubscriptionId::new("foo").unwrap(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id2.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add3_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add3_epoch); + assert_eq!(sub.expiry, add3_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Check the blob status + // Should already be resolved + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id2.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); // 
still only one subscriber + assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 2); + let got_sub = group_hamt.get(&id2.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add3_epoch); + assert_eq!( + account.credit_committed, // stays the same because we're starting over + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= Credit::from_whole((add3_epoch - add2_epoch) as u64 * size); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); // not changed + + // Debit all accounts + let debit_epoch = ChainEpoch::from(41); + let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap(); + assert!(deletes_from_disc.is_empty()); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!( + account.credit_committed, // debit reduces this + Credit::from_whole((config.blob_min_ttl - (debit_epoch - add3_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); // not changed + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Delete the default subscription ID + let delete_epoch = ChainEpoch::from(51); + let res = state.delete_blob( + &store, + caller, + sponsor, + DeleteBlobStateParams { + hash, + id: id1.clone(), + epoch: delete_epoch, + skip_credit_return: false, + }, + ); + + assert!(res.is_ok()); + let (delete_from_disk, deleted_size, _) = res.unwrap(); + assert!(!delete_from_disk); + assert_eq!(deleted_size, size); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + + assert_eq!(blob.subscribers.len(), 1); // still one subscriber + assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); + let sub = group_hamt.get(&id2.clone()).unwrap().unwrap(); + assert_eq!(sub.added, add3_epoch); + assert_eq!(sub.expiry, add3_epoch + config.blob_min_ttl); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, delete_epoch); + assert_eq!( + account.credit_committed, // debit reduces this + Credit::from_whole((config.blob_min_ttl - (delete_epoch - add3_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); // not changed + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + (token_amount.clone() * &config.token_credit_rate) + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), size); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 1); + 
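+ // Only the "foo" subscription's expiry entry should remain now that the default-id subscription is deleted; nothing is queued as added or pending.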
assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_add_blob_ttl_exceeds_account_max_ttl() { + setup_logs(); + + let config = RecallConfig::default(); + const YEAR: ChainEpoch = 365 * 24 * 60 * 60; + + // Test cases structure + struct TestCase { + name: &'static str, + account_ttl_status: AccountStatus, + blob_ttl: Option<ChainEpoch>, + should_succeed: bool, + expected_account_ttl: ChainEpoch, + expected_blob_ttl: ChainEpoch, + } + + // Define test cases + let test_cases = vec![ + TestCase { + name: "Reduced status rejects even minimum TTL", + account_ttl_status: AccountStatus::Reduced, + blob_ttl: Some(config.blob_min_ttl), + should_succeed: false, + expected_account_ttl: 0, + expected_blob_ttl: 0, + }, + TestCase { + name: "Reduced status rejects no TTL", + account_ttl_status: AccountStatus::Reduced, + blob_ttl: None, + should_succeed: false, + expected_account_ttl: 0, + expected_blob_ttl: 0, + }, + TestCase { + name: "Default status allows default TTL", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl), + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Default status sets no TTL to default without auto renew", + account_ttl_status: AccountStatus::Default, + blob_ttl: None, + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Default status preserves given TTL if it's less than default", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl - 1), + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl - 1, + }, + TestCase { + name: "Default status rejects TTLs higher than default", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl + 1), + should_succeed: false, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: 0, + }, + TestCase { + name: "Extended status allows any TTL", + account_ttl_status: AccountStatus::Extended, + blob_ttl: Some(YEAR), + should_succeed: true, + expected_account_ttl: ChainEpoch::MAX, + expected_blob_ttl: YEAR, + }, + ]; + + // Run all test cases + for tc in test_cases { + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + state + .set_account_status( + &store, + &config, + caller, + tc.account_ttl_status, + current_epoch, + ) + .unwrap(); + + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: tc.blob_ttl, + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + + let account_ttl = state.get_account_max_ttl(&config, &store, caller).unwrap(); + assert_eq!( + account_ttl, tc.expected_account_ttl, + "Test case '{}' has unexpected account TTL (expected {}, got {})", + tc.name, tc.expected_account_ttl, account_ttl + ); + + if tc.should_succeed { 
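+ // Success path: the add should have gone through, and each stored subscription's expiry should equal the add epoch plus the expected TTL.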
assert!( + res.is_ok(), + "Test case '{}' should succeed but failed: {:?}", + tc.name, + res.err() + ); + + let res = state.get_blob(&store, hash); + assert!(res.is_ok(), "Failed to get blob: {:?}", res.err()); + let blob = res.unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(&store).unwrap(); + subscribers + .for_each(|_, group| { + let group_hamt = group.hamt(&store).unwrap(); + for val in group_hamt.iter() { + let (_, sub) = val.unwrap(); + assert_eq!( + sub.expiry, + current_epoch + tc.expected_blob_ttl, + "Test case '{}' has unexpected blob expiry", + tc.name + ); + } + Ok(()) + }) + .unwrap(); + } else { + assert!( + res.is_err(), + "Test case '{}' should fail but succeeded", + tc.name + ); + assert_eq!( + res.err().unwrap().msg(), + format!( + "attempt to add a blob with TTL ({}) that exceeds account's max allowed TTL ({})", + tc.blob_ttl.map_or_else(|| "none".to_string(), |ttl| ttl.to_string()), tc.account_ttl_status.get_max_ttl(config.blob_default_ttl), + ), + "Test case '{}' failed with unexpected error message", + tc.name + ); + } + } +} + +#[test] +fn test_add_blob_with_overflowing_ttl() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(1000000); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + let res = state.set_account_status( + &store, + &config, + caller, + AccountStatus::Extended, + current_epoch, + ); + assert!(res.is_ok()); + + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: Some(ChainEpoch::MAX), + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.expiry, ChainEpoch::MAX); +} + +#[test] +fn test_finalize_blob_from_bad_state() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + // Add a blob + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Finalize as pending + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Pending, + epoch: finalize_epoch, + }, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("cannot finalize blob {} as added or pending", hash) + ); +} + +#[test] +fn test_finalize_blob_resolved() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = 
TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + // Add a blob + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Set to status pending + let res = state.set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: SubscriptionId::default(), + source, + }, + ); + assert!(res.is_ok()); + + // Finalize as resolved + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + + // Check status + let status = state + .get_blob_status(&store, caller, hash, SubscriptionId::default()) + .unwrap() + .unwrap(); + assert!(matches!(status, BlobStatus::Resolved)); + + // Check indexes + assert_eq!(state.blobs.expiries.len(&store).unwrap(), 1); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); +} + +#[test] +fn test_finalize_blob_failed() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + let credit_amount = amount * &config.token_credit_rate; + + // Add a blob + let add_epoch = current_epoch; + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source, + epoch: add_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Set to status pending + let res = state.set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: SubscriptionId::default(), + source, + }, + ); + assert!(res.is_ok()); + + // Finalize as failed + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Failed, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + + // Check status + let status = state + .get_blob_status(&store, caller, hash, SubscriptionId::default()) + .unwrap() + .unwrap(); + assert!(matches!(status, BlobStatus::Failed)); + + // Check the account balance + let account = state.get_account(&store, caller).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add_epoch); + assert_eq!(account.credit_committed, Credit::from_whole(0)); // credit was released + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, 0); // capacity was released + + // Check state + assert_eq!(state.credits.credit_committed, Credit::from_whole(0)); // credit was released + assert_eq!(state.credits.credit_debited, Credit::from_whole(0)); + assert_eq!(state.blobs.bytes_size(), 0); // capacity was released + + // Check indexes + 
assert_eq!(state.blobs.expiries.len(&store).unwrap(), 1); // remains until the blob is explicitly deleted + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); +} + +#[test] +fn test_finalize_blob_failed_refund() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + let mut credit_amount = amount.clone() * &config.token_credit_rate; + + assert!(state + .set_account_status( + &store, + &config, + caller, + AccountStatus::Extended, + current_epoch + ) + .is_ok()); + + // Add a blob + let add_epoch = current_epoch; + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check the account balance + let account = state.get_account(&store, caller).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!(state.credits.credit_debited, Credit::from_whole(0)); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); // capacity is in use, not yet released + + // Debit accounts to trigger a refund when we fail below + let debit_epoch = ChainEpoch::from(11); + let (deletes_from_disc, _) = state.debit_accounts(&store, &config, debit_epoch).unwrap(); + assert!(deletes_from_disc.is_empty()); + + // Check the account balance + let account = state.get_account(&store, caller).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole((config.blob_min_ttl - (debit_epoch - add_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + Credit::from_whole((debit_epoch - add_epoch) as u64 * size) + ); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); + + // Set to status pending + let res = state.set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: SubscriptionId::default(), + source, + }, + ); + assert!(res.is_ok()); + + // Finalize as failed + let finalize_epoch = ChainEpoch::from(21); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Failed, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + + // Check status + let status = state + .get_blob_status(&store, caller, hash, SubscriptionId::default()) + .unwrap() + .unwrap(); + assert!(matches!(status, BlobStatus::Failed)); + + // Check the account balance + let account = state.get_account(&store, 
caller).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!(account.credit_committed, Credit::from_whole(0)); // credit was released + assert_eq!( + account.credit_free, + amount.clone() * &config.token_credit_rate + ); // credit was refunded + assert_eq!(account.capacity_used, 0); // capacity was released + + // Check state + assert_eq!(state.credits.credit_committed, Credit::from_whole(0)); // credit was released + assert_eq!(state.credits.credit_debited, Credit::from_whole(0)); // credit was refunded and released + assert_eq!(state.blobs.bytes_size(), 0); // capacity was released + + // Check indexes + assert_eq!(state.blobs.expiries.len(&store).unwrap(), 1); // remains until the blob is explicitly deleted + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); +} + +#[test] +fn test_delete_blob_refund() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + delete_blob_refund( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_delete_blob_refund_with_approval() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + delete_blob_refund( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn delete_blob_refund<BS: Blockstore>( + config: &RecallConfig, + store: &BS, + mut state: State<BS>, + caller: Address, + sponsor: Option<Address>
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let mut credit_amount = token_amount * &config.token_credit_rate; + + // Add a blob + let add1_epoch = current_epoch; + let (hash1, size1) = new_hash(1024); + let source1 = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash1, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size: size1, + ttl: Some(config.blob_min_ttl), + source: source1, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Finalize as resolved + let res = state.set_blob_pending( + &store, + subscriber, + SetPendingBlobStateParams { + hash: hash1, + size: size1, + id: SubscriptionId::default(), + source: source1, + }, + ); + assert!(res.is_ok()); + let finalize_epoch = ChainEpoch::from(current_epoch + 1); + let res = state.finalize_blob( + &store, + subscriber, + FinalizeBlobStateParams { + source: source1, + hash: hash1, + size: size1, + id: SubscriptionId::default(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size1), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1); + + // Add another blob past the first blob expiry + // This will trigger a debit on the account + let add2_epoch = ChainEpoch::from(config.blob_min_ttl + 10); + let (hash2, size2) = new_hash(2048); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash2, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size: size2, + ttl: Some(config.blob_min_ttl), + source: new_pk(), + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 2); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + let blob1_expiry = ChainEpoch::from(config.blob_min_ttl + add1_epoch); + let overcharge = BigInt::from((add2_epoch - blob1_expiry) as u64 * size1); + assert_eq!( + account.credit_committed, // this includes an overcharge that needs to be refunded + Credit::from_whole(config.blob_min_ttl as u64 * size2 - overcharge), + ); + credit_amount -= Credit::from_whole(config.blob_min_ttl as u64 * size2); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1 + size2); + + // Delete the first blob + let delete_epoch = ChainEpoch::from(config.blob_min_ttl + 20); + let (delete_from_disc, deleted_size, _) = state + .delete_blob( + &store, + caller, + sponsor, + DeleteBlobStateParams { 
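+ // Fields: which blob/subscription to drop, the epoch at which the delete takes effect, and whether to skip returning unused committed credit.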
+ hash: hash1, + id: SubscriptionId::default(), + epoch: delete_epoch, + skip_credit_return: false, + }, + ) + .unwrap(); + assert!(delete_from_disc); + assert_eq!(size1, deleted_size); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); // not changed, blob is expired + assert_eq!( + account.credit_committed, // should not include overcharge due to refund + Credit::from_whole(config.blob_min_ttl as u64 * size2), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size2); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); // credit was released + assert_eq!( + state.credits.credit_debited, + Credit::from_whole(config.blob_min_ttl as u64 * size1) + ); + assert_eq!(state.blobs.bytes_size(), size2); // capacity was released + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 1); + assert_eq!(state.blobs.added.len(), 1); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_trim_blob_expiries() { + setup_logs(); + let config = RecallConfig::default(); + + const HOUR: ChainEpoch = 3600; + const TWO_HOURS: ChainEpoch = HOUR * 2; + const DAY: ChainEpoch = HOUR * 24; + const YEAR: ChainEpoch = DAY * 365; + + let blobs_ttls: Vec<Option<ChainEpoch>> = + vec![None, Some(HOUR), Some(TWO_HOURS), Some(DAY), Some(YEAR)]; + + struct TestCase { + name: &'static str, + account_ttl: AccountStatus, + expected_ttls: Vec<ChainEpoch>, + limit: Option<u32>, // None means process all at once + } + + let test_cases = vec![ + TestCase { + name: "Set to zero with Reduced status", + account_ttl: AccountStatus::Reduced, + expected_ttls: vec![0, 0, 0, 0, 0], + limit: None, + }, + TestCase { + name: "Set to default with Default status", + account_ttl: AccountStatus::Default, + expected_ttls: vec![DAY, HOUR, TWO_HOURS, DAY, DAY], + limit: None, + }, + TestCase { + name: "Set to extended with Extended status", + account_ttl: AccountStatus::Extended, + expected_ttls: vec![DAY, HOUR, TWO_HOURS, DAY, YEAR], + limit: None, + }, + ]; + + for tc in test_cases { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + + // Setup account with credits and TTL status + let token = TokenAmount::from_whole(1000); + state + .buy_credit(&store, &config, caller, token, current_epoch) + .unwrap(); + + // Set extended TTL status to allow adding all blobs + state + .set_account_status( + &store, + &config, + caller, + AccountStatus::Extended, + current_epoch, + ) + .unwrap(); + + // Add blobs + let mut blob_hashes = Vec::new(); + let mut total_cost = Credit::zero(); + let mut expected_credits = Credit::zero(); + for (i, ttl) in blobs_ttls.iter().enumerate() { + let size = (i + 1) * 1024; + let (hash, _) = new_hash(size); + let size = size as u64; + let id = SubscriptionId::try_from(format!("blob-{}", i)).unwrap(); + let source = new_pk(); + blob_hashes.push(hash); + + state + .add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: 
new_metadata_hash(), + id: id.clone(), + size, + ttl: *ttl, + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ) + .unwrap(); + state + .set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: id.clone(), + source, + }, + ) + .unwrap(); + state + .finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id, + status: BlobStatus::Resolved, + epoch: current_epoch, + }, + ) + .unwrap(); + + total_cost += state.get_storage_cost(ttl.unwrap_or(config.blob_default_ttl), &size); + expected_credits += state.get_storage_cost(tc.expected_ttls[i], &size); + } + + let account = state.get_account(&store, caller).unwrap().unwrap(); + assert_eq!( + account.credit_committed, total_cost, + "Test case '{}' failed: committed credits don't match", + tc.name + ); + + state + .set_account_status(&store, &config, caller, tc.account_ttl, current_epoch) + .unwrap(); + + let res = state.trim_blob_expiries(&config, &store, caller, current_epoch, None, tc.limit); + assert!( + res.is_ok(), + "Test case '{}' failed to trim expiries: {}", + tc.name, + res.err().unwrap() + ); + + // Verify expiries were trimmed correctly + for (i, hash) in blob_hashes.iter().enumerate() { + // If the TTL is zero, the blob should be deleted + if tc.expected_ttls[i] == 0 { + assert!( + state.get_blob(&store, *hash).unwrap().is_none(), + "Test case '{}' failed: blob {} not deleted", + tc.name, + i + ); + } else { + let blob = state.get_blob(&store, *hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(&store).unwrap(); + let group = subscribers.get(&caller).unwrap().unwrap(); + let group_hamt = group.hamt(&store).unwrap(); + let sub = group_hamt + .get(&SubscriptionId::new(&format!("blob-{}", i)).unwrap()) + .unwrap() + .unwrap(); + + assert_eq!( + sub.expiry - sub.added, + tc.expected_ttls[i], + "Test case '{}' failed: blob {} expiry not trimmed correctly. 
Expected {}, got {}", + tc.name, + i, + tc.expected_ttls[i], + sub.expiry - sub.added, + ); + } + } + + let account = state.get_account(&store, caller).unwrap().unwrap(); + assert_eq!( + account.credit_committed, expected_credits, + "Test case '{}' failed: account's committed credits after blob expiry trimming don't match", + tc.name + ); + + assert_eq!( + state.credits.credit_committed, expected_credits, + "Test case '{}' failed: state's committed credits after blob expiry trimming don't match", + tc.name + ); + } +} + +#[test] +fn test_trim_blob_expiries_pagination() { + setup_logs(); + let config = RecallConfig::default(); + + // Test cases for pagination + struct PaginationTest { + name: &'static str, + limit: Option<u32>, + start: Option<usize>, + expected_next_key: Option<usize>, + expected_processed: usize, + } + + let test_cases = vec![ + PaginationTest { + name: "Process all at once", + limit: None, + start: None, + expected_next_key: None, + expected_processed: 5, + }, + PaginationTest { + name: "Process two at a time from beginning", + limit: Some(2), + start: None, + expected_next_key: Some(2), + expected_processed: 2, + }, + PaginationTest { + name: "Process one at a time with offset", + limit: Some(1), + start: Some(1), + expected_next_key: Some(2), + expected_processed: 1, + }, + PaginationTest { + name: "Out of bounds limit", + limit: Some(10), + start: Some(1), + expected_next_key: None, + expected_processed: 4, + }, + PaginationTest { + name: "With offset ending at last item", + limit: Some(2), + start: Some(3), + expected_next_key: None, + expected_processed: 2, + }, + ]; + + for tc in test_cases { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + + // Setup account with credits and Extended TTL status to allow adding all blobs + state + .buy_credit( + &store, + &config, + caller, + TokenAmount::from_whole(1000), + current_epoch, + ) + .unwrap(); + state + .set_account_status( + &store, + &config, + caller, + AccountStatus::Extended, + current_epoch, + ) + .unwrap(); + + // Add 5 blobs with different sizes to ensure different hashes + for i in 0..5 { + let (hash, size) = new_hash((i + 1) * 1024); + let id = SubscriptionId::try_from(format!("blob-{}", i)).unwrap(); + let source = new_pk(); + state + .add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id.clone(), + size, + ttl: Some(7200), // 2 hours + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ) + .unwrap(); + state + .set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: id.clone(), + source, + }, + ) + .unwrap(); + state + .finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id, + status: BlobStatus::Resolved, + epoch: current_epoch, + }, + ) + .unwrap(); + } + + // Range over all blobs and store their hashes + let mut blob_hashes = Vec::with_capacity(5); + let res = + state + .blobs + .hamt(&store) + .unwrap() + .for_each(|hash, _| -> Result<(), ActorError> { + blob_hashes.push(hash); + Ok(()) + }); + assert!( + res.is_ok(), + "Failed to iterate over blobs: {}", + res.err().unwrap() + ); + + // Change to Reduced status and process blobs with pagination + state + .set_account_status( + &store, + &config, + caller, + AccountStatus::Reduced, + current_epoch, + ) + .unwrap(); + + let res = state.trim_blob_expiries( + &config, + 
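+ // Arguments, as used throughout these tests: config, store, account, current epoch, an optional starting hash for pagination, and an optional limit on how many blobs to process.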
&store, + caller, + current_epoch, + tc.start.map(|ind| blob_hashes[ind]), + tc.limit, + ); + assert!( + res.is_ok(), + "Test case '{}' failed to trim expiries: {}", + tc.name, + res.err().unwrap() + ); + + let (processed, next, deleted_blobs) = res.unwrap(); + + assert_eq!( + processed as usize, tc.expected_processed, + "Test case '{}' had unexpected number of items processed", + tc.name + ); + + assert_eq!( + deleted_blobs.len(), + tc.expected_processed, + "Test case '{}' had unexpected number of deleted blobs", + tc.name + ); + + if let Some(expected_next_key) = tc.expected_next_key { + assert!(next.is_some(), "Test case '{}' expected next key", tc.name); + assert_eq!( + next.unwrap(), + blob_hashes[expected_next_key], + "Test case '{}' had unexpected next key", + tc.name + ); + } else { + assert!(next.is_none(), "Test case '{}' had no next key", tc.name); + } + } +} + +#[test] +fn test_trim_blob_expiries_for_multiple_accounts() { + setup_logs(); + + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let address1 = new_address(); + let address2 = new_address(); + let current_epoch = ChainEpoch::from(1); + + // Setup accounts with credits and Extended TTL status to allow adding all blobs + state + .buy_credit( + &store, + &config, + address1, + TokenAmount::from_whole(1000), + current_epoch, + ) + .unwrap(); + state + .buy_credit( + &store, + &config, + address2, + TokenAmount::from_whole(1000), + current_epoch, + ) + .unwrap(); + state + .set_account_status( + &store, + &config, + address1, + AccountStatus::Extended, + current_epoch, + ) + .unwrap(); + state + .set_account_status( + &store, + &config, + address2, + AccountStatus::Extended, + current_epoch, + ) + .unwrap(); + + // Add blobs for both accounts + let mut blob_hashes_account1 = Vec::new(); + let mut blob_hashes_account2 = Vec::new(); + for i in 0..3 { + let (hash, size) = new_hash((i + 1) * 1024); + let id = SubscriptionId::try_from(format!("blob-1-{}", i)).unwrap(); + let source = new_pk(); + blob_hashes_account1.push(hash); + state + .add_blob( + &store, + &config, + address1, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id.clone(), + size, + ttl: Some(7200), // 2 hours + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ) + .unwrap(); + state + .set_blob_pending( + &store, + address1, + SetPendingBlobStateParams { + hash, + size, + id: id.clone(), + source, + }, + ) + .unwrap(); + state + .finalize_blob( + &store, + address1, + FinalizeBlobStateParams { + source, + hash, + size, + id, + status: BlobStatus::Resolved, + epoch: current_epoch, + }, + ) + .unwrap(); + } + for i in 0..3 { + let (hash, size) = new_hash((i + 1) * 1024); + let id = SubscriptionId::try_from(format!("blob-2-{}", i)).unwrap(); + let source = new_pk(); + blob_hashes_account2.push(hash); + state + .add_blob( + &store, + &config, + address2, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id.clone(), + size, + ttl: Some(7200), // 2 hours + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ) + .unwrap(); + state + .set_blob_pending( + &store, + address2, + SetPendingBlobStateParams { + hash, + size, + id: id.clone(), + source, + }, + ) + .unwrap(); + state + .finalize_blob( + &store, + address2, + FinalizeBlobStateParams { + source, + hash, + size, + id, + status: BlobStatus::Resolved, + epoch: current_epoch, + }, + ) + .unwrap(); + } + + // Change TTL 
status for account1 and trim expiries + state + .set_account_status( + &store, + &config, + address1, + AccountStatus::Reduced, + current_epoch, + ) + .unwrap(); + let res = state.trim_blob_expiries(&config, &store, address1, current_epoch, None, None); + assert!( + res.is_ok(), + "Failed to trim expiries for account1: {}", + res.err().unwrap() + ); + + // Verify account1's blobs were trimmed + for hash in &blob_hashes_account1 { + assert!( + state.get_blob(&store, *hash).unwrap().is_none(), + "Blob {} for account1 was not deleted", + hash, + ); + } + + // Verify account2's blobs were not trimmed + for hash in &blob_hashes_account2 { + assert!( + state.get_blob(&store, *hash).unwrap().is_some(), + "Blob {} for account2 was incorrectly deleted", + hash, + ); + } +} diff --git a/fendermint/actors/blobs/src/state/credit.rs b/fendermint/actors/blobs/src/state/credit.rs new file mode 100644 index 0000000000..9201a386d6 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit.rs @@ -0,0 +1,26 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::Credit; +use fvm_ipld_encoding::tuple::*; + +mod approvals; +mod methods; +mod params; +#[cfg(test)] +mod tests; + +pub use approvals::*; +pub use params::*; + +/// Global credit-related state. +#[derive(Debug, Clone, Default, Serialize_tuple, Deserialize_tuple)] +pub struct Credits { + /// The total number of credits sold in the subnet. + pub credit_sold: Credit, + /// The total number of credits committed to active storage in the subnet. + pub credit_committed: Credit, + /// The total number of credits debited in the subnet. + pub credit_debited: Credit, +} diff --git a/fendermint/actors/blobs/src/state/credit/approvals.rs b/fendermint/actors/blobs/src/state/credit/approvals.rs new file mode 100644 index 0000000000..9333e37841 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/approvals.rs @@ -0,0 +1,54 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::CreditApproval; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use recall_ipld::{hamt, hamt::map::TrackedFlushResult}; + +/// HAMT wrapper tracking [`CreditApproval`]s by account address. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Approvals { + /// The HAMT root. + pub root: hamt::Root<Address, CreditApproval>, + /// The size of the collection. + size: u64, +} + +impl Approvals { + /// Returns an approval collection. + pub fn new<BS: Blockstore>(store: &BS) -> Result<Self, ActorError> { + let root = hamt::Root::<Address, CreditApproval>::new(store, "credit_approvals")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result<hamt::map::Hamt<'a, BS, Address, CreditApproval>, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult<Address, CreditApproval>, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. 
+ pub fn is_empty(&self) -> bool { + self.size == 0 + } +} diff --git a/fendermint/actors/blobs/src/state/credit/methods.rs b/fendermint/actors/blobs/src/state/credit/methods.rs new file mode 100644 index 0000000000..5baab4e51a --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/methods.rs @@ -0,0 +1,315 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::{Credit, CreditApproval, GasAllowance}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount, error::ExitCode}; +use recall_ipld::hamt; + +use super::CommitCapacityParams; +use crate::{ + caller::{Caller, Delegation, DelegationOptions}, + state::accounts::Account, + State, +}; + +/// Returns an error if the amount is negative. +pub fn ensure_positive_amount(amount: &TokenAmount) -> Result<(), ActorError> { + if amount.is_negative() { + return Err(ActorError::illegal_argument( + "amount must be positive".into(), + )); + } + Ok(()) +} + +impl State { + /// Buys credit for an account. + /// Flushes state to the blockstore. + pub fn buy_credit<BS: Blockstore>( + &mut self, + store: &BS, + config: &RecallConfig, + to: Address, + value: TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<Account, ActorError> { + self.ensure_capacity(config.blob_capacity)?; + ensure_positive_amount(&value)?; + + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load_or_create( + store, + &accounts, + to, + None, + current_epoch, + config.blob_default_ttl, + )?; + + let amount: Credit = value.clone() * &config.token_credit_rate; + caller.add_allowances(&amount, &value); + + // Update global state + self.credits.credit_sold += &amount; + + // Save caller + self.save_caller(&mut caller, &mut accounts)?; + + Ok(caller.subscriber().clone()) + } + + /// Sets the default credit and gas fee sponsor for an account. + /// Flushes state to the blockstore. + pub fn set_account_sponsor<BS: Blockstore>( + &mut self, + config: &RecallConfig, + store: &BS, + from: Address, + sponsor: Option<Address>, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load_or_create( + store, + &accounts, + from, + None, + current_epoch, + config.blob_default_ttl, + )?; + + caller.set_default_sponsor(sponsor); + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Updates (adds/removes) gas allowance for an account. + /// Flushes state to the blockstore. + pub fn update_gas_allowance<BS: Blockstore>( + &mut self, + store: &BS, + from: Address, + sponsor: Option<Address>, + add_amount: TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, from, sponsor)?; + + caller.update_gas_allowance(&add_amount, current_epoch)?; + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Approves credit and gas allowance spend from one account to another. + /// Flushes state to the blockstore. + pub fn approve_credit<BS: Blockstore>( + &mut self, + config: &RecallConfig, + store: &BS, + from: Address, + to: Address, + options: DelegationOptions, + current_epoch: ChainEpoch, + ) -> Result<CreditApproval, ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut delegation = Delegation::update_or_create( + store, + config, + &accounts, + from, + to, + options, + current_epoch, + )?; + + // Save delegation + self.save_delegation(&mut delegation, &mut accounts)?; + + Ok(delegation.approval().clone()) + } + + /// Revokes credit and gas allowance spend from one account to another. + /// Flushes state to the blockstore. + pub fn revoke_credit<BS: Blockstore>( + &mut self, + store: &BS, + from: Address, + to: Address, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, to, Some(from))?; + + caller.cancel_delegation(&mut accounts)?; + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Returns a [`CreditApproval`] from the given address to the given address + /// or [`None`] if no approval exists. + pub fn get_credit_approval<BS: Blockstore>( + &self, + store: &BS, + from: Address, + to: Address, + ) -> Result<Option<CreditApproval>, ActorError> { + let accounts = self.accounts.hamt(store)?; + let caller = Caller::load(store, &accounts, to, Some(from))?; + Ok(caller.delegate_approval().cloned()) + } + + /// Returns the gas allowance for the given address, including an amount from a default sponsor. + /// An error returned from this method would be fatal, as it's called from the FVM executor. + pub fn get_gas_allowance<BS: Blockstore>( + &self, + store: &BS, + from: Address, + current_epoch: ChainEpoch, + ) -> Result<GasAllowance, ActorError> { + let accounts = self.accounts.hamt(store)?; + let allowance = Caller::load_with_default_sponsor(store, &accounts, from) + .map(|caller| caller.gas_allowance(current_epoch)) + .unwrap_or_default(); + Ok(allowance) + } + + /// Debits credit from the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn debit_caller<BS: Blockstore>( + &mut self, + caller: &mut Caller<'_, BS>, + current_epoch: ChainEpoch, + ) { + let amount = self.get_debit_for_caller(caller, current_epoch); + caller.debit_credit(&amount, current_epoch); + + // Update global state + self.credits.credit_debited += &amount; + self.credits.credit_committed -= &amount; + } + + /// Refunds credit to the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn refund_caller<BS: Blockstore>( + &mut self, + caller: &mut Caller<'_, BS>, + amount: &Credit, + correction: &Credit, + ) { + caller.refund_credit(amount, correction); + + // Update global state + self.credits.credit_debited -= amount; + self.credits.credit_committed += correction; + } + + /// Commits new capacity for the caller. + /// The caller may pay for capacity with free credit or token value. + /// Does NOT flush the state to the blockstore. 
+ pub(crate) fn commit_capacity_for_caller<BS: Blockstore>( + &mut self, + caller: &mut Caller<'_, BS>, + config: &RecallConfig, + params: CommitCapacityParams, + ) -> Result<TokenAmount, ActorError> { + ensure_positive_amount(&params.cost)?; + ensure_positive_amount(&params.value)?; + + let value_remaining = match caller.commit_capacity(params.size, &params.cost, params.epoch) + { + Ok(()) => Ok(params.value.clone()), + Err(e) => { + // Buy credit to cover the amount + if e.exit_code() == ExitCode::USR_INSUFFICIENT_FUNDS && !params.value.is_zero() { + if caller.is_delegate() { + return Err(ActorError::forbidden( + "cannot auto-buy credits for a sponsor".into(), + )); + } + + let remainder: Credit = &params.cost - &caller.subscriber().credit_free; + let value_required = &remainder / &config.token_credit_rate; + let value_remaining = &params.value - &value_required; + if value_remaining.is_negative() { + return Err(ActorError::insufficient_funds(format!( + "insufficient value (received: {}; required: {})", + params.value, value_required + ))); + } + caller.add_allowances(&remainder, &value_required); + + // Update global state + self.credits.credit_sold += &remainder; + + // Try again + caller.commit_capacity(params.size, &params.cost, params.epoch)?; + Ok(value_remaining) + } else { + Err(e) + } + } + }?; + + // Update global state + self.credits.credit_committed += &params.cost; + + Ok(value_remaining) + } + + /// Releases capacity for the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn release_capacity_for_caller<BS: Blockstore>( + &mut self, + caller: &mut Caller<'_, BS>, + size: u64, + cost: &Credit, + ) { + caller.release_capacity(size, cost); + + // Update global state + self.credits.credit_committed -= cost; + } + + /// Returns committed credit to the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn return_committed_credit_for_caller<BS: Blockstore>( + &mut self, + caller: &mut Caller<'_, BS>, + amount: &Credit, + ) { + caller.return_committed_credit(amount); + + // Update global state + self.credits.credit_debited -= amount; + self.credits.credit_committed += amount; + } + + /// Save the caller state to the accounts HAMT. + pub(crate) fn save_caller<'a, BS: Blockstore>( + &mut self, + caller: &mut Caller<'a, BS>, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + caller.save(accounts)?; + self.accounts.save_tracked(accounts.flush_tracked()?); + Ok(()) + } + + /// Save the delegation state to the accounts HAMT. + pub(crate) fn save_delegation<'a, BS: Blockstore>( + &mut self, + delegation: &mut Delegation<'a, &'a BS>, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + delegation.save(accounts)?; + self.accounts.save_tracked(accounts.flush_tracked()?); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/credit/params.rs b/fendermint/actors/blobs/src/state/credit/params.rs new file mode 100644 index 0000000000..a38d0647ee --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/params.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::Credit; +use fvm_shared::{clock::ChainEpoch, econ::TokenAmount}; + +/// Params for committing capacity. +#[derive(Debug)] +pub struct CommitCapacityParams { + /// Commitment size for caller. + pub size: u64, + /// Commitment cost. + pub cost: Credit, + /// Token amount available to commitment. + pub value: TokenAmount, + /// Commitment chain epoch. 
+ pub epoch: ChainEpoch, +} diff --git a/fendermint/actors/blobs/src/state/credit/tests.rs b/fendermint/actors/blobs/src/state/credit/tests.rs new file mode 100644 index 0000000000..de9129ddfa --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/tests.rs @@ -0,0 +1,377 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::SubscriptionId, + credit::{Credit, CreditApproval}, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_recall_config_shared::RecallConfig; +use fvm_ipld_blockstore::MemoryBlockstore; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use crate::{caller::DelegationOptions, state::blobs::AddBlobStateParams, State}; + +fn check_approvals_match( + state: &State, + store: &MemoryBlockstore, + from: Address, + to: Address, + expected: CreditApproval, +) { + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!( + from_account + .approvals_to + .hamt(store) + .unwrap() + .get(&to) + .unwrap() + .unwrap(), + expected + ); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!( + to_account + .approvals_from + .hamt(store) + .unwrap() + .get(&from) + .unwrap() + .unwrap(), + expected + ); +} + +#[test] +fn test_buy_credit_success() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(1); + + let res = state.buy_credit(&store, &config, to, amount.clone(), 1); + assert!(res.is_ok()); + let account = res.unwrap(); + let credit_sold = amount.clone() * &config.token_credit_rate; + assert_eq!(account.credit_free, credit_sold); + assert_eq!(account.gas_allowance, amount); + assert_eq!(state.credits.credit_sold, credit_sold); + let account_back = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(account, account_back); +} + +#[test] +fn test_buy_credit_negative_amount() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(-1); + + let res = state.buy_credit(&store, &config, to, amount, 1); + assert!(res.is_err()); + assert_eq!(res.err().unwrap().msg(), "amount must be positive"); +} + +#[test] +fn test_buy_credit_at_capacity() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(1); + + state.blobs.set_capacity(config.blob_capacity); + let res = state.buy_credit(&store, &config, to, amount, 1); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + "subnet has reached storage capacity" + ); +} + +#[test] +fn test_approve_credit_success() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = RecallConfig::default(); + + // No limit or expiry + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions::default(), + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + 
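+ // With all-default `DelegationOptions`, the delegation carries no credit
+ // limit, no gas fee limit, and no TTL, so the approval is expected to be
+ // unbounded and non-expiring, which is what the assertions below check.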
assert_eq!(approval.credit_limit, None); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add credit limit + let limit = 1_000_000_000_000_000_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, Some(Credit::from_whole(limit))); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add gas fee limit + let limit = 1_000_000_000_000_000_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + gas_fee_limit: Some(TokenAmount::from_atto(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, None); + assert_eq!( + approval.gas_allowance_limit, + Some(TokenAmount::from_atto(limit)) + ); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add ttl + let ttl = ChainEpoch::from(config.blob_min_ttl); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ttl: Some(ttl), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, Some(Credit::from_whole(limit))); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, Some(ttl + current_epoch)); + check_approvals_match(&state, &store, from, to, approval); +} + +#[test] +fn test_approve_credit_invalid_ttl() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = RecallConfig::default(); + let ttl = ChainEpoch::from(config.blob_min_ttl - 1); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + ttl: Some(ttl), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("minimum approval TTL is {}", config.blob_min_ttl) + ); +} + +#[test] +fn test_approve_credit_overflowing_ttl() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = RecallConfig::default(); + + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + ttl: Some(ChainEpoch::MAX), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.expiry, Some(i64::MAX)); +} + +#[test] +fn test_approve_credit_insufficient_credit() { + setup_logs(); + let config = RecallConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, from, amount.clone(), current_epoch) + .unwrap(); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions::default(), + 
current_epoch, + ); + assert!(res.is_ok()); + + // Add a blob + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + to, + Some(from), + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check approval + let account = state.get_account(&store, from).unwrap().unwrap(); + let approval = account + .approvals_to + .hamt(&store) + .unwrap() + .get(&to) + .unwrap() + .unwrap(); + assert_eq!(account.credit_committed, approval.credit_used); + + // Try to update approval with a limit below what's already been committed + let limit = 1_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!( + "limit cannot be less than amount of already used credits ({})", + approval.credit_used + ) + ); +} + +#[test] +fn test_revoke_credit_success() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = RecallConfig::default(); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions::default(), + current_epoch, + ); + assert!(res.is_ok()); + + // Check the account approvals + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!(from_account.approvals_to.len(), 1); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(to_account.approvals_from.len(), 1); + + // Remove the approval + let res = state.revoke_credit(&store, from, to); + assert!(res.is_ok()); + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!(from_account.approvals_to.len(), 0); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(to_account.approvals_from.len(), 0); +} + +#[test] +fn test_revoke_credit_account_not_found() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + + let res = state.revoke_credit(&store, from, to); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("{} not found in accounts", to) + ); +} diff --git a/fendermint/actors/blobs/src/state/operators.rs b/fendermint/actors/blobs/src/state/operators.rs new file mode 100644 index 0000000000..1896c66995 --- /dev/null +++ b/fendermint/actors/blobs/src/state/operators.rs @@ -0,0 +1,401 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::fvm_ipld_hamt::{BytesKey, Config, Hamt, Sha256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use fvm_shared::clock::ChainEpoch; +use recall_ipld::hamt::{self, map::TrackedFlushResult}; + +pub use cid::Cid; + +/// Default HAMT configuration for pubkey mapping +const PUBKEY_HAMT_CONFIG: Config = Config { + bit_width: 5, + min_data_depth: 0, + max_array_width: 3, +}; + +/// Information about a registered node operator +#[derive(Clone, Debug, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub 
struct NodeOperatorInfo { + /// BLS public key (48 bytes) + pub bls_pubkey: Vec<u8>, + + /// RPC URL for gateway to query signatures + pub rpc_url: String, + + /// Epoch when operator registered + pub registered_epoch: ChainEpoch, + + /// Whether operator is active + pub active: bool, +} + +/// Registry of node operators +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Operators { + /// HAMT root: Address → NodeOperatorInfo + pub root: hamt::Root<Address, NodeOperatorInfo>, + + /// HAMT root CID: BLS public key (BytesKey) → Address + /// Used for fast uniqueness check during registration + /// Uses fvm_ipld_hamt directly to avoid Display constraint + pub pubkey_to_addr: Cid, + + /// Ordered list of active operator addresses + /// Index in this vec = bit position in bitmap for signature aggregation + pub active_list: Vec<Address>, + + /// Total number of registered operators + size: u64, + + /// Total number of entries in pubkey_to_addr HAMT + pubkey_size: u64, +} + +impl Operators { + /// Creates a new empty [`Operators`] registry + pub fn new<BS: Blockstore>(store: &BS) -> Result<Self, ActorError> { + let root = hamt::Root::<Address, NodeOperatorInfo>::new(store, "operators")?; + // Create empty pubkey HAMT using fvm_ipld_hamt directly with explicit config + let mut pubkey_hamt: Hamt<&BS, Address, BytesKey, Sha256> = + Hamt::new_with_config(store, PUBKEY_HAMT_CONFIG); + let pubkey_to_addr = pubkey_hamt + .flush() + .map_err(|e| ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)))?; + Ok(Self { + root, + pubkey_to_addr, + active_list: Vec::new(), + size: 0, + pubkey_size: 0, + }) + } + + /// Returns the underlying [`hamt::map::Hamt`] for operators + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result<hamt::map::Hamt<'a, BS, Address, NodeOperatorInfo>, ActorError> { + self.root.hamt(store, self.size) + } + + /// Returns the underlying fvm_ipld_hamt for pubkey → address mapping + pub fn pubkey_hamt<BS: Blockstore>( + &self, + store: BS, + ) -> Result<Hamt<BS, Address, BytesKey, Sha256>, ActorError> { + Hamt::load_with_config(&self.pubkey_to_addr, store, PUBKEY_HAMT_CONFIG) + .map_err(|e| ActorError::illegal_state(format!("failed to load pubkey HAMT: {}", e))) + } + + /// Saves the state from the [`TrackedFlushResult`] for operators + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult<Address, NodeOperatorInfo>, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Saves the pubkey HAMT root CID and updates size + pub fn save_pubkey(&mut self, cid: Cid, size_delta: i64) { + self.pubkey_to_addr = cid; + self.pubkey_size = (self.pubkey_size as i64 + size_delta) as u64; + } + + /// Returns the number of registered operators + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if there are no registered operators + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Register a new operator (adds to end of active_list) + /// Returns the operator's index in the active_list + pub fn register<BS: Blockstore + Clone>( + &mut self, + store: BS, + address: Address, + info: NodeOperatorInfo, + ) -> Result<usize, ActorError> { + let mut hamt = self.hamt(store.clone())?; + + // Check if operator already exists + if hamt.get(&address)?.is_some() { + return Err(ActorError::illegal_argument( + "Operator already registered".into(), + )); + } + + // Check if BLS public key is already registered (O(log n) lookup) + let mut pubkey_hamt = self.pubkey_hamt(store)?; + let pubkey_key = BytesKey::from(info.bls_pubkey.clone()); + if pubkey_hamt + .get(&pubkey_key) + .map_err(|e| ActorError::illegal_state(format!("failed to get pubkey: {}", e)))? + .is_some() + { + return Err(ActorError::illegal_argument( + "BLS public key already registered by another operator".into(), + )); + } + + // Add pubkey → address mapping + pubkey_hamt + .set(pubkey_key, address) + .map_err(|e| ActorError::illegal_state(format!("failed to set pubkey: {}", e)))?; + let pubkey_cid = pubkey_hamt + .flush() + .map_err(|e| ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)))?; + self.save_pubkey(pubkey_cid, 1); + + // Add to operator HAMT + self.save_tracked(hamt.set_and_flush_tracked(&address, info)?); + + // Add to active list (gets next available index) + let index = self.active_list.len(); + self.active_list.push(address); + + Ok(index) + } + + /// Get operator info by address + pub fn get<BS: Blockstore>( + &self, + store: BS, + address: &Address, + ) -> Result<Option<NodeOperatorInfo>, ActorError> { + self.hamt(store)?.get(address) + } + + /// Get operator index in active_list (for bitmap generation) + /// Returns None if operator is not in the active list + pub fn get_index(&self, address: &Address) -> Option<usize> { + self.active_list.iter().position(|a| a == address) + } + + /// Get all active operators in order + pub fn get_active_operators(&self) -> Vec<Address> { + self.active_list.clone() + } + + /// Update operator info (e.g., to change RPC URL or deactivate) + pub fn update<BS: Blockstore>( + &mut self, + store: BS, + address: &Address, + info: NodeOperatorInfo, + ) -> Result<(), ActorError> { + let mut hamt = self.hamt(store)?; + + // Check if operator exists + if hamt.get(address)?.is_none() { + return Err(ActorError::not_found("Operator not found".into())); + } + + // Update in HAMT + self.save_tracked(hamt.set_and_flush_tracked(address, info)?); + + Ok(()) + } + + /// Deactivate an operator (removes from active_list but keeps in HAMT) + /// Note: This will change indices of all operators after the removed one + pub fn deactivate<BS: Blockstore + Clone>( + &mut self, + store: BS, + address: &Address, + ) -> Result<(), ActorError> { + let mut hamt = self.hamt(store.clone())?; + + // Get existing info + let mut info = hamt + .get(address)? + .ok_or_else(|| ActorError::not_found("Operator not found".into()))?; + + // Remove pubkey → address mapping to allow re-registration with same pubkey + let mut pubkey_hamt = self.pubkey_hamt(store)?; + let pubkey_key = BytesKey::from(info.bls_pubkey.clone()); + pubkey_hamt + .delete(&pubkey_key) + .map_err(|e| ActorError::illegal_state(format!("failed to delete pubkey: {}", e)))?; + let pubkey_cid = pubkey_hamt + .flush() + .map_err(|e| ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)))?; + self.save_pubkey(pubkey_cid, -1); + + // Mark as inactive + info.active = false; + self.save_tracked(hamt.set_and_flush_tracked(address, info)?); + + // Remove from active_list + if let Some(pos) = self.active_list.iter().position(|a| a == address) { + self.active_list.remove(pos); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fvm_ipld_blockstore::MemoryBlockstore; + + fn new_test_address(id: u64) -> Address { + Address::new_id(id) + } + + fn new_test_operator(pubkey: u8) -> NodeOperatorInfo { + NodeOperatorInfo { + bls_pubkey: vec![pubkey; 48], + rpc_url: format!("http://operator{}.example.com:8080", pubkey), + registered_epoch: 0, + active: true, + } + } + + #[test] + fn test_register_operator() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let info1 = new_test_operator(1); + + let index = operators.register(&store, addr1, info1.clone()).unwrap(); + assert_eq!(index, 0); + assert_eq!(operators.len(), 1); + + let retrieved = operators.get(&store, &addr1).unwrap().unwrap(); + assert_eq!(retrieved, info1); + } + + #[test] + fn test_active_list_ordering() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + let addr3 = new_test_address(102); + + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + operators + .register(&store, addr2, new_test_operator(2)) + .unwrap(); + operators + .register(&store, addr3, new_test_operator(3)) + .unwrap(); + + assert_eq!(operators.get_index(&addr1), Some(0)); + assert_eq!(operators.get_index(&addr2), Some(1)); + assert_eq!(operators.get_index(&addr3), Some(2)); + + let active = operators.get_active_operators(); + assert_eq!(active, vec![addr1, addr2, addr3]); + } + + #[test] + fn test_duplicate_registration() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + let result = 
operators.register(&store, addr1, new_test_operator(2)); + assert!(result.is_err()); + } + + #[test] + fn test_duplicate_pubkey_registration() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + + // Register first operator with pubkey 1 + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + // Try to register second operator with same pubkey - should fail + let result = operators.register(&store, addr2, new_test_operator(1)); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .msg() + .contains("BLS public key already registered")); + } + + #[test] + fn test_pubkey_reuse_after_deactivation() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + + // Register first operator with pubkey 1 + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + // Deactivate operator 1 + operators.deactivate(&store, &addr1).unwrap(); + + // Now registering with same pubkey from different address should succeed + let result = operators.register(&store, addr2, new_test_operator(1)); + assert!(result.is_ok()); + } + + #[test] + fn test_deactivate_operator() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + let addr3 = new_test_address(102); + + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + operators + .register(&store, addr2, new_test_operator(2)) + .unwrap(); + operators + .register(&store, addr3, new_test_operator(3)) + .unwrap(); + + // Deactivate middle operator + operators.deactivate(&store, &addr2).unwrap(); + + // Check active list updated + let active = operators.get_active_operators(); + assert_eq!(active, vec![addr1, addr3]); + + // Check indices shifted + assert_eq!(operators.get_index(&addr1), Some(0)); + assert_eq!(operators.get_index(&addr2), None); + assert_eq!(operators.get_index(&addr3), Some(1)); + + // Check still in HAMT but marked inactive + let info = operators.get(&store, &addr2).unwrap().unwrap(); + assert!(!info.active); + } +} diff --git a/fendermint/actors/blobs/src/testing.rs b/fendermint/actors/blobs/src/testing.rs new file mode 100644 index 0000000000..a157d39f61 --- /dev/null +++ b/fendermint/actors/blobs/src/testing.rs @@ -0,0 +1,142 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::AddBlobParams, credit::BuyCreditParams, method::Method, +}; +use fendermint_actor_recall_config_shared::{RecallConfig, RECALL_CONFIG_ACTOR_ADDR}; +use fil_actors_runtime::test_utils::{expect_empty, MockRuntime, SYSTEM_ACTOR_CODE_ID}; +use fil_actors_runtime::SYSTEM_ACTOR_ADDR; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::{ + address::Address, clock::ChainEpoch, econ::TokenAmount, error::ExitCode, sys::SendFlags, + MethodNum, +}; +use num_traits::Zero; +use recall_actor_sdk::evm::to_actor_event; + +use crate::{ + actor::BlobsActor, + sol_facade::{ + blobs as sol_blobs, + credit::{CreditApproved, CreditPurchased, CreditRevoked}, + }, + State, +}; + +pub fn construct_and_verify() -> MockRuntime { + let rt = MockRuntime { + receiver: Address::new_id(10), + 
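+ // All other `MockRuntime` fields keep their defaults; only the receiver
+ // is pinned so the actor under test has a stable ID address.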
..Default::default() + }; + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let result = rt + .call::<BlobsActor>(Method::Constructor as u64, None) + .unwrap(); + expect_empty(result); + rt.verify(); + rt.reset(); + rt +} + +pub fn expect_get_config(rt: &MockRuntime) { + rt.expect_send( + RECALL_CONFIG_ACTOR_ADDR, + fendermint_actor_recall_config_shared::Method::GetConfig as MethodNum, + None, + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&RecallConfig::default()).unwrap(), + ExitCode::OK, + None, + ); +} + +pub fn expect_emitted_purchase_event( + rt: &MockRuntime, + params: &BuyCreditParams, + amount: TokenAmount, +) { + let event = to_actor_event(CreditPurchased::new(params.0, amount)).unwrap(); + rt.expect_emitted_event(event); +} + +pub fn expect_emitted_approve_event( + rt: &MockRuntime, + from: Address, + to: Address, + credit_limit: Option<TokenAmount>, + gas_fee_limit: Option<TokenAmount>, + expiry: ChainEpoch, +) { + let event = to_actor_event(CreditApproved { + from, + to, + credit_limit, + gas_fee_limit, + expiry: Some(expiry), + }) + .unwrap(); + rt.expect_emitted_event(event); +} + +pub fn expect_emitted_revoke_event(rt: &MockRuntime, from: Address, to: Address) { + let event = to_actor_event(CreditRevoked::new(from, to)).unwrap(); + rt.expect_emitted_event(event); +} + +pub fn expect_emitted_add_event( + rt: &MockRuntime, + current_epoch: ChainEpoch, + params: &AddBlobParams, + subscriber: Address, + used: u64, +) { + let event = to_actor_event(sol_blobs::BlobAdded { + subscriber, + hash: &params.hash, + size: params.size, + expiry: params.ttl.unwrap_or(86400) + current_epoch, + bytes_used: used, + }) + .unwrap(); + rt.expect_emitted_event(event); +} + +pub fn check_approval_used<BS: Blockstore>( + state: &State, + store: &BS, + caller: Address, + sponsor: Address, +) { + assert_ne!(caller, sponsor); + let subscriber_account = state.get_account(&store, sponsor).unwrap().unwrap(); + let subscriber_approval = subscriber_account + .approvals_to + .hamt(store) + .unwrap() + .get(&caller) + .unwrap() + .unwrap(); + assert_eq!( + subscriber_approval.credit_used, + state.credits.credit_debited.clone() + subscriber_account.credit_committed.clone() + ); + let origin_account = state.get_account(&store, caller).unwrap().unwrap(); + let origin_approval = origin_account + .approvals_from + .hamt(store) + .unwrap() + .get(&sponsor) + .unwrap() + .unwrap(); + assert_eq!( + subscriber_approval.credit_used, + &state.credits.credit_debited + &subscriber_account.credit_committed + ); + assert_eq!(subscriber_approval.credit_used, origin_approval.credit_used); +} diff --git a/fendermint/actors/blobs/testing/Cargo.toml b/fendermint/actors/blobs/testing/Cargo.toml new file mode 100644 index 0000000000..9c2ef0dbd3 --- /dev/null +++ b/fendermint/actors/blobs/testing/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "fendermint_actor_blobs_testing" +description = "Test utils for blobs" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +fvm_shared = { workspace = true } +iroh-blobs = { workspace = true } +rand = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +fendermint_actor_blobs_shared = { path = "../shared" } diff --git a/fendermint/actors/blobs/testing/src/lib.rs b/fendermint/actors/blobs/testing/src/lib.rs new file mode 
100644 index 0000000000..a9cc46ea1e --- /dev/null +++ b/fendermint/actors/blobs/testing/src/lib.rs @@ -0,0 +1,66 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{blobs::SubscriptionId, bytes::B256}; +use fvm_shared::address::Address; +use rand::{distributions::Alphanumeric, Rng, RngCore}; + +pub fn setup_logs() { + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; + use tracing_subscriber::EnvFilter; + tracing_subscriber::registry() + .with( + tracing_subscriber::fmt::layer() + .event_format(tracing_subscriber::fmt::format().with_line_number(true)) + .with_writer(std::io::stdout), + ) + .with(EnvFilter::from_default_env()) + .try_init() + .ok(); +} + +pub fn new_hash(size: usize) -> (B256, u64) { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; size]; + rng.fill_bytes(&mut data); + (B256(*iroh_blobs::Hash::new(&data).as_bytes()), size as u64) +} + +pub fn new_hash_from_vec(buf: Vec) -> (B256, u64) { + ( + B256(*iroh_blobs::Hash::new(&buf).as_bytes()), + buf.len() as u64, + ) +} + +pub fn new_metadata_hash() -> B256 { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; 8]; + rng.fill_bytes(&mut data); + B256(*iroh_blobs::Hash::new(&data).as_bytes()) +} + +pub fn new_pk() -> B256 { + let mut rng = rand::thread_rng(); + let mut data = [0u8; 32]; + rng.fill_bytes(&mut data); + B256(data) +} + +pub fn new_address() -> Address { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; 32]; + rng.fill_bytes(&mut data); + Address::new_actor(&data) +} + +pub fn new_subscription_id(length: usize) -> SubscriptionId { + let str: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect(); + SubscriptionId::try_from(str).unwrap() +} diff --git a/fendermint/actors/bucket/Cargo.toml b/fendermint/actors/bucket/Cargo.toml new file mode 100644 index 0000000000..ebba3bbfdc --- /dev/null +++ b/fendermint/actors/bucket/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "fendermint_actor_bucket" +description = "Actor for bucket object storage" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +blake3 = { workspace = true } +cid = { workspace = true, default-features = false } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +recall_sol_facade = { workspace = true, features = ["bucket"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_blobs_shared = { path = "../blobs/shared" } +fendermint_actor_machine = { path = "../machine" } +recall_actor_sdk = { path = "../../../recall/actor_sdk" } +recall_ipld = { path = "../../../recall/ipld" } + +[dev-dependencies] +fil_actors_evm_shared = { workspace = true } +fil_actors_runtime = { workspace = true, features = ["test_utils"] } +hex-literal = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } + +fendermint_actor_blobs_testing = { path = "../blobs/testing" } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git 
a/fendermint/actors/bucket/src/actor.rs b/fendermint/actors/bucket/src/actor.rs new file mode 100644 index 0000000000..362c9c0935 --- /dev/null +++ b/fendermint/actors/bucket/src/actor.rs @@ -0,0 +1,1262 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::{ + blobs::{ + AddBlobParams, Blob, BlobStatus, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, + SubscriptionId, + }, + sdk::{add_blob, delete_blob, get_blob, has_credit_approval, overwrite_blob}, +}; +use fendermint_actor_machine::MachineActor; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, +}; +use fvm_shared::address::Address; +use recall_actor_sdk::evm::{ + emit_evm_event, InputData, InvokeContractParams, InvokeContractReturn, +}; +use recall_ipld::hamt::BytesKey; + +use crate::shared::{ + AddParams, DeleteParams, GetParams, ListObjectsReturn, ListParams, Method, Object, + BUCKET_ACTOR_NAME, +}; +use crate::sol_facade as sol; +use crate::sol_facade::AbiCall; +use crate::state::{ObjectState, State}; +use crate::{ + UpdateObjectMetadataParams, MAX_METADATA_ENTRIES, MAX_METADATA_KEY_SIZE, + MAX_METADATA_VALUE_SIZE, +}; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(Actor); + +pub struct Actor; + +impl Actor { + /// Adds an object to a bucket. + /// + /// Access control will be enforced by the Blobs actor. + /// We will pass the bucket owner as the `subscriber`, + /// and the Blobs actor will enforce that the `from` address is either + /// the `subscriber` or has a valid credit delegation from the `subscriber`. + /// The `from` address must be the origin or the caller. + fn add_object(rt: &impl Runtime, params: AddParams) -> Result<Object, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let state = rt.state::<State>()?; + let sub_id = get_blob_id(&state, &params.key)?; + let key = BytesKey(params.key.clone()); + + validate_metadata(&params.metadata)?; + + let sub = if let Some(object) = state.get(rt.store(), &key)? { + // If we have an existing blob and it's not expired + let expired = object.expiry <= rt.curr_epoch(); + if params.overwrite || expired { + // Overwrite if the flag is passed + overwrite_blob( + rt, + OverwriteBlobParams { + old_hash: object.hash, + add: AddBlobParams { + from, + sponsor: Some(state.owner), + source: params.source, + hash: params.hash, + metadata_hash: params.recovery_hash, + id: sub_id, + size: params.size, + ttl: params.ttl, + }, + }, + )? + } else { + // Return an error if the overwrite flag was not passed + return Err(ActorError::illegal_state( + "key exists; use overwrite".into(), + )); + } + } else { + // No object found, just add a new blob + add_blob( + rt, + AddBlobParams { + from, + sponsor: Some(state.owner), + source: params.source, + hash: params.hash, + metadata_hash: params.recovery_hash, + id: sub_id, + size: params.size, + ttl: params.ttl, + }, + )? + }; + + rt.transaction(|st: &mut State, rt| { + st.add( + rt.store(), + key, + params.hash, + params.size, + sub.expiry, + params.metadata.clone(), + params.overwrite, + ) + })?; + + emit_evm_event( + rt, + sol::ObjectAdded::new(&params.key, &params.hash, &params.metadata), + )?; + + Ok(Object { + hash: params.hash, + recovery_hash: params.recovery_hash, + size: params.size, + expiry: sub.expiry, + metadata: params.metadata, + }) + } + + /// Deletes an object from a bucket. + /// + /// Access control will be enforced by the Blobs actor. + /// We will pass the bucket owner as the `subscriber`, + /// and the Blobs actor will enforce that the `from` address is either + /// the `subscriber` or has a valid credit delegation from the `subscriber`. + /// The `from` address must be the origin or the caller. + fn delete_object(rt: &impl Runtime, params: DeleteParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let state = rt.state::<State>()?; + let sub_id = get_blob_id(&state, &params.0)?; + let key = BytesKey(params.0); + let object = state + .get(rt.store(), &key)? + .ok_or(ActorError::illegal_state("object not found".into()))?; + + // Delete blob for object + delete_blob( + rt, + DeleteBlobParams { + from, + sponsor: Some(state.owner), + hash: object.hash, + id: sub_id, + }, + )?; + + rt.transaction(|st: &mut State, rt| st.delete(rt.store(), &key))?; + + emit_evm_event(rt, sol::ObjectDeleted::new(&key, &object.hash))?; + + Ok(()) + } + + /// Returns an object. + fn get_object(rt: &impl Runtime, params: GetParams) -> Result<Option<Object>, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::<State>()?; + let owner = state.owner; + let sub_id = get_blob_id(&state, &params.0)?; + let key = BytesKey(params.0); + if let Some(object_state) = state.get(rt.store(), &key)? { + if let Some(blob) = get_blob(rt, GetBlobParams(object_state.hash))? { + let object = build_object(&blob, &object_state, sub_id, owner)?; + Ok(object) + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + /// Lists bucket objects. + fn list_objects( + rt: &impl Runtime, + params: ListParams, + ) -> Result<ListObjectsReturn, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let current_epoch = rt.curr_epoch(); + let mut objects = Vec::new(); + let start_key = params.start_key.map(BytesKey::from); + let state = rt.state::<State>()?; + let (prefixes, next_key) = state.list( + rt.store(), + params.prefix, + params.delimiter, + start_key.as_ref(), + params.limit, + |key: Vec<u8>, object_state: ObjectState| -> Result<(), ActorError> { + if object_state.expiry > current_epoch { + objects.push((key, object_state)); + } + Ok(()) + }, + )?; + + let next_key = next_key.map(|key| key.0); + + Ok(ListObjectsReturn { + objects, + next_key, + common_prefixes: prefixes, + }) + } + + /// Updates object metadata. + /// + /// Only the bucket owner or an account with a credit delegation + /// from the bucket owner can update object metadata. + /// The `from` address must be the origin or the caller. + fn update_object_metadata( + rt: &impl Runtime, + params: UpdateObjectMetadataParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let key = BytesKey(params.key.clone()); + let state = rt.state::<State>()?; + let mut object = state + .get(rt.store(), &key)? + .ok_or(ActorError::illegal_state("object not found".into()))?; + + let bucket_owner = state.owner; + if !has_credit_approval(rt, bucket_owner, from)? { + return Err(actor_error!( + forbidden; + format!("Unauthorized: missing delegation from bucket owner {} to {}", bucket_owner, from))); + } + + validate_metadata_optional(&params.metadata)?; + + let metadata = rt.transaction(|st: &mut State, rt| { + for (key, val) in params.metadata { + match val { + Some(v) => { + object + .metadata + .entry(key) + .and_modify(|s| *s = v.clone()) + .or_insert(v); + } + None => { + object.metadata.remove(&key); + } + } + } + + if object.metadata.len() as u32 > MAX_METADATA_ENTRIES { + return Err(ActorError::illegal_state(format!( + "the maximum metadata entries allowed is {}", + MAX_METADATA_ENTRIES + ))); + } + + st.add( + rt.store(), + key, + object.hash, + object.size, + object.expiry, + object.metadata.clone(), + true, + )?; + + Ok(object.metadata) + })?; + + emit_evm_event(rt, sol::ObjectMetadataUpdated::new(&params.key, &metadata))?; + + Ok(()) + } + + fn invoke_contract( + rt: &impl Runtime, + params: InvokeContractParams, + ) -> Result<InvokeContractReturn, ActorError> { + let input_data: InputData = params.try_into()?; + if sol::can_handle(&input_data) { + let output_data = match sol::parse_input(&input_data)? { + sol::Calls::addObject_0(call) => { + // function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external; + let params = call.params(); + Self::add_object(rt, params)?; + call.returns(()) + } + sol::Calls::addObject_1(call) => { + // function addObject(AddObjectParams memory params) external; + let params = call.params(); + Self::add_object(rt, params)?; + call.returns(()) + } + sol::Calls::deleteObject(call) => { + // function deleteObject(string memory key) external; + let params = call.params(); + Self::delete_object(rt, params)?; + call.returns(()) + } + sol::Calls::getObject(call) => { + // function getObject(string memory key) external view returns (ObjectValue memory); + let params = call.params(); + let object = Self::get_object(rt, params)?; + call.returns(object) + } + sol::Calls::queryObjects_0(call) => { + // function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_1(call) => { + // function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_2(call) => { + // function queryObjects(string memory prefix) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_3(call) => { + // function queryObjects() external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_4(call) => { + // function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::updateObjectMetadata(call) => { + // function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; + let params = call.params(); + Self::update_object_metadata(rt, params)?; + call.returns(()) + } + }; + Ok(InvokeContractReturn { output_data }) + } else { 
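+ // The calldata selector didn't match any function handled by the
+ // bucket's Solidity facade; reject the call rather than ignoring it.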
Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } +} + +/// Returns a blob subscription ID specific to this machine and object key. +fn get_blob_id(state: &State, key: &[u8]) -> Result<SubscriptionId, ActorError> { + let mut data = state.address.get()?.payload_bytes(); + data.extend(key); + let id = blake3::hash(&data).to_hex().to_string(); + SubscriptionId::new(&id) +} + +/// Build an object from its state and blob. +fn build_object( + blob: &Blob, + object_state: &ObjectState, + sub_id: SubscriptionId, + subscriber: Address, +) -> Result<Option<Object>, ActorError> { + match blob.status { + BlobStatus::Resolved => { + blob.subscribers.get(&sub_id).cloned().ok_or_else(|| { + ActorError::illegal_state(format!( + "owner {} is not subscribed to blob {}; this should not happen", + subscriber, object_state.hash + )) + })?; + Ok(Some(Object { + hash: object_state.hash, + recovery_hash: blob.metadata_hash, + size: blob.size, + expiry: object_state.expiry, + metadata: object_state.metadata.clone(), + })) + } + BlobStatus::Added | BlobStatus::Pending | BlobStatus::Failed => Ok(None), + } +} + +fn validate_metadata(metadata: &HashMap<String, String>) -> Result<(), ActorError> { + if metadata.len() as u32 > MAX_METADATA_ENTRIES { + return Err(ActorError::illegal_state(format!( + "the maximum metadata entries allowed is {}", + MAX_METADATA_ENTRIES + ))); + } + + for (key, value) in metadata { + if key.len() as u32 > MAX_METADATA_KEY_SIZE { + return Err(ActorError::illegal_state(format!( + "key must be less than or equal to {}", + MAX_METADATA_KEY_SIZE + ))); + } + + if value.is_empty() || value.len() as u32 > MAX_METADATA_VALUE_SIZE { + return Err(ActorError::illegal_state(format!( + "value must be non-empty and less than or equal to {}", + MAX_METADATA_VALUE_SIZE + ))); + } + } + + Ok(()) +} + +fn validate_metadata_optional( + metadata: &HashMap<String, Option<String>>, +) -> Result<(), ActorError> { + for (key, value) in metadata { + if key.len() as u32 > MAX_METADATA_KEY_SIZE { + return Err(ActorError::illegal_state(format!( + "key must be less than or equal to {}", + MAX_METADATA_KEY_SIZE + ))); + } + + if let Some(value) = value { + if value.is_empty() || value.len() as u32 > MAX_METADATA_VALUE_SIZE { + return Err(ActorError::illegal_state(format!( + "value must be non-empty and less than or equal to {}", + MAX_METADATA_VALUE_SIZE + ))); + } + } + } + + Ok(()) +} + +impl MachineActor for Actor { + type State = State; +} + +impl ActorCode for Actor { + type Methods = Method; + + fn name() -> &'static str { + BUCKET_ACTOR_NAME + } + + actor_dispatch_unrestricted! 
{ + Constructor => constructor, + Init => init, + GetAddress => get_address, + GetMetadata => get_metadata, + AddObject => add_object, + DeleteObject => delete_object, + GetObject => get_object, + ListObjects => list_objects, + UpdateObjectMetadata => update_object_metadata, + // EVM interop + InvokeContract => invoke_contract, + _ => fallback, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_blobs_shared::{ + blobs::Subscription, + bytes::B256, + credit::{CreditApproval, GetCreditApprovalParams}, + method::Method as BlobMethod, + BLOBS_ACTOR_ADDR, + }; + use fendermint_actor_blobs_testing::{new_hash, new_pk, setup_logs}; + use fendermint_actor_machine::{ + sol_facade::{MachineCreated, MachineInitialized}, + ConstructorParams, InitParams, Kind, + }; + use fil_actors_evm_shared::address::EthAddress; + use fil_actors_runtime::{ + runtime::Runtime, + test_utils::{ + expect_empty, MockRuntime, ADM_ACTOR_CODE_ID, ETHACCOUNT_ACTOR_CODE_ID, + INIT_ACTOR_CODE_ID, + }, + }; + use fil_actors_runtime::{ADM_ACTOR_ADDR, INIT_ACTOR_ADDR}; + use fvm_ipld_encoding::ipld_block::IpldBlock; + use fvm_shared::{ + clock::ChainEpoch, econ::TokenAmount, error::ExitCode, sys::SendFlags, MethodNum, + }; + use recall_actor_sdk::evm::to_actor_event; + + fn get_runtime() -> (MockRuntime, Address) { + let origin_id_addr = Address::new_id(110); + let rt = construct_and_verify(origin_id_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, origin_id_addr); + rt.set_origin(origin_id_addr); + (rt, origin_id_addr) + } + + fn construct_and_verify(owner_id_addr: Address) -> MockRuntime { + let owner_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let owner_delegated_addr = Address::new_delegated(10, &owner_eth_addr.0).unwrap(); + + let buck_addr = Address::new_id(111); + let rt = MockRuntime { + receiver: buck_addr, + ..Default::default() + }; + rt.set_delegated_address(owner_id_addr.id().unwrap(), owner_delegated_addr); + + rt.set_caller(*INIT_ACTOR_CODE_ID, INIT_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![INIT_ACTOR_ADDR]); + let metadata = HashMap::new(); + let event = to_actor_event(MachineCreated::new( + Kind::Bucket, + owner_delegated_addr, + &metadata, + )) + .unwrap(); + rt.expect_emitted_event(event); + let actor_construction = rt + .call::<Actor>( + Method::Constructor as u64, + IpldBlock::serialize_cbor(&ConstructorParams { + owner: owner_id_addr, + metadata, + }) + .unwrap(), + ) + .unwrap(); + expect_empty(actor_construction); + rt.verify(); + + rt.set_caller(*ADM_ACTOR_CODE_ID, ADM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![ADM_ACTOR_ADDR]); + let event = to_actor_event(MachineInitialized::new(Kind::Bucket, buck_addr)).unwrap(); + rt.expect_emitted_event(event); + let actor_init = rt + .call::<Actor>( + Method::Init as u64, + IpldBlock::serialize_cbor(&InitParams { address: buck_addr }).unwrap(), + ) + .unwrap(); + expect_empty(actor_init); + rt.verify(); + + rt.reset(); + rt + } + + fn expect_emitted_add_event(rt: &MockRuntime, params: &AddParams) { + let event = to_actor_event(sol::ObjectAdded::new( + &params.key, + &params.hash, + &params.metadata, + )) + .unwrap(); + rt.expect_emitted_event(event); + } + + fn expect_emitted_delete_event(rt: &MockRuntime, params: &DeleteParams, hash: B256) { + let event = to_actor_event(sol::ObjectDeleted::new(&params.0, &hash)).unwrap(); + rt.expect_emitted_event(event); + } + + #[test] + pub fn test_add_object() { + setup_logs(); + let (rt, origin) = get_runtime(); + + // Add an object + let hash = new_hash(256); + let
key = vec![0, 1, 2]; + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + recovery_hash: new_hash(256).0, + size: hash.1, + ttl: None, + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + metadata_hash: add_params.recovery_hash, + id: sub_id, + size: add_params.size, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription::default()).unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + let result = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + assert_eq!(add_params.hash, result.hash); + assert_eq!(add_params.recovery_hash, result.recovery_hash); + assert_eq!(add_params.size, result.size); + assert_eq!(add_params.metadata, result.metadata); + rt.verify(); + } + + #[test] + pub fn test_add_overwrite() { + let (rt, origin) = get_runtime(); + + // Add an object + let hash = new_hash(256); + let key = vec![0, 1, 2]; + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + recovery_hash: new_hash(256).0, + size: hash.1, + ttl: None, + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + metadata_hash: add_params.recovery_hash, + id: sub_id.clone(), + size: add_params.size, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription::default()).unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + let result = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + assert_eq!(add_params.hash, result.hash); + assert_eq!(add_params.metadata, result.metadata); + assert_eq!(add_params.recovery_hash, result.recovery_hash); + assert_eq!(add_params.size, result.size); + rt.verify(); + + // Overwrite object (old blob is deleted) + let hash = new_hash(256); + let add_params2 = AddParams { + source: add_params.source, + key: add_params.key, + hash: hash.0, + recovery_hash: new_hash(256).0, + size: hash.1, + ttl: None, + metadata: HashMap::new(), + overwrite: true, + }; + rt.expect_validate_caller_any(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::OverwriteBlob as MethodNum, + IpldBlock::serialize_cbor(&OverwriteBlobParams { + old_hash: add_params.hash, + add: AddBlobParams { + id: sub_id, + hash: add_params2.hash, + sponsor: Some(origin), + source: add_params2.source, + metadata_hash: add_params2.recovery_hash, + size: add_params2.size, + ttl: add_params2.ttl, + from: origin, + }, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription::default()).unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params2); + let
result = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params2).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + assert_eq!(add_params2.hash, result.hash); + assert_eq!(add_params2.metadata, result.metadata); + assert_eq!(add_params2.recovery_hash, result.recovery_hash); + assert_eq!(add_params2.size, result.size); + rt.verify(); + } + + #[test] + pub fn test_add_overwrite_fail() { + let (rt, origin) = get_runtime(); + + // Add an object + let hash = new_hash(256); + let key = vec![0, 1, 2]; + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + size: hash.1, + recovery_hash: new_hash(256).0, + ttl: None, + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + metadata_hash: add_params.recovery_hash, + id: sub_id, + size: add_params.size, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription { + added: 0, + overlap: 0, + expiry: ChainEpoch::from(3600), + source: add_params.source, + delegate: None, + failed: false, + }) + .unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + let result = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + let state = rt.state::<State>().unwrap(); + assert_eq!(add_params.hash, result.hash); + assert_eq!(add_params.metadata, result.metadata); + assert_eq!(add_params.recovery_hash, result.recovery_hash); + assert_eq!(add_params.size, result.size); + rt.verify(); + + // Try to overwrite + let hash = new_hash(256); + let add_params2 = AddParams { + source: add_params.source, + key: add_params.key, + hash: hash.0, + size: hash.1, + recovery_hash: new_hash(256).0, + ttl: None, + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let result = rt.call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params2).unwrap(), + ); + assert!(result.is_err_and(|e| { e.msg().eq("key exists; use overwrite") })); + let state2 = rt.state::<State>().unwrap(); + assert_eq!(state2.objects.root, state.objects.root); + rt.verify(); + } + + #[test] + pub fn test_delete_object() { + let (rt, origin) = get_runtime(); + + // Add an object + let key = vec![0, 1, 2]; + let hash = new_hash(256); + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + size: hash.1, + recovery_hash: new_hash(256).0, + ttl: None, + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + id: sub_id.clone(), + size: add_params.size, + metadata_hash: add_params.recovery_hash, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription::default()).unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + let
result_add = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + assert_eq!(add_params.hash, result_add.hash); + assert_eq!(add_params.metadata, result_add.metadata); + assert_eq!(add_params.recovery_hash, result_add.recovery_hash); + assert_eq!(add_params.size, result_add.size); + rt.verify(); + + // Delete object + let delete_params = DeleteParams(key); + rt.expect_validate_caller_any(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::DeleteBlob as MethodNum, + IpldBlock::serialize_cbor(&DeleteBlobParams { + from: origin, + sponsor: Some(origin), + hash: add_params.hash, + id: sub_id, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + ExitCode::OK, + ); + expect_emitted_delete_event(&rt, &delete_params, add_params.hash); + let result_delete = rt.call::<Actor>( + Method::DeleteObject as u64, + IpldBlock::serialize_cbor(&delete_params).unwrap(), + ); + assert!(result_delete.is_ok()); + rt.verify(); + } + + #[test] + pub fn test_get_object_none() { + let (rt, _) = get_runtime(); + + let get_params = GetParams(vec![0, 1, 2]); + rt.expect_validate_caller_any(); + let result = rt + .call::<Actor>( + Method::GetObject as u64, + IpldBlock::serialize_cbor(&get_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Option<Object>>(); + assert!(result.is_ok()); + assert_eq!(result, Ok(None)); + rt.verify(); + } + + #[test] + pub fn test_get_object() { + let (rt, origin) = get_runtime(); + + // Add an object + let key = vec![0, 1, 2]; + let hash = new_hash(256); + let ttl = ChainEpoch::from(3600); + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + size: hash.1, + recovery_hash: new_hash(256).0, + ttl: Some(ttl), + metadata: HashMap::new(), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + id: sub_id.clone(), + size: add_params.size, + metadata_hash: add_params.recovery_hash, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription { + added: 0, + overlap: 0, + expiry: ttl, + source: add_params.source, + delegate: None, + failed: false, + }) + .unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + rt.call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + rt.verify(); + + // Get the object + let blob = Blob { + size: add_params.size, + subscribers: HashMap::from([(sub_id, ttl)]), + status: BlobStatus::Resolved, + metadata_hash: add_params.recovery_hash, + }; + + rt.expect_validate_caller_any(); + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetBlob as MethodNum, + IpldBlock::serialize_cbor(&GetBlobParams(add_params.hash)).unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&Some(blob)).unwrap(), + ExitCode::OK, + None, + ); + let get_params = GetParams(key); + let result = rt + .call::<Actor>( + Method::GetObject as u64, + IpldBlock::serialize_cbor(&get_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Option<Object>>(); + assert!(result.is_ok()); + assert_eq!( + result.unwrap(), + Some(Object { + hash: hash.0, +
recovery_hash: add_params.recovery_hash, + size: add_params.size, + expiry: ttl, + metadata: add_params.metadata, + }) + ); + rt.verify(); + } + + #[test] + pub fn test_update_object_metadata() { + let (rt, origin) = get_runtime(); + + // Add an object + let hash = new_hash(256); + let key = vec![0, 1, 2]; + let ttl = ChainEpoch::from(3600); + let add_params: AddParams = AddParams { + source: new_pk(), + key: key.clone(), + hash: hash.0, + size: hash.1, + recovery_hash: new_hash(256).0, + ttl: Some(ttl), + metadata: HashMap::from([("foo".into(), "bar".into()), ("foo2".into(), "bar".into())]), + overwrite: false, + }; + rt.expect_validate_caller_any(); + let state = rt.state::<State>().unwrap(); + let sub_id = get_blob_id(&state, &key).unwrap(); + rt.expect_send_simple( + BLOBS_ACTOR_ADDR, + BlobMethod::AddBlob as MethodNum, + IpldBlock::serialize_cbor(&AddBlobParams { + sponsor: Some(origin), + source: add_params.source, + hash: add_params.hash, + metadata_hash: add_params.recovery_hash, + id: sub_id.clone(), + size: add_params.size, + ttl: add_params.ttl, + from: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + IpldBlock::serialize_cbor(&Subscription { + added: 0, + overlap: 0, + expiry: ttl, + source: add_params.source, + delegate: None, + failed: false, + }) + .unwrap(), + ExitCode::OK, + ); + expect_emitted_add_event(&rt, &add_params); + let result = rt + .call::<Actor>( + Method::AddObject as u64, + IpldBlock::serialize_cbor(&add_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Object>() + .unwrap(); + assert_eq!(add_params.hash, result.hash); + assert_eq!(add_params.metadata, result.metadata); + assert_eq!(add_params.recovery_hash, result.recovery_hash); + assert_eq!(add_params.size, result.size); + rt.verify(); + + // Update metadata + let update_object_params = UpdateObjectMetadataParams { + key: add_params.key.clone(), + metadata: HashMap::from([ + ("foo".into(), Some("zar".into())), + ("foo2".into(), None), + ("foo3".into(), Some("bar".into())), + ]), + }; + rt.expect_validate_caller_any(); + let event = to_actor_event(sol::ObjectMetadataUpdated { + key: &add_params.key, + metadata: &HashMap::from([("foo".into(), "zar".into()), ("foo3".into(), "bar".into())]), + }) + .unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::<Actor>( + Method::UpdateObjectMetadata as u64, + IpldBlock::serialize_cbor(&update_object_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Sent from an alien address with no credit approval hence no access rights + let alien_id_addr = Address::new_id(112); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, alien_id_addr); + rt.set_origin(alien_id_addr); + rt.expect_validate_caller_any(); + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: origin, + to: alien_id_addr, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + // We do not care what is inside credit approval. We only care if it is present. + IpldBlock::serialize_cbor::<Option<CreditApproval>>(&None).unwrap(), + ExitCode::OK, + None, + ); + let result = rt.call::<Actor>( + Method::UpdateObjectMetadata as u64, + IpldBlock::serialize_cbor(&update_object_params).unwrap(), + ); + assert!(result.is_err()); + rt.verify(); + + // Fail if "from" is not the owner, and has no delegation.
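+ // As above, the blobs actor reports no approval from the owner to the alien caller, so the update must fail.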
+ rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, alien_id_addr); + rt.set_origin(alien_id_addr); + rt.expect_validate_caller_any(); + let alien_update = UpdateObjectMetadataParams { + key: update_object_params.key, + metadata: update_object_params.metadata, + }; + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: origin, + to: alien_id_addr, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor::<Option<CreditApproval>>(&None).unwrap(), + ExitCode::OK, + None, + ); + let result = rt.call::<Actor>( + Method::UpdateObjectMetadata as u64, + IpldBlock::serialize_cbor(&alien_update).unwrap(), + ); + assert!(result.is_err()); + rt.verify(); + + // Allowed if there is a delegation + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, alien_id_addr); + rt.set_origin(alien_id_addr); + rt.expect_validate_caller_any(); + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: origin, + to: alien_id_addr, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + // We do not care what is inside credit approval. We only care if it is present. + IpldBlock::serialize_cbor::<Option<CreditApproval>>(&Some(CreditApproval { + credit_limit: None, + gas_allowance_limit: None, + expiry: None, + credit_used: TokenAmount::from_whole(0), + gas_allowance_used: TokenAmount::from_whole(0), + })) + .unwrap(), + ExitCode::OK, + None, + ); + let event = to_actor_event(sol::ObjectMetadataUpdated { + key: &alien_update.key, + metadata: &HashMap::from([("foo".into(), "zar".into()), ("foo3".into(), "bar".into())]), + }) + .unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::<Actor>( + Method::UpdateObjectMetadata as u64, + IpldBlock::serialize_cbor(&alien_update).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Get the object and check metadata + let blob = Blob { + size: add_params.size, + subscribers: HashMap::from([(sub_id, ttl)]), + status: BlobStatus::Resolved, + metadata_hash: add_params.recovery_hash, + }; + rt.expect_validate_caller_any(); + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetBlob as MethodNum, + IpldBlock::serialize_cbor(&GetBlobParams(add_params.hash)).unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&Some(blob)).unwrap(), + ExitCode::OK, + None, + ); + let get_params = GetParams(key); + let result = rt + .call::<Actor>( + Method::GetObject as u64, + IpldBlock::serialize_cbor(&get_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::<Option<Object>>(); + assert!(result.is_ok()); + assert_eq!( + result.unwrap(), + Some(Object { + hash: hash.0, + recovery_hash: add_params.recovery_hash, + size: add_params.size, + expiry: ChainEpoch::from(3600), + metadata: HashMap::from([ + ("foo".into(), "zar".into()), + ("foo3".into(), "bar".into()) + ]), + }) + ); + rt.verify(); + } +} diff --git a/fendermint/actors/bucket/src/lib.rs b/fendermint/actors/bucket/src/lib.rs new file mode 100644 index 0000000000..a784389323 --- /dev/null +++ b/fendermint/actors/bucket/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod shared; +mod sol_facade; +mod state; + +pub use shared::*; diff --git a/fendermint/actors/bucket/src/shared.rs b/fendermint/actors/bucket/src/shared.rs new file mode 100644 index 0000000000..ad7f597b00 --- /dev/null +++
b/fendermint/actors/bucket/src/shared.rs @@ -0,0 +1,123 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_actor_machine::{ + GET_ADDRESS_METHOD, GET_METADATA_METHOD, INIT_METHOD, METHOD_CONSTRUCTOR, +}; +use fvm_ipld_encoding::{strict_bytes, tuple::*}; +use fvm_shared::clock::ChainEpoch; +use num_derive::FromPrimitive; +use serde::{Deserialize, Serialize}; + +pub use crate::state::{ObjectState, State}; + +pub const BUCKET_ACTOR_NAME: &str = "bucket"; +pub const MAX_METADATA_ENTRIES: u32 = 20; +pub const MAX_METADATA_KEY_SIZE: u32 = 32; +pub const MAX_METADATA_VALUE_SIZE: u32 = 128; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Init = INIT_METHOD, + GetAddress = GET_ADDRESS_METHOD, + GetMetadata = GET_METADATA_METHOD, + AddObject = frc42_dispatch::method_hash!("AddObject"), + DeleteObject = frc42_dispatch::method_hash!("DeleteObject"), + GetObject = frc42_dispatch::method_hash!("GetObject"), + ListObjects = frc42_dispatch::method_hash!("ListObjects"), + UpdateObjectMetadata = frc42_dispatch::method_hash!("UpdateObjectMetadata"), + // EVM Interop + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), +} + +/// Params for adding an object. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct AddParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Object key. + #[serde(with = "strict_bytes")] + pub key: Vec<u8>, + /// Object blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for object recovery. + pub recovery_hash: B256, + /// Object size. + pub size: u64, + /// Object time-to-live epochs. + /// If not specified, the current default TTL from the config actor is used. + pub ttl: Option<ChainEpoch>, + /// Object metadata. + pub metadata: HashMap<String, String>, + /// Whether to overwrite a key if it already exists. + pub overwrite: bool, +} + +/// Key of the object to delete from a bucket. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DeleteParams(#[serde(with = "strict_bytes")] pub Vec<u8>); + +/// Params for getting an object. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetParams(#[serde(with = "strict_bytes")] pub Vec<u8>); + +/// Params for listing objects. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListParams { + /// The prefix to filter objects by. + #[serde(with = "strict_bytes")] + pub prefix: Vec<u8>, + /// The delimiter used to define object hierarchy. + #[serde(with = "strict_bytes")] + pub delimiter: Vec<u8>, + /// The key to start listing objects from. + pub start_key: Option<Vec<u8>>, + /// The maximum number of objects to list. + pub limit: u64, +} + +/// The stored representation of an object in the bucket. +#[derive(Clone, Debug, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Object { + /// The object blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for object recovery. + pub recovery_hash: B256, + /// The object size. + pub size: u64, + /// Expiry block. + pub expiry: ChainEpoch, + /// User-defined object metadata (e.g., last modified timestamp, etc.). + pub metadata: HashMap<String, String>, +} + +/// A list of objects and their common prefixes. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListObjectsReturn { + /// List of key-values matching the list query.
+ pub objects: Vec<(Vec<u8>, ObjectState)>, + /// When a delimiter is used in the list query, this contains common key prefixes. + pub common_prefixes: Vec<Vec<u8>>, + /// Next key to use for paginating when there are more objects to list. + pub next_key: Option<Vec<u8>>, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct UpdateObjectMetadataParams { + /// Object key. + #[serde(with = "strict_bytes")] + pub key: Vec<u8>, + /// Object metadata to be inserted/updated/deleted. + /// + /// If a key-value pair is present, we update the entry (or insert it if it does not exist). + /// If only the key is present, we delete the metadata entry. + pub metadata: HashMap<String, Option<String>>, +} diff --git a/fendermint/actors/bucket/src/sol_facade.rs b/fendermint/actors/bucket/src/sol_facade.rs new file mode 100644 index 0000000000..33ec957844 --- /dev/null +++ b/fendermint/actors/bucket/src/sol_facade.rs @@ -0,0 +1,413 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; +use std::string::ToString; + +use anyhow::Error; +use fendermint_actor_blobs_shared::bytes::B256; +use fil_actors_runtime::{actor_error, ActorError}; +use fvm_shared::clock::ChainEpoch; +use num_traits::Zero; +use recall_actor_sdk::{declare_abi_call, evm::TryIntoEVMEvent}; +pub use recall_sol_facade::bucket::Calls; +use recall_sol_facade::{ + bucket as sol, + types::{SolCall, SolInterface}, +}; + +use crate::{ + AddParams, DeleteParams, GetParams, ListObjectsReturn, ListParams, Object, + UpdateObjectMetadataParams, +}; + +declare_abi_call!(); + +// ----- Events ----- // + +pub struct ObjectAdded<'a> { + pub key: &'a Vec<u8>, + pub blob_hash: &'a B256, + pub metadata: &'a HashMap<String, String>, +} +impl<'a> ObjectAdded<'a> { + pub fn new( + key: &'a Vec<u8>, + blob_hash: &'a B256, + metadata: &'a HashMap<String, String>, + ) -> Self { + Self { + key, + blob_hash, + metadata, + } + } +} +impl TryIntoEVMEvent for ObjectAdded<'_> { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result<Self::Target, Error> { + let metadata = fvm_ipld_encoding::to_vec(self.metadata)?; + Ok(sol::Events::ObjectAdded(sol::ObjectAdded { + key: self.key.clone().into(), + blobHash: self.blob_hash.0.into(), + metadata: metadata.into(), + })) + } +} + +pub struct ObjectMetadataUpdated<'a> { + pub key: &'a Vec<u8>, + pub metadata: &'a HashMap<String, String>, +} +impl<'a> ObjectMetadataUpdated<'a> { + pub fn new(key: &'a Vec<u8>, metadata: &'a HashMap<String, String>) -> Self { + Self { key, metadata } + } +} +impl TryIntoEVMEvent for ObjectMetadataUpdated<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result<Self::Target, Error> { + let metadata = fvm_ipld_encoding::to_vec(self.metadata)?; + Ok(sol::Events::ObjectMetadataUpdated( + sol::ObjectMetadataUpdated { + key: self.key.clone().into(), + metadata: metadata.into(), + }, + )) + } +} + +pub struct ObjectDeleted<'a> { + pub key: &'a Vec<u8>, + pub blob_hash: &'a B256, +} +impl<'a> ObjectDeleted<'a> { + pub fn new(key: &'a Vec<u8>, blob_hash: &'a B256) -> Self { + Self { key, blob_hash } + } +} +impl TryIntoEVMEvent for ObjectDeleted<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result<Self::Target, Error> { + Ok(sol::Events::ObjectDeleted(sol::ObjectDeleted { + key: self.key.clone().into(), + blobHash: self.blob_hash.0.into(), + })) + } +} + +// ----- Calls ----- // + +pub fn can_handle(input_data: &recall_actor_sdk::evm::InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &recall_actor_sdk::evm::InputData) -> Result<Calls, ActorError> { + Calls::abi_decode_raw(input.selector(),
input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +impl AbiCall for sol::addObject_0Call { + type Params = AddParams; + type Returns = (); + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let source = B256(self.source.into()); + let key: Vec<u8> = self.key.clone().into_bytes(); + let hash = B256(self.hash.into()); + let recovery_hash = B256(self.recoveryHash.into()); + let size = self.size; + AddParams { + source, + key, + hash, + recovery_hash, + size, + ttl: None, + metadata: HashMap::default(), + overwrite: false, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::addObject_1Call { + type Params = AddParams; + type Returns = (); + type Output = Vec<u8>; + fn params(&self) -> Self::Params { + let source = B256(self.source.into()); + let key: Vec<u8> = self.key.clone().into_bytes(); + let hash = B256(self.hash.into()); + let recovery_hash = B256(self.recoveryHash.into()); + let size = self.size; + let ttl = if self.ttl.clone().is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let mut metadata: HashMap<String, String> = HashMap::with_capacity(self.metadata.len()); + for kv in self.metadata.iter().cloned() { + metadata.insert(kv.key, kv.value); + } + let overwrite = self.overwrite; + AddParams { + source, + key, + hash, + recovery_hash, + size, + ttl, + metadata, + overwrite, + } + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::deleteObjectCall { + type Params = DeleteParams; + type Returns = (); + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let key: Vec<u8> = self.key.clone().into_bytes(); + DeleteParams(key) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::getObjectCall { + type Params = GetParams; + type Returns = Option<Object>; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let key = self.key.clone().into_bytes(); + GetParams(key) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let object = returns + .map(|object| sol::ObjectValue { + blobHash: object.hash.0.into(), + recoveryHash: object.recovery_hash.0.into(), + size: object.size, + expiry: object.expiry as u64, + metadata: sol_metadata(object.metadata), + }) + .unwrap_or(sol::ObjectValue { + blobHash: [0u8; 32].into(), + recoveryHash: [0u8; 32].into(), + size: 0, + expiry: 0, + metadata: vec![], + }); + Self::abi_encode_returns(&(object,)) + } +} + +fn sol_metadata(metadata: HashMap<String, String>) -> Vec<sol::KeyValue> { + metadata + .iter() + .map(|(k, v)| sol::KeyValue { + key: k.clone(), + value: v.clone(), + }) + .collect() +} + +fn sol_query(list: ListObjectsReturn) -> sol::Query { + sol::Query { + objects: list + .objects + .iter() + .map(|(key, object_state)| sol::Object { + key: String::from_utf8_lossy(key.as_slice()).to_string(), + state: sol::ObjectState { + blobHash: object_state.hash.0.into(), + size: object_state.size, + expiry: object_state.expiry as u64, + metadata: sol_metadata(object_state.metadata.clone()), + }, + }) + .collect(), + commonPrefixes: list + .common_prefixes + .iter() + .map(|prefix| String::from_utf8_lossy(prefix.as_slice()).to_string()) + .collect(), + nextKey: list + .next_key + .map(|k| String::from_utf8_lossy(k.as_slice()).to_string()) + .unwrap_or_default(), + } +} + +const DEFAULT_DELIMITER: &[u8] = b"/"; // "/" in ASCII and UTF-8 +const DEFAULT_START_KEY: Option<Vec<u8>>
= None; //= "" +const DEFAULT_PREFIX: Vec<u8> = vec![]; //= "" +const DEFAULT_LIMIT: u64 = 0; + +impl AbiCall for sol::queryObjects_0Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = if self.startKey.is_empty() { + None + } else { + Some(self.startKey.clone().into_bytes()) + }; + let limit = self.limit; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_1Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = if self.startKey.is_empty() { + None + } else { + Some(self.startKey.clone().into_bytes()) + }; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_2Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = DEFAULT_DELIMITER.to_vec(); + let start_key = DEFAULT_START_KEY; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_3Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let prefix = DEFAULT_PREFIX; + let delimiter = DEFAULT_DELIMITER.to_vec(); + let start_key = DEFAULT_START_KEY; + let limit = 0; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_4Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = DEFAULT_START_KEY; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::updateObjectMetadataCall { + type Params = UpdateObjectMetadataParams; + type Returns = (); + type Output = Vec<u8>; + + fn params(&self) -> Self::Params { + let mut metadata: HashMap<String, Option<String>> = HashMap::default(); + for kv in self.metadata.iter().cloned() { + let key = kv.key; + let value = kv.value; + let value = if value.is_empty() { None } else { Some(value) }; + metadata.insert(key, value); + } + UpdateObjectMetadataParams { + key: self.key.clone().into_bytes(), + metadata, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns)
+ } +} diff --git a/fendermint/actors/bucket/src/state.rs b/fendermint/actors/bucket/src/state.rs new file mode 100644 index 0000000000..cb7d712081 --- /dev/null +++ b/fendermint/actors/bucket/src/state.rs @@ -0,0 +1,790 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; +use std::fmt::{Debug, Display, Formatter}; +use std::string::FromUtf8Error; + +use cid::Cid; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_actor_machine::{Kind, MachineAddress, MachineState}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use recall_ipld::hamt::{self, map::TrackedFlushResult, BytesKey, MapKey}; +use serde::{Deserialize, Serialize}; + +const MAX_LIST_LIMIT: usize = 1000; + +fn utf8_error(e: FromUtf8Error) -> ActorError { + ActorError::illegal_argument(e.to_string()) +} + +/// The state represents a bucket backed by a Hamt. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// The machine address set by the init actor. + pub address: MachineAddress, + /// The machine robust owner address. + pub owner: Address, + /// The objects Hamt. + pub objects: ObjectsState, + /// User-defined metadata (e.g., bucket name, etc.). + pub metadata: HashMap<String, String>, +} +impl MachineState for State { + fn new<BS: Blockstore>( + store: &BS, + owner: Address, + metadata: HashMap<String, String>, + ) -> Result<Self, ActorError> { + Ok(Self { + address: Default::default(), + objects: ObjectsState::new(store)?, + owner, + metadata, + }) + } + + fn init(&mut self, address: Address) -> Result<(), ActorError> { + self.address.set(address) + } + + fn address(&self) -> MachineAddress { + self.address.clone() + } + + fn kind(&self) -> Kind { + Kind::Bucket + } + + fn owner(&self) -> Address { + self.owner + } + + fn metadata(&self) -> HashMap<String, String> { + self.metadata.clone() + } +} + +/// The stored representation of an object in the bucket. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ObjectState { + /// The object blake3 hash. + pub hash: B256, + /// The object size. + pub size: u64, + /// Expiry block. + pub expiry: ChainEpoch, + /// User-defined object metadata (e.g., last modified timestamp, etc.). + pub metadata: HashMap<String, String>, +} + +/// A list of objects and their common prefixes. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ObjectList { + /// List of key-values matching the list query. + pub objects: Vec<(Vec<u8>, ObjectState)>, + /// When a delimiter is used in the list query, this contains common key prefixes.
+ pub common_prefixes: Vec<Vec<u8>>, +} + +impl State { + #[allow(clippy::too_many_arguments)] + pub fn add<BS: Blockstore>( + &mut self, + store: &BS, + key: BytesKey, + hash: B256, + size: u64, + expiry: ChainEpoch, + metadata: HashMap<String, String>, + overwrite: bool, + ) -> Result<Cid, ActorError> { + let object_key = ObjectKey(key.clone()); + let mut objects = self.objects.hamt(store)?; + let object = ObjectState { + hash, + size, + expiry, + metadata, + }; + if overwrite { + objects.set(&object_key, object)?; + } else { + objects.set_if_absent(&object_key, object)?; + } + self.objects.save_tracked(objects.flush_tracked()?); + Ok(*self.objects.root.cid()) + } + + pub fn delete<BS: Blockstore>( + &mut self, + store: &BS, + key: &BytesKey, + ) -> Result<(ObjectState, Cid), ActorError> { + let mut objects = self.objects.hamt(store)?; + let object_key = ObjectKey(key.clone()); + let (tracked_result, object) = objects.delete_and_flush_tracked(&object_key)?; + self.objects.save_tracked(tracked_result); + + match object { + Some(object) => Ok((object, self.objects.root.cid().to_owned())), + None => Err(ActorError::not_found("key not found".into())), + } + } + + pub fn get<BS: Blockstore>( + &self, + store: &BS, + key: &BytesKey, + ) -> Result<Option<ObjectState>, ActorError> { + let object_key = ObjectKey(key.clone()); + let object = self.objects.hamt(store)?.get(&object_key)?; + Ok(object) + } + + pub fn list<BS: Blockstore, F>( + &self, + store: &BS, + prefix: Vec<u8>, + delimiter: Vec<u8>, + start_key: Option<&BytesKey>, + limit: u64, + mut collector: F, + ) -> Result<(Vec<Vec<u8>>, Option<BytesKey>), ActorError> + where + F: FnMut(Vec<u8>, ObjectState) -> Result<(), ActorError>, + { + let objects = self.objects.hamt(store)?; + let mut common_prefixes = std::collections::BTreeSet::<Vec<u8>>::new(); + let limit = if limit == 0 { + MAX_LIST_LIMIT + } else { + (limit as usize).min(MAX_LIST_LIMIT) + }; + + let (_, next_key) = objects.for_each_ranged(start_key, Some(limit), |k, v| { + let key = k.0 .0.clone(); + if !prefix.is_empty() && !key.starts_with(&prefix) { + return Ok(false); + } + if !delimiter.is_empty() { + let utf8_prefix = String::from_utf8(prefix.clone()).map_err(utf8_error)?; + let prefix_length = utf8_prefix.len(); + let utf8_key = String::from_utf8(key.clone()).map_err(utf8_error)?; + let utf8_delimiter = String::from_utf8(delimiter.clone()).map_err(utf8_error)?; + if let Some(index) = utf8_key[prefix_length..].find(&utf8_delimiter) { + let subset = utf8_key[..=(index + prefix_length)].as_bytes().to_owned(); + common_prefixes.insert(subset); + return Ok(false); + } + } + collector(key, v.to_owned())?; + Ok(true) + })?; + + let common_prefixes = common_prefixes.into_iter().collect(); + Ok((common_prefixes, next_key.map(|key| key.0))) + } +} + +#[derive(Debug, PartialEq)] +pub struct ObjectKey(pub BytesKey); + +impl MapKey for ObjectKey { + fn from_bytes(b: &[u8]) -> Result<Self, String> { + Ok(ObjectKey(BytesKey(b.to_vec()))) + } + + fn to_bytes(&self) -> Result<Vec<u8>, String> { + Ok(self.0 .0.to_vec()) + } +} + +impl Display for ObjectKey { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}", String::from_utf8_lossy(&self.0 .0)) + } +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ObjectsState { + pub root: hamt::Root<ObjectKey, ObjectState>, + size: u64, +} + +impl ObjectsState { + pub fn new<BS: Blockstore>(store: &BS) -> Result<Self, ActorError> { + let root = hamt::Root::<ObjectKey, ObjectState>::new(store, "objects")?; + Ok(Self { root, size: 0 }) + } + + pub fn hamt<BS: Blockstore>( + &self, + store: BS, + ) -> Result<hamt::map::Hamt<BS, ObjectKey, ObjectState>, ActorError> { + self.root.hamt(store, self.size) + } + + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult<ObjectKey, ObjectState>, + ) { + self.root = tracked_flush_result.root; +
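// Keep the cached entry count in sync with the flushed root. +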
self.size = tracked_flush_result.size + } + + pub fn len(&self) -> u64 { + self.size + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_blobs_testing::{new_hash, new_hash_from_vec}; + use fvm_ipld_blockstore::MemoryBlockstore; + use quickcheck::Arbitrary; + use quickcheck_macros::quickcheck; + use std::str::FromStr; + + impl Arbitrary for ObjectState { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let hash = new_hash(u16::arbitrary(g) as usize); + ObjectState { + hash: hash.0, + expiry: i64::arbitrary(g), + size: u64::arbitrary(g), + metadata: HashMap::arbitrary(g), + } + } + } + + fn object_one() -> ObjectState { + let (hash, size) = new_hash_from_vec([1, 2, 3, 4, 5].to_vec()); + let mut metadata = HashMap::<String, String>::new(); + metadata.insert("_created".to_string(), String::from("1718464344")); + metadata.insert("_modified".to_string(), String::from("1718464345")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + const OBJECT_ONE_CID: &str = "bafy2bzacea5tbd4x6okckdkb2yl7wbyjqpxkow6whr46dswwv5xj7va4uro2g"; + + fn object_two() -> ObjectState { + let (hash, size) = new_hash_from_vec([6, 7, 8, 9, 10, 11].to_vec()); + let mut metadata = HashMap::<String, String>::new(); + metadata.insert("_created".to_string(), String::from("1718464456")); + metadata.insert("_modified".to_string(), String::from("1718480987")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + fn object_three() -> ObjectState { + let (hash, size) = new_hash_from_vec([11, 12, 13, 14, 15, 16, 17].to_vec()); + let mut metadata = HashMap::<String, String>::new(); + metadata.insert("_created".to_string(), String::from("1718465678")); + metadata.insert("_modified".to_string(), String::from("1718512346")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + #[allow(clippy::type_complexity)] + fn list<BS: Blockstore>( + state: &State, + store: &BS, + prefix: Vec<u8>, + delimiter: Vec<u8>, + start_key: Option<&BytesKey>, + limit: u64, + ) -> Result<(Vec<(Vec<u8>, ObjectState)>, Vec<Vec<u8>>, Option<BytesKey>), ActorError> { + let mut objects = Vec::new(); + let (prefixes, next_key) = state.list( + store, + prefix, + delimiter, + start_key, + limit, + |key: Vec<u8>, object: ObjectState| -> Result<(), ActorError> { + objects.push((key, object)); + Ok(()) + }, + )?; + Ok((objects, prefixes, next_key)) + } + + fn get_lex_sequence(start: Vec<u8>, count: usize) -> Vec<Vec<u8>> { + let mut current = start; + let mut sequence = Vec::with_capacity(count); + for _ in 0..count { + sequence.push(current.clone()); + for i in (0..current.len()).rev() { + if current[i] < 255 { + current[i] += 1; + break; + } else { + current[i] = 0; // Reset this byte to 0 and carry to the next byte + } + } + } + sequence + } + + #[test] + fn test_constructor() { + let store = MemoryBlockstore::default(); + let state = State::new(&store, Address::new_id(100), HashMap::new()); + assert!(state.is_ok()); + assert_eq!( + *state.unwrap().objects.root.cid(), + Cid::from_str("bafy2bzaceamp42wmmgr2g2ymg46euououzfyck7szknvfacqscohrvaikwfay") + .unwrap() + ); + } + + #[test] + fn test_add() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let object = object_one(); + assert!(state + .add( + &store, + BytesKey(vec![1, 2, 3]), + object.hash, + object.size, + object.expiry, + object.metadata, + true, + ) + .is_ok()); + + assert_eq!( + *state.objects.root.cid(), + Cid::from_str(OBJECT_ONE_CID).unwrap() + ); + } + + #[quickcheck] + fn test_delete(object: ObjectState) { + let store =
MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let key = BytesKey(vec![1, 2, 3]); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + true, + ) + .unwrap(); + assert!(state.delete(&store, &key).is_ok()); + + let result = state.get(&store, &key); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), None); + } + + #[quickcheck] + fn test_get(object: ObjectState) { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let key = BytesKey(vec![1, 2, 3]); + let md = object.metadata.clone(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + md, + true, + ) + .unwrap(); + let result = state.get(&store, &key); + + assert!(result.is_ok()); + assert_eq!(result.unwrap().unwrap(), object); + } + + fn create_and_put_objects( + state: &mut State, + store: &MemoryBlockstore, + ) -> anyhow::Result<(BytesKey, BytesKey, BytesKey)> { + let baz_key = BytesKey("foo/baz.png".as_bytes().to_vec()); // index 0 + let object = object_one(); + state.add( + store, + baz_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + let bar_key = BytesKey("foo/bar.png".as_bytes().to_vec()); // index 1 + let object = object_two(); + state.add( + store, + bar_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + // We'll mostly ignore this one + let other_key = BytesKey("zzzz/image.png".as_bytes().to_vec()); // index 2 + let hash = new_hash(256); + state.add( + &store, + other_key.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + )?; + let jpeg_key = BytesKey("foo.jpeg".as_bytes().to_vec()); // index 3 + let object = object_three(); + state.add( + store, + jpeg_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + Ok((baz_key, bar_key, jpeg_key)) + } + + #[test] + fn test_list_all_keys() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (baz_key, _, _) = create_and_put_objects(&mut state, &store).unwrap(); + + // List all keys with a limit + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 4); + assert_eq!(result.0.first(), Some(&(baz_key.0, object_one()))); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_more_than_max_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let sequence = get_lex_sequence(vec![0, 0, 0], MAX_LIST_LIMIT + 10); + for key in sequence { + let key = BytesKey(key); + let object = object_one(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + ) + .unwrap(); + } + + // List all keys but has more + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), MAX_LIST_LIMIT); + // Note: This isn't the element at MAX_LIST_LIMIT + 1 as one might expect. + // The ordering is deterministic but depends on the HAMT structure.
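+ // A caller pages through the remaining entries by feeding this next key back in as start_key, as below.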
+ assert_eq!(result.2, Some(BytesKey(vec![0, 3, 86]))); + + let next_key = result.2.unwrap(); + + // List remaining objects + let result = list(&state, &store, vec![], vec![], Some(&next_key), 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 10); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_at_max_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + for i in 0..MAX_LIST_LIMIT { + let key = BytesKey(format!("{}.txt", i).as_bytes().to_vec()); + let object = object_one(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + ) + .unwrap(); + } + + // List all keys + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), MAX_LIST_LIMIT); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_keys_with_prefix() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (baz_key, bar_key, _) = create_and_put_objects(&mut state, &store).unwrap(); + + let foo_key = BytesKey("foo".as_bytes().to_vec()); + let result = list(&state, &store, foo_key.0.clone(), vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 3); + assert_eq!(result.0[0], (baz_key.0, object_one())); + assert_eq!(result.0[1], (bar_key.0, object_two())); + } + + #[test] + fn test_list_keys_with_delimiter() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (_, _, jpeg_key) = create_and_put_objects(&mut state, &store).unwrap(); + + let foo_key = BytesKey("foo".as_bytes().to_vec()); + let delimiter_key = BytesKey("/".as_bytes().to_vec()); + let full_key = [foo_key.clone(), delimiter_key.clone()].concat(); + let result = list( + &state, + &store, + foo_key.0.clone(), + delimiter_key.0.clone(), + None, + 4, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!(result.0[0], (jpeg_key.0, object_three())); + assert_eq!(result.1[0], full_key); + } + + #[test] + fn test_list_keys_with_nested_delimiter() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let jpeg_key = BytesKey("foo.jpeg".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + jpeg_key.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + let bar_key = BytesKey("bin/foo/bar.png".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + bar_key.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + let baz_key = BytesKey("bin/foo/baz.png".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + baz_key.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + + let bin_key = BytesKey("bin/".as_bytes().to_vec()); + let full_key = BytesKey("bin/foo/".as_bytes().to_vec()); + let delimiter_key = BytesKey("/".as_bytes().to_vec()); + let result = list( + &state, + &store, + bin_key.0.clone(), + delimiter_key.0.clone(), + None, + 0, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 0); + assert_eq!(result.1.len(), 1); +
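// Grouping stops at the first delimiter past the "bin/" prefix, so "bin/foo/" is the only common prefix reported. +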
assert_eq!(result.1[0], full_key.0); + } + + #[test] + fn test_list_with_start_key_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (_, bar_key, _) = create_and_put_objects(&mut state, &store).unwrap(); + + // List all keys with a limit and start key + let result = list(&state, &store, vec![], vec![], Some(&bar_key), 1); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + // Note that baz is listed first in order + assert_eq!(result.0.first(), Some(&(bar_key.0, object_two()))); + } + + #[test] + fn test_list_with_prefix_delimiter_and_start_key_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let one = BytesKey("hello/world".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + one.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + let two = BytesKey("hello/again".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + two.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + + // List all keys with a limit and start key + let result = list( + &state, + &store, + "hello/".as_bytes().to_vec(), + "/".as_bytes().to_vec(), + Some(&two), + 0, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + } + + #[test] + fn test_list_with_prefix_and_without_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let one = BytesKey("test/hello".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + one.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + let two = BytesKey("hello".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + two.clone(), + hash.0, + 8, + 123456789, + HashMap::<String, String>::new(), + false, + ) + .unwrap(); + + // List with prefix and limit 1 + let result = list( + &state, + &store, + "test/".as_bytes().to_vec(), + "/".as_bytes().to_vec(), + None, + 1, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!( + result.0.first().unwrap().0, + "test/hello".as_bytes().to_vec(), + ); + + // List without a prefix and limit 1 + let result = list(&state, &store, vec![], "/".as_bytes().to_vec(), None, 1); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!(result.0.first().unwrap().0, "hello".as_bytes().to_vec()); + } +} diff --git a/fendermint/actors/init/Cargo.toml b/fendermint/actors/init/Cargo.toml new file mode 100644 index 0000000000..7776738cdd --- /dev/null +++ b/fendermint/actors/init/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "fendermint_actor_init" +description = "Builtin Init actor replacement for IPC with ADM support" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +log = { workspace = true } +num-derive = { workspace =
true } +num-traits = { workspace = true } +serde = { workspace = true } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/init/src/lib.rs b/fendermint/actors/init/src/lib.rs new file mode 100644 index 0000000000..4201bacdb3 --- /dev/null +++ b/fendermint/actors/init/src/lib.rs @@ -0,0 +1,175 @@ +// Copyright 2025 Recall Contributors +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Custom Init actor for IPC that allows the ADM actor to spawn any actor type. + +use cid::Cid; +use fil_actors_runtime::runtime::builtins::Type; +use fil_actors_runtime::runtime::{ActorCode, Runtime}; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, extract_send_result, ActorContext, ActorError, + AsActorError, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::address::Address; +use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR}; +use num_derive::FromPrimitive; + +pub use fil_actors_runtime::INIT_ACTOR_ADDR; + +mod state; +mod types; + +pub use state::State; +pub use types::*; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(IPCInitActor); + +/// ADM Actor ID - hardcoded to match fendermint_vm_actor_interface::adm::ADM_ACTOR_ID +pub const ADM_ACTOR_ID: ActorID = 17; + +/// Custom Init actor name for the manifest +pub const IPC_INIT_ACTOR_NAME: &str = "init"; + +/// Init actor methods +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Exec = 2, + Exec4 = 3, +} + +/// IPC Init actor with ADM support +pub struct IPCInitActor; + +impl IPCInitActor { + pub fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store(), params.network_name)?; + rt.create(&state)?; + Ok(()) + } + + pub fn exec(rt: &impl Runtime, params: ExecParams) -> Result<ExecReturn, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller_code = + rt.get_actor_code_cid(&rt.message().caller().id().unwrap()).ok_or_else(|| { + actor_error!(illegal_state, "no code for caller as {}", rt.message().caller()) + })?; + + if !can_exec(rt, &caller_code, &params.code_cid) { + return Err(actor_error!(forbidden; + "caller type {} cannot exec actor type {}", + &caller_code, &params.code_cid + )); + } + + let robust_address = rt.new_actor_address()?; + + let (id_address, existing): (ActorID, bool) = rt.transaction(|s: &mut State, rt| { + s.map_addresses_to_id(rt.store(), &robust_address, None) + .context("failed to allocate ID address") + })?; + + if existing { + return Err(actor_error!(forbidden, "cannot exec over existing actor {}", id_address)); + } + + rt.create_actor(params.code_cid, id_address, None)?; + + extract_send_result(rt.send_simple( + &Address::new_id(id_address), + METHOD_CONSTRUCTOR, + params.constructor_params.into(), + rt.message().value_received(), + )) + .context("constructor failed")?; + + Ok(ExecReturn { + id_address: Address::new_id(id_address), + robust_address, + }) + } + + pub fn exec4(rt: &impl Runtime, params: Exec4Params) -> Result<Exec4Return, ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&fil_actors_runtime::EAM_ACTOR_ADDR))?; + + let caller_id = rt.message().caller().id().unwrap(); + let delegated_address = + Address::new_delegated(caller_id, &params.subaddress.to_vec()).map_err(|e|
+                ActorError::illegal_argument(format!("invalid delegated address: {}", e))
+            })?;
+
+        let robust_address = rt.new_actor_address()?;
+
+        let (id_address, existing): (ActorID, bool) = rt.transaction(|s: &mut State, rt| {
+            s.map_addresses_to_id(rt.store(), &robust_address, Some(&delegated_address))
+                .context("failed to map addresses to ID")
+        })?;
+
+        if existing {
+            let code_cid = rt
+                .get_actor_code_cid(&id_address)
+                .context_code(fvm_shared::error::ExitCode::USR_FORBIDDEN, "cannot redeploy a deleted actor")?;
+            let placeholder_cid = rt.get_code_cid_for_type(Type::Placeholder);
+            if code_cid != placeholder_cid {
+                return Err(ActorError::forbidden(format!(
+                    "cannot replace existing non-placeholder actor with code: {code_cid}"
+                )));
+            }
+        }
+
+        rt.create_actor(params.code_cid, id_address, Some(delegated_address))?;
+
+        extract_send_result(rt.send_simple(
+            &Address::new_id(id_address),
+            METHOD_CONSTRUCTOR,
+            params.constructor_params.into(),
+            rt.message().value_received(),
+        ))
+        .context("constructor failed")?;
+
+        Ok(Exec4Return {
+            id_address: Address::new_id(id_address),
+            robust_address,
+        })
+    }
+}
+
+impl ActorCode for IPCInitActor {
+    type Methods = Method;
+
+    fn name() -> &'static str {
+        IPC_INIT_ACTOR_NAME
+    }
+
+    actor_dispatch_unrestricted! {
+        Constructor => constructor,
+        Exec => exec,
+        Exec4 => exec4,
+    }
+}
+
+/// Key modification: Allow ADM actor to exec any actor type
+fn can_exec(rt: &impl Runtime, caller: &Cid, exec: &Cid) -> bool {
+    let caller_id = rt.message().caller().id();
+
+    // Allow ADM actor (ID 17) to create any actor type
+    if caller_id == Ok(ADM_ACTOR_ID) {
+        return true;
+    }
+
+    // Standard builtin actor checks
+    rt.resolve_builtin_actor_type(exec)
+        .map(|typ| match typ {
+            Type::Multisig | Type::PaymentChannel => true,
+            Type::Miner if rt.resolve_builtin_actor_type(caller) == Some(Type::Power) => true,
+            _ => false,
+        })
+        .unwrap_or(false)
+}
diff --git a/fendermint/actors/init/src/state.rs b/fendermint/actors/init/src/state.rs
new file mode 100644
index 0000000000..3503fcb753
--- /dev/null
+++ b/fendermint/actors/init/src/state.rs
@@ -0,0 +1,96 @@
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use cid::Cid;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::address::{Address, Protocol};
+use fvm_shared::ActorID;
+
+use fil_actors_runtime::{
+    actor_error, ActorError, Map2, DEFAULT_HAMT_CONFIG, FIRST_NON_SINGLETON_ADDR,
+};
+
+#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)]
+pub struct State {
+    /// HAMT[Address]ActorID
+    pub address_map: Cid,
+    pub next_id: ActorID,
+    pub network_name: String,
+}
+
+pub type AddressMap<BS> = Map2<BS, Address, ActorID>;
+
+impl State {
+    pub fn new<BS: Blockstore>(store: &BS, network_name: String) -> Result<State, ActorError> {
+        let empty = AddressMap::flush_empty(store, DEFAULT_HAMT_CONFIG)?;
+        Ok(Self { address_map: empty, next_id: FIRST_NON_SINGLETON_ADDR, network_name })
+    }
+
+    /// Maps argument addresses to a new or existing actor ID.
+    /// With no delegated address, or if the delegated address is not already mapped,
+    /// allocates a new ID address and maps both to it.
+    /// If the delegated address is already present, maps the robust address to that actor ID.
+    /// Fails if the robust address is already mapped. The assignment of an ID to an address is
+    /// one-time-only, even if the actor at that ID is deleted.
+    /// Returns the actor ID and a boolean indicating whether or not the actor already exists.
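+    ///
+    /// An illustrative sketch of the allocation rules (hypothetical addresses and
+    /// store; not part of the original change):
+    ///
+    /// ```ignore
+    /// // A fresh robust address with no delegated address gets a new ID.
+    /// let (id_a, existing) = st.map_addresses_to_id(&store, &robust_a, None)?;
+    /// assert!(!existing);
+    ///
+    /// // A delegated address that is already mapped recalls the same ID; the new
+    /// // robust address is mapped alongside it and `existing` is reported as true.
+    /// let (id_b, _) = st.map_addresses_to_id(&store, &robust_b, Some(&delegated))?;
+    /// let (id_c, existing) = st.map_addresses_to_id(&store, &robust_c, Some(&delegated))?;
+    /// assert_eq!(id_b, id_c);
+    /// assert!(existing);
+    ///
+    /// // Re-using an already-mapped robust address is forbidden.
+    /// assert!(st.map_addresses_to_id(&store, &robust_a, None).is_err());
+    /// ```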
+    pub fn map_addresses_to_id<BS: Blockstore>(
+        &mut self,
+        store: &BS,
+        robust_addr: &Address,
+        delegated_addr: Option<&Address>,
+    ) -> Result<(ActorID, bool), ActorError> {
+        let mut map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?;
+        let (id, existing) = if let Some(delegated_addr) = delegated_addr {
+            // If there's a delegated address, either recall the already-mapped actor ID or
+            // create and map a new one.
+            if let Some(existing_id) = map.get(delegated_addr)? {
+                (*existing_id, true)
+            } else {
+                let new_id = self.next_id;
+                self.next_id += 1;
+                map.set(delegated_addr, new_id)?;
+                (new_id, false)
+            }
+        } else {
+            // With no delegated address, always create a new actor ID.
+            let new_id = self.next_id;
+            self.next_id += 1;
+            (new_id, false)
+        };
+
+        // Map the robust address to the ID, failing if it's already mapped to anything.
+        let is_new = map.set_if_absent(robust_addr, id)?;
+        if !is_new {
+            return Err(actor_error!(
+                forbidden,
+                "robust address {} is already allocated in the address map",
+                robust_addr
+            ));
+        }
+        self.address_map = map.flush()?;
+        Ok((id, existing))
+    }
+
+    /// ResolveAddress resolves an address to an ID-address, if possible.
+    /// If the provided address is an ID address, it is returned as-is.
+    /// This means that mapped ID-addresses (which should only appear as values, not keys) and
+    /// singleton actor addresses (which are not in the map) pass through unchanged.
+    ///
+    /// Returns an ID-address and `true` if the address was already an ID-address or was resolved
+    /// in the mapping.
+    /// Returns an undefined address and `false` if the address was not an ID-address and not found
+    /// in the mapping.
+    /// Returns an error only if state was inconsistent.
+    pub fn resolve_address<BS: Blockstore>(
+        &self,
+        store: &BS,
+        addr: &Address,
+    ) -> Result<Option<Address>, ActorError> {
+        if addr.protocol() == Protocol::ID {
+            return Ok(Some(*addr));
+        }
+        let map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?;
+        let found = map.get(addr)?;
+        Ok(found.copied().map(Address::new_id))
+    }
+}
\ No newline at end of file
diff --git a/fendermint/actors/init/src/types.rs b/fendermint/actors/init/src/types.rs
new file mode 100644
index 0000000000..2c0a2def66
--- /dev/null
+++ b/fendermint/actors/init/src/types.rs
@@ -0,0 +1,40 @@
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use cid::Cid;
+use fvm_ipld_encoding::tuple::*;
+use fvm_ipld_encoding::RawBytes;
+use fvm_shared::address::Address;
+
+/// Init actor Constructor parameters
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct ConstructorParams {
+    pub network_name: String,
+}
+
+/// Init actor Exec Params
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct ExecParams {
+    pub code_cid: Cid,
+    pub constructor_params: RawBytes,
+}
+
+/// Init actor Exec Return value
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ExecReturn {
+    /// ID based address for created actor
+    pub id_address: Address,
+    /// Reorg safe address for actor
+    pub robust_address: Address,
+}
+
+/// Init actor Exec4 Params
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct Exec4Params {
+    pub code_cid: Cid,
+    pub constructor_params: RawBytes,
+    pub subaddress: RawBytes,
+}
+
+/// Init actor Exec4 Return value
+pub type Exec4Return = ExecReturn;
diff --git a/fendermint/actors/machine/Cargo.toml b/fendermint/actors/machine/Cargo.toml
new file mode 100644
index 0000000000..eae6f5d5d3
--- /dev/null
+++ b/fendermint/actors/machine/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "fendermint_actor_machine"
+description = "Shared types for ADM machine actors"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+anyhow = { workspace = true }
+fil_actors_runtime = { workspace = true }
+fil_actor_adm = { workspace = true }
+frc42_dispatch = { workspace = true }
+fvm_ipld_blockstore = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+recall_sol_facade = { workspace = true, features = ["machine"] }
+serde = { workspace = true, features = ["derive"] }
+
+recall_actor_sdk = { path = "../../../recall/actor_sdk" }
+
+[dev-dependencies]
+fil_actors_runtime = { workspace = true, features = ["test_utils"] }
+
+[features]
+fil-actor = ["fil_actors_runtime/fil-actor"]
diff --git a/fendermint/actors/machine/src/lib.rs b/fendermint/actors/machine/src/lib.rs
new file mode 100644
index 0000000000..d4c6a1367d
--- /dev/null
+++ b/fendermint/actors/machine/src/lib.rs
@@ -0,0 +1,167 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+
+pub use fil_actor_adm::Kind;
+use fil_actors_runtime::{
+    actor_error, runtime::Runtime, ActorError, FIRST_EXPORTED_METHOD_NUMBER, INIT_ACTOR_ADDR,
+};
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::{ipld_block::IpldBlock, tuple::*};
+pub use fvm_shared::METHOD_CONSTRUCTOR;
+use fvm_shared::{address::Address, MethodNum};
+use recall_actor_sdk::constants::ADM_ACTOR_ADDR;
+use recall_actor_sdk::{
+    evm::emit_evm_event,
+    util::{to_delegated_address, to_id_address, to_id_and_delegated_address},
+};
+use serde::{de::DeserializeOwned, Serialize};
+
+use crate::sol_facade::{MachineCreated, MachineInitialized};
+
+pub mod sol_facade;
+
+/// Params for creating a machine.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ConstructorParams {
+    /// The machine owner ID address.
+    pub owner: Address,
+    /// User-defined metadata.
+    pub metadata: HashMap<String, String>,
+}
+
+/// Params for initializing a machine.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct InitParams {
+    /// The machine ID address.
+    pub address: Address,
+}
+
+/// Machine initialization method number.
+pub const INIT_METHOD: MethodNum = 2;
+/// Get machine address method number.
+pub const GET_ADDRESS_METHOD: MethodNum = frc42_dispatch::method_hash!("GetAddress");
+/// Get machine metadata method number.
+pub const GET_METADATA_METHOD: MethodNum = frc42_dispatch::method_hash!("GetMetadata");
+
+// TODO: Add method for changing owner from ADM actor.
+pub trait MachineActor {
+    type State: MachineState + Serialize + DeserializeOwned;
+
+    /// Machine actor constructor.
+    fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_is(std::iter::once(&INIT_ACTOR_ADDR))?;
+
+        let (id_addr, delegated_addr) = to_id_and_delegated_address(rt, params.owner)?;
+
+        let state = Self::State::new(rt.store(), id_addr, params.metadata)?;
+        rt.create(&state)?;
+
+        emit_evm_event(
+            rt,
+            MachineCreated::new(state.kind(), delegated_addr, &state.metadata()),
+        )
+    }
+
+    /// Initializes the machine with its ID address.
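+    ///
+    /// A minimal sketch of the two-phase creation flow this supports (the actor
+    /// name `MyMachineActor` is illustrative; not part of the original change):
+    /// the Init actor first calls `Constructor` with the owner and metadata, and
+    /// the ADM actor then calls `Init` to hand the machine its own ID address.
+    ///
+    /// ```ignore
+    /// // 1. Called by the Init actor: create the machine state.
+    /// MyMachineActor::constructor(rt, ConstructorParams { owner, metadata })?;
+    /// // 2. Called by the ADM actor: record the machine's ID address exactly once.
+    /// MyMachineActor::init(rt, InitParams { address: machine_id_addr })?;
+    /// ```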
+    fn init(rt: &impl Runtime, params: InitParams) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_is(std::iter::once(&ADM_ACTOR_ADDR))?;
+
+        let id_addr = to_id_address(rt, params.address, false)?;
+
+        let kind = rt.transaction(|st: &mut Self::State, _| {
+            st.init(id_addr)?;
+            Ok(st.kind())
+        })?;
+
+        emit_evm_event(rt, MachineInitialized::new(kind, id_addr))
+    }
+
+    /// Get machine robust address.
+    fn get_address(rt: &impl Runtime) -> Result<Address, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        let st = rt.state::<Self::State>()?;
+        st.address().get()
+    }
+
+    /// Get machine metadata.
+    fn get_metadata(rt: &impl Runtime) -> Result<Metadata, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        let st = rt.state::<Self::State>()?;
+        let owner = st.owner();
+        let address = to_delegated_address(rt, owner).unwrap_or(owner);
+        Ok(Metadata {
+            owner: address,
+            kind: st.kind(),
+            metadata: st.metadata(),
+        })
+    }
+
+    fn fallback(
+        rt: &impl Runtime,
+        method: MethodNum,
+        _: Option<IpldBlock>,
+    ) -> Result<Option<IpldBlock>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        if method >= FIRST_EXPORTED_METHOD_NUMBER {
+            Ok(None)
+        } else {
+            Err(actor_error!(unhandled_message; "invalid method: {}", method))
+        }
+    }
+}
+
+/// Machine metadata.
+#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)]
+pub struct Metadata {
+    /// Machine kind.
+    pub kind: Kind,
+    /// Machine owner ID address.
+    pub owner: Address,
+    /// User-defined data.
+    pub metadata: HashMap<String, String>,
+}
+
+/// Trait that must be implemented by machine state.
+pub trait MachineState {
+    fn new<BS: Blockstore>(
+        store: &BS,
+        owner: Address,
+        metadata: HashMap<String, String>,
+    ) -> Result<Self, ActorError>
+    where
+        Self: Sized;
+    fn init(&mut self, address: Address) -> Result<(), ActorError>;
+    fn address(&self) -> MachineAddress;
+    fn kind(&self) -> Kind;
+    fn owner(&self) -> Address;
+    fn metadata(&self) -> HashMap<String, String>;
+}
+
+/// Machine address wrapper.
+#[derive(Debug, Clone, Default, Serialize_tuple, Deserialize_tuple)]
+pub struct MachineAddress {
+    address: Option<Address>,
+}
+
+impl MachineAddress {
+    /// Get machine address.
+    pub fn get(&self) -> Result<Address, ActorError> {
+        self.address.ok_or(ActorError::illegal_state(String::from(
+            "machine address not set",
+        )))
+    }
+
+    /// Set machine address. This can only be called once.
+    pub fn set(&mut self, address: Address) -> Result<(), ActorError> {
+        if self.address.is_some() {
+            return Err(ActorError::forbidden(String::from(
+                "machine address already set",
+            )));
+        }
+        self.address = Some(address);
+        Ok(())
+    }
+}
diff --git a/fendermint/actors/machine/src/sol_facade.rs b/fendermint/actors/machine/src/sol_facade.rs
new file mode 100644
index 0000000000..59548ee677
--- /dev/null
+++ b/fendermint/actors/machine/src/sol_facade.rs
@@ -0,0 +1,60 @@
+// Copyright 2022-2024 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+
+use fil_actor_adm::Kind;
+use fvm_shared::address::Address;
+use recall_actor_sdk::evm::TryIntoEVMEvent;
+use recall_sol_facade::{machine as sol, types::H160};
+
+pub struct MachineCreated<'a> {
+    kind: Kind,
+    owner: Address,
+    metadata: &'a HashMap<String, String>,
+}
+impl<'a> MachineCreated<'a> {
+    pub fn new(kind: Kind, owner: Address, metadata: &'a HashMap<String, String>) -> Self {
+        Self {
+            kind,
+            owner,
+            metadata,
+        }
+    }
+}
+impl TryIntoEVMEvent for MachineCreated<'_> {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error> {
+        let owner: H160 = self.owner.try_into()?;
+        let metadata = fvm_ipld_encoding::to_vec(self.metadata)?;
+        Ok(sol::Events::MachineCreated(sol::MachineCreated {
+            kind: self.kind as u8,
+            owner: owner.into(),
+            metadata: metadata.into(),
+        }))
+    }
+}
+
+pub struct MachineInitialized {
+    kind: Kind,
+    machine_address: Address,
+}
+impl MachineInitialized {
+    pub fn new(kind: Kind, machine_address: Address) -> Self {
+        Self {
+            kind,
+            machine_address,
+        }
+    }
+}
+impl TryIntoEVMEvent for MachineInitialized {
+    type Target = sol::Events;
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error> {
+        let machine_address: H160 = self.machine_address.try_into()?;
+        Ok(sol::Events::MachineInitialized(sol::MachineInitialized {
+            kind: self.kind as u8,
+            machineAddress: machine_address.into(),
+        }))
+    }
+}
diff --git a/fendermint/actors/recall_config/Cargo.toml b/fendermint/actors/recall_config/Cargo.toml
new file mode 100644
index 0000000000..300e3e409a
--- /dev/null
+++ b/fendermint/actors/recall_config/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "fendermint_actor_recall_config"
+description = "Singleton actor for updateable Recall network parameters"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+## lib is necessary for integration tests
+## cdylib is necessary for Wasm build
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+anyhow = { workspace = true }
+fendermint_actor_blobs_shared = { path = "../blobs/shared" }
+fil_actors_runtime = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+num-traits = { workspace = true }
+recall_sol_facade = { workspace = true, features = ["config"] }
+serde = { workspace = true, features = ["derive"] }
+
+fendermint_actor_recall_config_shared = { path = "../recall_config/shared" }
+recall_actor_sdk = { path = "../../../recall/actor_sdk" }
+
+[dev-dependencies]
+fil_actors_evm_shared = { workspace = true }
+fil_actors_runtime = { workspace = true, features = ["test_utils"] }
+hex-literal = { workspace = true }
+
+[features]
+fil-actor = ["fil_actors_runtime/fil-actor"]
diff --git
a/fendermint/actors/recall_config/shared/Cargo.toml b/fendermint/actors/recall_config/shared/Cargo.toml new file mode 100644 index 0000000000..cfc59c9c3b --- /dev/null +++ b/fendermint/actors/recall_config/shared/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "fendermint_actor_recall_config_shared" +description = "Shared resources for the recall config" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +fendermint_actor_blobs_shared = { path = "../../blobs/shared" } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +serde = { workspace = true, features = ["derive"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/recall_config/shared/src/lib.rs b/fendermint/actors/recall_config/shared/src/lib.rs new file mode 100644 index 0000000000..9df7997cc6 --- /dev/null +++ b/fendermint/actors/recall_config/shared/src/lib.rs @@ -0,0 +1,103 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::TokenCreditRate; +use fil_actors_runtime::{deserialize_block, extract_send_result, runtime::Runtime, ActorError}; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{ + address::Address, clock::ChainEpoch, econ::TokenAmount, sys::SendFlags, ActorID, MethodNum, + METHOD_CONSTRUCTOR, +}; +use num_derive::FromPrimitive; +use num_traits::Zero; +use serde::{Deserialize, Serialize}; + +pub const RECALL_CONFIG_ACTOR_ID: ActorID = 70; +pub const RECALL_CONFIG_ACTOR_ADDR: Address = Address::new_id(RECALL_CONFIG_ACTOR_ID); + +/// The updatable config. +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct RecallConfig { + /// The total storage capacity of the subnet. + pub blob_capacity: u64, + /// The token to credit rate. + pub token_credit_rate: TokenCreditRate, + /// Epoch interval at which to debit all credit accounts. + pub blob_credit_debit_interval: ChainEpoch, + /// The minimum epoch duration a blob can be stored. + pub blob_min_ttl: ChainEpoch, + /// The default epoch duration a blob is stored. + pub blob_default_ttl: ChainEpoch, + /// Maximum number of blobs to delete in a single batch during debit. + pub blob_delete_batch_size: u64, + /// Maximum number of accounts to process in a single batch during debit. + pub account_debit_batch_size: u64, +} + +impl Default for RecallConfig { + fn default() -> Self { + Self { + blob_capacity: 10 * 1024 * 1024 * 1024 * 1024, // 10 TiB + // 1 RECALL buys 1e18 credits ~ 1 RECALL buys 1e36 atto credits. + token_credit_rate: TokenCreditRate::from(10u128.pow(36)), + // This needs to be low enough to avoid out-of-gas errors. 
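+            // (Illustrative arithmetic for the rate above, assuming it is stored as
+            // atto credits per whole RECALL: 1 RECALL = 1e18 attoRECALL, so the
+            // 1e36 figure works out to 1e18 atto credits, i.e. exactly 1 credit,
+            // per attoRECALL deposited.)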
+            // TODO: Stress test with max-throughput (~100 blobs/s)
+            blob_credit_debit_interval: ChainEpoch::from(60 * 10), // ~10 min
+            blob_min_ttl: ChainEpoch::from(60 * 60),               // ~1 hour
+            blob_default_ttl: ChainEpoch::from(60 * 60 * 24),      // ~1 day
+            blob_delete_batch_size: 100,
+            account_debit_batch_size: 1000,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct SetAdminParams(pub Address);
+
+pub type SetConfigParams = RecallConfig;
+
+#[derive(FromPrimitive)]
+#[repr(u64)]
+pub enum Method {
+    Constructor = METHOD_CONSTRUCTOR,
+    SetAdmin = frc42_dispatch::method_hash!("SetAdmin"),
+    GetAdmin = frc42_dispatch::method_hash!("GetAdmin"),
+    SetConfig = frc42_dispatch::method_hash!("SetConfig"),
+    GetConfig = frc42_dispatch::method_hash!("GetConfig"),
+}
+
+pub fn get_admin(rt: &impl Runtime) -> Result<Option<Address>, ActorError> {
+    deserialize_block(extract_send_result(rt.send(
+        &RECALL_CONFIG_ACTOR_ADDR,
+        Method::GetAdmin as MethodNum,
+        None,
+        TokenAmount::zero(),
+        None,
+        SendFlags::READ_ONLY,
+    ))?)
+}
+
+/// Requires caller is the Recall Admin.
+pub fn require_caller_is_admin(rt: &impl Runtime) -> Result<(), ActorError> {
+    let admin = get_admin(rt)?;
+    if admin.is_none() {
+        Err(ActorError::illegal_state(
+            "admin address not set".to_string(),
+        ))
+    } else {
+        Ok(rt.validate_immediate_caller_is(std::iter::once(&admin.unwrap()))?)
+    }
+}
+
+pub fn get_config(rt: &impl Runtime) -> Result<RecallConfig, ActorError> {
+    deserialize_block(extract_send_result(rt.send(
+        &RECALL_CONFIG_ACTOR_ADDR,
+        Method::GetConfig as MethodNum,
+        None,
+        TokenAmount::zero(),
+        None,
+        SendFlags::READ_ONLY,
+    ))?)
+}
diff --git a/fendermint/actors/recall_config/src/lib.rs b/fendermint/actors/recall_config/src/lib.rs
new file mode 100644
index 0000000000..cf98acbd8a
--- /dev/null
+++ b/fendermint/actors/recall_config/src/lib.rs
@@ -0,0 +1,618 @@
+// Copyright 2024 Textile
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fendermint_actor_blobs_shared::credit::TokenCreditRate;
+use fendermint_actor_recall_config_shared::{
+    Method, RecallConfig, SetAdminParams, SetConfigParams,
+};
+use fil_actors_runtime::{
+    actor_dispatch, actor_error,
+    runtime::{ActorCode, Runtime},
+    ActorError, SYSTEM_ACTOR_ADDR,
+};
+use fvm_ipld_encoding::tuple::*;
+use fvm_shared::{address::Address, bigint::BigUint, clock::ChainEpoch};
+use num_traits::Zero;
+use recall_actor_sdk::{
+    evm::emit_evm_event,
+    util::{to_delegated_address, to_id_and_delegated_address},
+};
+
+use crate::sol_facade::{ConfigAdminSet, ConfigSet};
+
+mod sol_facade;
+
+#[cfg(feature = "fil-actor")]
+fil_actors_runtime::wasm_trampoline!(Actor);
+
+pub const ACTOR_NAME: &str = "recall_config";
+
+#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
+pub struct State {
+    /// The admin address that is allowed to update the config.
+    pub admin: Option<Address>,
+    /// The Recall network configuration.
+    pub config: RecallConfig,
+}
+
+#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
+pub struct ConstructorParams {
+    initial_blob_capacity: u64,
+    initial_token_credit_rate: TokenCreditRate,
+    initial_blob_credit_debit_interval: ChainEpoch,
+    initial_blob_min_ttl: ChainEpoch,
+    initial_blob_default_ttl: ChainEpoch,
+    initial_blob_delete_batch_size: u64,
+    initial_account_debit_batch_size: u64,
+}
+
+pub struct Actor {}
+
+impl Actor {
+    /// Creates the actor
+    pub fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> {
+        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
+        let st = State {
+            admin: None,
+            config: RecallConfig {
+                blob_capacity: params.initial_blob_capacity,
+                token_credit_rate: params.initial_token_credit_rate,
+                blob_credit_debit_interval: params.initial_blob_credit_debit_interval,
+                blob_min_ttl: params.initial_blob_min_ttl,
+                blob_default_ttl: params.initial_blob_default_ttl,
+                blob_delete_batch_size: params.initial_blob_delete_batch_size,
+                account_debit_batch_size: params.initial_account_debit_batch_size,
+            },
+        };
+        rt.create(&st)
+    }
+
+    fn set_admin(rt: &impl Runtime, params: SetAdminParams) -> Result<(), ActorError> {
+        Self::ensure_update_allowed(rt)?;
+
+        let (admin_id_addr, admin_delegated_addr) = to_id_and_delegated_address(rt, params.0)?;
+
+        rt.transaction(|st: &mut State, _rt| {
+            st.admin = Some(admin_id_addr);
+            Ok(())
+        })?;
+
+        emit_evm_event(rt, ConfigAdminSet::new(admin_delegated_addr))?;
+
+        Ok(())
+    }
+
+    fn get_admin(rt: &impl Runtime) -> Result<Option<Address>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        match rt.state::<State>().map(|s| s.admin)? {
+            Some(admin) => {
+                let admin = to_delegated_address(rt, admin)?;
+                Ok(Some(admin))
+            }
+            None => Ok(None),
+        }
+    }
+
+    fn set_config(rt: &impl Runtime, params: SetConfigParams) -> Result<(), ActorError> {
+        let admin_exists = Self::ensure_update_allowed(rt)?;
+
+        if params.token_credit_rate.rate() <= &BigUint::zero() {
+            return Err(actor_error!(
+                illegal_argument,
+                "token credit rate must be positive"
+            ));
+        }
+        if params.blob_capacity == 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "blob capacity must be positive"
+            ));
+        }
+        if params.blob_credit_debit_interval <= 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "credit debit interval must be positive"
+            ));
+        }
+        if params.blob_min_ttl <= 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "minimum TTL must be positive"
+            ));
+        }
+        if params.blob_default_ttl <= 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "default TTL must be positive"
+            ));
+        }
+        if params.blob_default_ttl < params.blob_min_ttl {
+            return Err(actor_error!(
+                illegal_argument,
+                "default TTL must be greater than or equal to minimum TTL"
+            ));
+        }
+        if params.blob_delete_batch_size == 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "blob delete batch size must be positive"
+            ));
+        }
+        if params.account_debit_batch_size == 0 {
+            return Err(actor_error!(
+                illegal_argument,
+                "account debit batch size must be positive"
+            ));
+        }
+
+        let (admin_id_addr, admin_delegated_addr) = if !admin_exists {
+            // The first caller becomes admin
+            let addrs = to_id_and_delegated_address(rt, rt.message().caller())?;
+            (Some(addrs.0), Some(addrs.1))
+        } else {
+            (None, None)
+        };
+
+        rt.transaction(|st: &mut State, _rt| {
+            if let Some(admin) = admin_id_addr {
+                st.admin = Some(admin);
+            }
+            st.config = params.clone();
+            Ok(())
+        })?;
+
+        if let Some(admin) =
admin_delegated_addr {
+            emit_evm_event(rt, ConfigAdminSet::new(admin))?;
+        }
+        emit_evm_event(
+            rt,
+            ConfigSet {
+                blob_capacity: params.blob_capacity,
+                token_credit_rate: params.token_credit_rate,
+                blob_credit_debit_interval: params.blob_credit_debit_interval,
+                blob_min_ttl: params.blob_min_ttl,
+                blob_default_ttl: params.blob_default_ttl,
+                blob_delete_batch_size: params.blob_delete_batch_size,
+                account_debit_batch_size: params.account_debit_batch_size,
+            },
+        )?;
+
+        Ok(())
+    }
+
+    fn get_config(rt: &impl Runtime) -> Result<RecallConfig, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        rt.state::<State>().map(|s| s.config)
+    }
+
+    /// Ensures that immediate caller is allowed to update the config.
+    /// Returns whether the admin exists.
+    fn ensure_update_allowed(rt: &impl Runtime) -> Result<bool, ActorError> {
+        let st = rt.state::<State>()?;
+        let admin_exists = if let Some(admin) = st.admin {
+            if let Some(admin_id) = rt.resolve_address(&admin) {
+                rt.validate_immediate_caller_is(std::iter::once(&Address::new_id(admin_id)))?
+            } else {
+                // This should not happen.
+                return Err(ActorError::forbidden(String::from(
+                    "failed to resolve config admin id",
+                )));
+            }
+            true
+        } else {
+            // The first caller becomes the admin
+            rt.validate_immediate_caller_accept_any()?;
+            false
+        };
+        Ok(admin_exists)
+    }
+}
+
+impl ActorCode for Actor {
+    type Methods = Method;
+
+    fn name() -> &'static str {
+        ACTOR_NAME
+    }
+
+    actor_dispatch! {
+        Constructor => constructor,
+        SetAdmin => set_admin,
+        GetAdmin => get_admin,
+        SetConfig => set_config,
+        GetConfig => get_config,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use fendermint_actor_recall_config_shared::{RecallConfig, RECALL_CONFIG_ACTOR_ID};
+    use fil_actors_evm_shared::address::EthAddress;
+    use fil_actors_runtime::test_utils::{
+        expect_empty, MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, SYSTEM_ACTOR_CODE_ID,
+    };
+    use fvm_ipld_encoding::ipld_block::IpldBlock;
+    use fvm_shared::error::ExitCode;
+    use recall_actor_sdk::evm::to_actor_event;
+
+    pub fn construct_and_verify(
+        blob_capacity: u64,
+        token_credit_rate: TokenCreditRate,
+        blob_credit_debit_interval: i32,
+        initial_blob_min_ttl: ChainEpoch,
+        initial_blob_default_ttl: ChainEpoch,
+    ) -> MockRuntime {
+        let rt = MockRuntime {
+            receiver: Address::new_id(RECALL_CONFIG_ACTOR_ID),
+            ..Default::default()
+        };
+
+        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
+        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);
+
+        let result = rt
+            .call::<Actor>(
+                Method::Constructor as u64,
+                IpldBlock::serialize_cbor(&ConstructorParams {
+                    initial_blob_capacity: blob_capacity,
+                    initial_token_credit_rate: token_credit_rate,
+                    initial_blob_credit_debit_interval: ChainEpoch::from(
+                        blob_credit_debit_interval,
+                    ),
+                    initial_blob_min_ttl,
+                    initial_blob_default_ttl,
+                    initial_blob_delete_batch_size: 100,
+                    initial_account_debit_batch_size: 100,
+                })
+                .unwrap(),
+            )
+            .unwrap();
+        expect_empty(result);
+        rt.verify();
+        rt.reset();
+
+        rt
+    }
+
+    #[test]
+    fn test_get_initial_admin() {
+        let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600);
+
+        rt.expect_validate_caller_any();
+        let admin = rt
+            .call::<Actor>(Method::GetAdmin as u64, None)
+            .unwrap()
+            .unwrap()
+            .deserialize::<Option<Address>>()
+            .unwrap();
+        rt.verify();
+
+        assert!(admin.is_none());
+    }
+
+    #[test]
+    fn test_set_admin() {
+        let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600);
+
+        let id_addr = Address::new_id(110);
+        let eth_addr = EthAddress(hex_literal::hex!(
+            "CAFEB0BA00000000000000000000000000000000"
+        ));
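+        // The test wires one account up under two addresses: the f0 ID address
+        // (f0110) and the f4 delegated address derived from the Ethereum-style
+        // bytes above. `set_delegated_address` below teaches the MockRuntime this
+        // mapping so the actor can resolve between the two forms.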
+ let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + let event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(f4_eth_addr)); + + // Reset admin + let new_id_addr = Address::new_id(111); + let new_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let new_f4_eth_addr = Address::new_delegated(10, &new_eth_addr.0).unwrap(); + rt.set_delegated_address(new_id_addr.id().unwrap(), new_f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // current admin + rt.expect_validate_caller_addr(vec![id_addr]); + let event = to_actor_event(ConfigAdminSet::new(new_f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(new_f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(new_f4_eth_addr)); + } + + #[test] + fn test_set_admin_unauthorized() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + let event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Try to set again with a different caller + let unauthorized_id_addr = Address::new_id(111); + let unauthorized_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let unauthorized_f4_eth_addr = + Address::new_delegated(10, &unauthorized_eth_addr.0).unwrap(); + rt.set_delegated_address(unauthorized_id_addr.id().unwrap(), unauthorized_f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, unauthorized_id_addr); // unauthorized caller + rt.expect_validate_caller_addr(vec![id_addr]); // expect current admin + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(unauthorized_f4_eth_addr)).unwrap(), + ); + rt.verify(); + + assert!(result.is_err()); + assert_eq!(result.unwrap_err().exit_code(), ExitCode::USR_FORBIDDEN); + } + + #[test] + fn test_set_config() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, 
ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + + let admin_event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(admin_event); + + let config = RecallConfig { + blob_capacity: 2048, + token_credit_rate: TokenCreditRate::from(10usize), + blob_credit_debit_interval: ChainEpoch::from(1800), + blob_min_ttl: ChainEpoch::from(2 * 60 * 60), + blob_default_ttl: ChainEpoch::from(24 * 60 * 60), + blob_delete_batch_size: 100, + account_debit_batch_size: 100, + }; + let config_event = to_actor_event(ConfigSet { + blob_capacity: config.blob_capacity, + token_credit_rate: config.token_credit_rate.clone(), + blob_credit_debit_interval: config.blob_credit_debit_interval, + blob_min_ttl: config.blob_min_ttl, + blob_default_ttl: config.blob_default_ttl, + blob_delete_batch_size: config.blob_delete_batch_size, + account_debit_batch_size: config.account_debit_batch_size, + }) + .unwrap(); + rt.expect_emitted_event(config_event); + + let result = rt.call::( + Method::SetConfig as u64, + IpldBlock::serialize_cbor(&config).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let recall_config = rt + .call::(Method::GetConfig as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap(); + rt.verify(); + + assert_eq!(recall_config.blob_capacity, 2048); + assert_eq!( + recall_config.token_credit_rate, + TokenCreditRate::from(10usize) + ); + assert_eq!(recall_config.blob_credit_debit_interval, 1800); + assert_eq!(recall_config.blob_min_ttl, ChainEpoch::from(2 * 60 * 60)); + assert_eq!( + recall_config.blob_default_ttl, + ChainEpoch::from(24 * 60 * 60) + ); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(f4_eth_addr)); + } + + #[test] + fn test_set_invalid_config() { + struct TestCase { + name: &'static str, + config: RecallConfig, + } + + let valid_config = RecallConfig { + blob_capacity: 2048, + token_credit_rate: TokenCreditRate::from(10usize), + blob_credit_debit_interval: ChainEpoch::from(1800), + blob_min_ttl: ChainEpoch::from(2 * 60 * 60), + blob_default_ttl: ChainEpoch::from(24 * 60 * 60), + blob_delete_batch_size: 100, + account_debit_batch_size: 100, + }; + + let test_cases = vec![ + // Token credit rate validation + TestCase { + name: "token credit rate cannot be zero", + config: RecallConfig { + token_credit_rate: TokenCreditRate::from(0usize), + ..valid_config.clone() + }, + }, + // Blob capacity validation + TestCase { + name: "blob capacity cannot be zero", + config: RecallConfig { + blob_capacity: 0, + ..valid_config.clone() + }, + }, + // Credit debit interval validation + TestCase { + name: "blob credit debit interval cannot be zero", + config: RecallConfig { + blob_credit_debit_interval: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob credit debit interval cannot be negative", + config: RecallConfig { + blob_credit_debit_interval: -1, + ..valid_config.clone() + }, + }, + // TTL validations + TestCase { + name: "blob min ttl cannot be negative", + config: RecallConfig { + blob_min_ttl: -1, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob min ttl cannot be zero", + config: RecallConfig { + blob_min_ttl: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl must be greater than or equal to 
min ttl", + config: RecallConfig { + blob_min_ttl: 4 * 60 * 60, + blob_default_ttl: 2 * 60 * 60, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl cannot be zero", + config: RecallConfig { + blob_default_ttl: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl cannot be negative", + config: RecallConfig { + blob_default_ttl: -1, + ..valid_config.clone() + }, + }, + ]; + + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + + // Now test all invalid configurations + for test_case in test_cases { + rt.expect_validate_caller_any(); + let result = rt.call::( + Method::SetConfig as u64, + IpldBlock::serialize_cbor(&test_case.config).unwrap(), + ); + rt.verify(); + assert!( + result.is_err(), + "expected case \"{}\" to fail but it succeeded", + test_case.name + ); + } + } + + #[test] + fn test_get_config() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + rt.expect_validate_caller_any(); + let recall_config = rt + .call::(Method::GetConfig as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap(); + rt.verify(); + + assert_eq!(recall_config.blob_capacity, 1024); + assert_eq!( + recall_config.token_credit_rate, + TokenCreditRate::from(5usize) + ); + assert_eq!(recall_config.blob_credit_debit_interval, 3600); + assert_eq!(recall_config.blob_min_ttl, 3600); + assert_eq!(recall_config.blob_default_ttl, 3600); + } +} diff --git a/fendermint/actors/recall_config/src/sol_facade.rs b/fendermint/actors/recall_config/src/sol_facade.rs new file mode 100644 index 0000000000..447d6e0253 --- /dev/null +++ b/fendermint/actors/recall_config/src/sol_facade.rs @@ -0,0 +1,54 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::TokenCreditRate; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use recall_actor_sdk::evm::TryIntoEVMEvent; +use recall_sol_facade::{ + config as sol, + primitives::U256, + types::{BigUintWrapper, H160}, +}; + +pub struct ConfigAdminSet { + pub admin: Address, +} +impl ConfigAdminSet { + pub fn new(admin: Address) -> Self { + Self { admin } + } +} +impl TryIntoEVMEvent for ConfigAdminSet { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let admin: H160 = self.admin.try_into()?; + Ok(sol::Events::ConfigAdminSet(sol::ConfigAdminSet { + admin: admin.into(), + })) + } +} + +pub struct ConfigSet { + pub blob_capacity: u64, + pub token_credit_rate: TokenCreditRate, + pub blob_credit_debit_interval: ChainEpoch, + pub blob_min_ttl: ChainEpoch, + pub blob_default_ttl: ChainEpoch, + pub blob_delete_batch_size: u64, + pub account_debit_batch_size: u64, +} +impl TryIntoEVMEvent for ConfigSet { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::ConfigSet(sol::ConfigSet { + blobCapacity: U256::from(self.blob_capacity), + tokenCreditRate: BigUintWrapper(self.token_credit_rate.rate().clone()).into(), + blobCreditDebitInterval: U256::from(self.blob_credit_debit_interval), + blobMinTtl: U256::from(self.blob_min_ttl), + blobDefaultTtl: 
U256::from(self.blob_default_ttl),
+            blobDeleteBatchSize: U256::from(self.blob_delete_batch_size),
+            accountDebitBatchSize: U256::from(self.account_debit_batch_size),
+        }))
+    }
+}
diff --git a/fendermint/actors/timehub/Cargo.toml b/fendermint/actors/timehub/Cargo.toml
new file mode 100644
index 0000000000..9e76083e4d
--- /dev/null
+++ b/fendermint/actors/timehub/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "fendermint_actor_timehub"
+description = "Actor for timestamping data hashes"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+version = "0.1.0"
+
+[lib]
+## lib is necessary for integration tests
+## cdylib is necessary for Wasm build
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+anyhow = { workspace = true }
+cid = { workspace = true, default-features = false }
+multihash-codetable = { workspace = true }
+fil_actors_runtime = { workspace = true }
+frc42_dispatch = { workspace = true }
+fvm_ipld_amt = { workspace = true }
+fvm_ipld_blockstore = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+fvm_shared = { workspace = true }
+num-derive = { workspace = true }
+num-traits = { workspace = true }
+recall_sol_facade = { workspace = true, features = ["timehub"] }
+serde = { workspace = true, features = ["derive"] }
+tracing = { workspace = true, features = ["log"] }
+
+fendermint_actor_blobs_shared = { path = "../blobs/shared" }
+fendermint_actor_machine = { path = "../machine" }
+recall_actor_sdk = { path = "../../../recall/actor_sdk" }
+
+[dev-dependencies]
+fil_actors_runtime = { workspace = true, features = ["test_utils"] }
+fil_actors_evm_shared = { workspace = true }
+hex-literal = { workspace = true }
+
+[features]
+fil-actor = ["fil_actors_runtime/fil-actor"]
diff --git a/fendermint/actors/timehub/src/actor.rs b/fendermint/actors/timehub/src/actor.rs
new file mode 100644
index 0000000000..ae40f5c094
--- /dev/null
+++ b/fendermint/actors/timehub/src/actor.rs
@@ -0,0 +1,582 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use cid::Cid;
+use fendermint_actor_blobs_shared::sdk::has_credit_approval;
+use fendermint_actor_machine::MachineActor;
+use fil_actors_runtime::{
+    actor_dispatch_unrestricted, actor_error,
+    runtime::{ActorCode, Runtime},
+    ActorError,
+};
+use recall_actor_sdk::evm::emit_evm_event;
+use recall_actor_sdk::evm::{InputData, InvokeContractParams, InvokeContractReturn};
+use recall_sol_facade::timehub::Calls;
+use tracing::debug;
+
+use crate::sol_facade::{AbiCall, EventPushed};
+use crate::{sol_facade, Leaf, Method, PushParams, PushReturn, State, TIMEHUB_ACTOR_NAME};
+
+#[cfg(feature = "fil-actor")]
+fil_actors_runtime::wasm_trampoline!(TimehubActor);
+
+pub struct TimehubActor;
+
+// Raw type persisted in the store.
+// This avoids using CID so that the store does not try to validate or resolve it.
+type RawLeaf = (u64, Vec<u8>);
+
+impl TimehubActor {
+    fn push(rt: &impl Runtime, params: PushParams) -> Result<PushReturn, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+
+        // Check access control.
+        // Either the caller needs to be the Timehub owner, or the owner needs to have given a
+        // credit approval to the caller.
+        let state = rt.state::<State>()?;
+        let owner = state.owner;
+        let from = rt.message().caller();
+
+        let actor_address = state.address.get()?;
+        if !has_credit_approval(rt, owner, from)? {
+            return Err(actor_error!(
+                forbidden;
+                format!("Unauthorized: missing credit approval from Timehub owner {} to {} for Timehub {}", owner, from, actor_address)));
+        }
+
+        // Decode the raw bytes as a Cid and report any errors.
+        // However, we pass opaque bytes to the store, since the store tries to validate and
+        // resolve any CID it stores.
+        let cid = Cid::try_from(params.0.as_slice()).map_err(|_err| {
+            actor_error!(illegal_argument;
+                "data must be valid CID bytes")
+        })?;
+        let timestamp = rt.tipset_timestamp();
+        let data: RawLeaf = (timestamp, params.0);
+
+        let ret = rt.transaction(|st: &mut State, rt| st.push(rt.store(), data))?;
+
+        emit_evm_event(rt, EventPushed::new(ret.index, timestamp, cid))?;
+
+        Ok(ret)
+    }
+
+    fn get_leaf_at(rt: &impl Runtime, index: u64) -> Result<Option<Leaf>, ActorError> {
+        debug!(index, "get_leaf_at");
+        rt.validate_immediate_caller_accept_any()?;
+        let st: State = rt.state()?;
+        // Decode leaf as timestamp and raw bytes. Then decode as a CID
+        let leaf: Option<RawLeaf> = st.get_leaf_at(rt.store(), index)?;
+        leaf.map(|(timestamp, bytes)| -> Result<Leaf, ActorError> {
+            Ok(Leaf {
+                timestamp,
+                witnessed: Cid::try_from(bytes).map_err(
+                    |_err| actor_error!(illegal_argument; "internal bytes are not a valid CID"),
+                )?,
+            })
+        })
+        .transpose()
+    }
+
+    fn get_root(rt: &impl Runtime) -> Result<Cid, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        let st: State = rt.state()?;
+        st.get_root(rt.store())
+    }
+
+    fn get_peaks(rt: &impl Runtime) -> Result<Vec<Cid>, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        let st: State = rt.state()?;
+        st.get_peaks(rt.store())
+    }
+
+    fn get_count(rt: &impl Runtime) -> Result<u64, ActorError> {
+        rt.validate_immediate_caller_accept_any()?;
+        let st: State = rt.state()?;
+        Ok(st.leaf_count)
+    }
+
+    fn invoke_contract(
+        rt: &impl Runtime,
+        params: InvokeContractParams,
+    ) -> Result<InvokeContractReturn, ActorError> {
+        let input_data: InputData = params.try_into()?;
+        if sol_facade::can_handle(&input_data) {
+            let output_data: Vec<u8> = match sol_facade::parse_input(&input_data)? {
+                Calls::getCount(call) => {
+                    let count = Self::get_count(rt)?;
+                    call.returns(count)
+                }
+                Calls::getLeafAt(call) => {
+                    let params = call.params();
+                    let push_return = Self::get_leaf_at(rt, params)?;
+                    call.returns(push_return)
+                }
+                Calls::getPeaks(call) => {
+                    let peaks = Self::get_peaks(rt)?;
+                    call.returns(peaks)
+                }
+                Calls::getRoot(call) => {
+                    let root = Self::get_root(rt)?;
+                    call.returns(root)
+                }
+                Calls::push(call) => {
+                    let params = call.params();
+                    let push_return = Self::push(rt, params)?;
+                    call.returns(push_return)
+                }
+            };
+            Ok(InvokeContractReturn { output_data })
+        } else {
+            Err(actor_error!(illegal_argument, "invalid call".to_string()))
+        }
+    }
+}
+
+impl MachineActor for TimehubActor {
+    type State = State;
+}
+
+impl ActorCode for TimehubActor {
+    type Methods = Method;
+
+    fn name() -> &'static str {
+        TIMEHUB_ACTOR_NAME
+    }
+
+    actor_dispatch_unrestricted!
{ + Constructor => constructor, + Init => init, + GetAddress => get_address, + GetMetadata => get_metadata, + Push => push, + Get => get_leaf_at, + Root => get_root, + Peaks => get_peaks, + Count => get_count, + // EVM interop + InvokeContract => invoke_contract, + _ => fallback, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::sol_facade::EventPushed; + + use std::collections::HashMap; + use std::str::FromStr; + + use fendermint_actor_blobs_shared::credit::{CreditApproval, GetCreditApprovalParams}; + use fendermint_actor_blobs_shared::method::Method as BlobMethod; + use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; + + use fendermint_actor_machine::sol_facade::{MachineCreated, MachineInitialized}; + use fendermint_actor_machine::{ConstructorParams, InitParams, Kind}; + use fil_actors_evm_shared::address::EthAddress; + use fil_actors_runtime::{ + test_utils::{ + expect_empty, MockRuntime, ADM_ACTOR_CODE_ID, ETHACCOUNT_ACTOR_CODE_ID, + INIT_ACTOR_CODE_ID, + }, + ADM_ACTOR_ADDR, INIT_ACTOR_ADDR, + }; + use fvm_ipld_encoding::ipld_block::IpldBlock; + use fvm_shared::{ + address::Address, clock::ChainEpoch, econ::TokenAmount, error::ExitCode, sys::SendFlags, + MethodNum, + }; + use recall_actor_sdk::evm::to_actor_event; + + pub fn construct_runtime(actor_address: Address, owner_id_addr: Address) -> MockRuntime { + let owner_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let owner_delegated_addr = Address::new_delegated(10, &owner_eth_addr.0).unwrap(); + + let rt = MockRuntime { + receiver: actor_address, + ..Default::default() + }; + rt.set_delegated_address(owner_id_addr.id().unwrap(), owner_delegated_addr); + + rt.set_caller(*INIT_ACTOR_CODE_ID, INIT_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![INIT_ACTOR_ADDR]); + let metadata = HashMap::new(); + let event = to_actor_event(MachineCreated::new( + Kind::Timehub, + owner_delegated_addr, + &metadata, + )) + .unwrap(); + rt.expect_emitted_event(event); + let result = rt + .call::( + Method::Constructor as u64, + IpldBlock::serialize_cbor(&ConstructorParams { + owner: owner_id_addr, + metadata, + }) + .unwrap(), + ) + .unwrap(); + expect_empty(result); + rt.verify(); + + rt.set_caller(*ADM_ACTOR_CODE_ID, ADM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![ADM_ACTOR_ADDR]); + let event = to_actor_event(MachineInitialized::new(Kind::Timehub, actor_address)).unwrap(); + rt.expect_emitted_event(event); + let actor_init = rt + .call::( + Method::Init as u64, + IpldBlock::serialize_cbor(&InitParams { + address: actor_address, + }) + .unwrap(), + ) + .unwrap(); + expect_empty(actor_init); + rt.verify(); + + rt.reset(); + rt + } + + fn get_count(rt: &MockRuntime) -> u64 { + rt.expect_validate_caller_any(); + rt.call::(Method::Count as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap() + } + + fn get_root(rt: &MockRuntime) -> Cid { + rt.expect_validate_caller_any(); + rt.call::(Method::Root as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap() + } + + fn get_leaf(rt: &MockRuntime, index: u64) -> Leaf { + rt.expect_validate_caller_any(); + rt.call::( + Method::Get as u64, + IpldBlock::serialize_cbor(&index).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap() + .unwrap() + } + + fn push_cid(rt: &mut MockRuntime, cid: Cid, timestamp: u64, expected_index: u64) -> PushReturn { + rt.expect_validate_caller_any(); + rt.tipset_timestamp = timestamp; + let push_params = PushParams(cid.to_bytes()); + let event = 
to_actor_event(EventPushed::new(expected_index, timestamp, cid)).unwrap(); + rt.expect_emitted_event(event); + rt.call::( + Method::Push as u64, + IpldBlock::serialize_cbor(&push_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::() + .unwrap() + } + + #[test] + pub fn test_basic_crud() { + let owner = Address::new_id(110); + let actor_address = Address::new_id(111); + + let mut rt = construct_runtime(actor_address, owner); + + // Push calls comes from Timehub owner + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, owner); + rt.set_origin(owner); + + // Check the initial count + let count = get_count(&rt); + assert_eq!(count, 0); + + // Check the initial root + let root = get_root(&rt); + assert_eq!(root, Cid::from_str("baeaaaaa").unwrap()); + + // Push one CID + let t0 = 1738787063; + let cid0 = Cid::from_str("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi") + .unwrap(); + let result0 = push_cid(&mut rt, cid0, t0, 0); + + assert_eq!(0, result0.index); + let expected_root0 = + Cid::from_str("bafy2bzacebva5uaq4ayn6ax7zzywcqapf3w4q3oamez6sukidiqiz3m4c6osu") + .unwrap(); + assert_eq!(result0.root, expected_root0); + + // Read the value pushed + let leaf = get_leaf(&rt, 0); + assert_eq!(leaf.witnessed, cid0); + assert_eq!(leaf.timestamp, t0); + + // Check the root + let root = get_root(&rt); + assert_eq!(root, expected_root0); + + // Check the count + let count = get_count(&rt); + assert_eq!(count, 1); + + // Push a second CID + let t1 = t0 + 1; + let cid1 = + Cid::from_str("baeabeidtz333ke5c4ultzeg6jkyzgdmvduytt2so3ahozm4zqstiuwq33e").unwrap(); + let result1 = push_cid(&mut rt, cid1, t1, 1); + + assert_eq!(1, result1.index); + let expected_root1 = + Cid::from_str("bafy2bzaceb6nrirwdm2ebk5ygl4nhwqjaegpbhavjg2obkshcgoogy4kbovds") + .unwrap(); + assert_eq!(result1.root, expected_root1); + + // Read the first value pushed + let leaf0 = get_leaf(&rt, 0); + assert_eq!(leaf0.witnessed, cid0); + assert_eq!(leaf0.timestamp, t0); + + // Read the second value pushed + let leaf1 = get_leaf(&rt, 1); + assert_eq!(leaf1.witnessed, cid1); + assert_eq!(leaf1.timestamp, t1); + + // Check the root + let root = get_root(&rt); + assert_eq!(root, expected_root1); + + // Check the count + let count = get_count(&rt); + assert_eq!(count, 2); + + rt.verify(); + } + + #[test] + pub fn test_push_access_control_with_no_approval() { + let owner = Address::new_id(110); + let actor_address = Address::new_id(111); + let origin = Address::new_id(112); + + let rt = construct_runtime(actor_address, owner); + + // Push calls comes from the origin Address, which is *not* the Timehub owner. + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, origin); + rt.set_origin(origin); + + // Set up that the account doing the push does not have a credit approval from the Timehub owner + let missing_approval: Option = None; + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: owner, + to: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&missing_approval).unwrap(), + ExitCode::OK, + None, + ); + + // Attempt to push a CID, should fail with access control error. 
+ let cid = Cid::from_str("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi") + .unwrap(); + let push_params = PushParams(cid.to_bytes()); + rt.expect_validate_caller_any(); + + let err = rt + .call::( + Method::Push as u64, + IpldBlock::serialize_cbor(&push_params).unwrap(), + ) + .expect_err("Push succeeded despite not having a valid credit approval"); + assert_eq!(err.exit_code(), ExitCode::USR_FORBIDDEN); + + rt.verify(); + } + + #[test] + pub fn test_push_access_control_with_valid_approval_no_expiry() { + let owner = Address::new_id(110); + let actor_address = Address::new_id(111); + let origin = Address::new_id(112); + + let mut rt = construct_runtime(actor_address, owner); + + // Push calls comes from the origin Address, which is *not* the Timehub owner. + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, origin); + rt.set_origin(origin); + + // Set up valid credit approval from the Timehub owner to the address that will perform the push + let approval = CreditApproval { + credit_limit: None, + gas_allowance_limit: None, + expiry: None, + credit_used: Default::default(), + gas_allowance_used: Default::default(), + }; + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: owner, + to: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&approval).unwrap(), + ExitCode::OK, + None, + ); + + // Push a CID + let tipset_timestamp = 1738787063; + let cid = Cid::from_str("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi") + .unwrap(); + let result = push_cid(&mut rt, cid, tipset_timestamp, 0); + + assert_eq!(0, result.index); + let expected_root0 = + Cid::from_str("bafy2bzacebva5uaq4ayn6ax7zzywcqapf3w4q3oamez6sukidiqiz3m4c6osu") + .unwrap(); + assert_eq!(result.root, expected_root0); + + rt.verify(); + } + + #[test] + pub fn test_push_access_control_with_valid_approval_future_expiry() { + let owner = Address::new_id(110); + let actor_address = Address::new_id(111); + let origin = Address::new_id(112); + + let mut rt = construct_runtime(actor_address, owner); + + // Push calls comes from the origin Address, which is *not* the Timehub owner. 
+ rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, origin); + rt.set_origin(origin); + + // Set up valid credit approval from the Timehub owner to the address that will perform the push + let epoch0: ChainEpoch = 100; + let epoch1 = epoch0 + 1; + rt.set_epoch(epoch0); + + let approval = CreditApproval { + credit_limit: None, + gas_allowance_limit: None, + expiry: Some(epoch1), + credit_used: Default::default(), + gas_allowance_used: Default::default(), + }; + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: owner, + to: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&approval).unwrap(), + ExitCode::OK, + None, + ); + + // Push a CID + let tipset_timestamp = 1738787063; + let cid = Cid::from_str("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi") + .unwrap(); + + let result = push_cid(&mut rt, cid, tipset_timestamp, 0); + assert_eq!(0, result.index); + let expected_root0 = + Cid::from_str("bafy2bzacebva5uaq4ayn6ax7zzywcqapf3w4q3oamez6sukidiqiz3m4c6osu") + .unwrap(); + assert_eq!(result.root, expected_root0); + + rt.verify(); + } + + #[test] + pub fn test_push_access_control_with_expired_approval() { + let owner = Address::new_id(110); + let actor_address = Address::new_id(111); + let origin = Address::new_id(112); + + let rt = construct_runtime(actor_address, owner); + + // Push calls comes from the origin Address, which is *not* the Timehub owner. + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, origin); + rt.set_origin(origin); + + // Set up that the account doing the push does have a credit approval from the Timehub owner, + // but it is expired + let epoch0: ChainEpoch = 100; + let epoch1 = epoch0 + 1; + rt.set_epoch(epoch1); + + let expired_approval = CreditApproval { + credit_limit: None, + gas_allowance_limit: None, + expiry: Some(epoch0), + credit_used: Default::default(), + gas_allowance_used: Default::default(), + }; + rt.expect_send( + BLOBS_ACTOR_ADDR, + BlobMethod::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(&GetCreditApprovalParams { + from: owner, + to: origin, + }) + .unwrap(), + TokenAmount::from_whole(0), + None, + SendFlags::READ_ONLY, + IpldBlock::serialize_cbor(&expired_approval).unwrap(), + ExitCode::OK, + None, + ); + + // Attempt to push a CID, should fail with access control error. 
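+        // Note on the expiry semantics exercised here: the approval's `expiry`
+        // (epoch0) is compared against the current chain epoch, which was set to
+        // epoch1 = epoch0 + 1 above, so the approval no longer authorizes the push.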
+        let cid = Cid::from_str("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi")
+            .unwrap();
+        let push_params = PushParams(cid.to_bytes());
+        rt.expect_validate_caller_any();
+
+        let err = rt
+            .call::<Actor>(
+                Method::Push as u64,
+                IpldBlock::serialize_cbor(&push_params).unwrap(),
+            )
+            .expect_err("Push succeeded despite not having a valid credit approval");
+        assert_eq!(err.exit_code(), ExitCode::USR_FORBIDDEN);
+
+        rt.verify();
+    }
+}
diff --git a/fendermint/actors/timehub/src/lib.rs b/fendermint/actors/timehub/src/lib.rs
new file mode 100644
index 0000000000..8bf738f1dd
--- /dev/null
+++ b/fendermint/actors/timehub/src/lib.rs
@@ -0,0 +1,9 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+mod actor;
+mod shared;
+mod sol_facade;
+
+pub use shared::*;
diff --git a/fendermint/actors/timehub/src/shared.rs b/fendermint/actors/timehub/src/shared.rs
new file mode 100644
index 0000000000..c9b30eeadd
--- /dev/null
+++ b/fendermint/actors/timehub/src/shared.rs
@@ -0,0 +1,528 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::collections::HashMap;
+
+use cid::Cid;
+use fendermint_actor_machine::{
+    Kind, MachineAddress, MachineState, GET_ADDRESS_METHOD, GET_METADATA_METHOD, INIT_METHOD,
+    METHOD_CONSTRUCTOR,
+};
+use fil_actors_runtime::ActorError;
+use fvm_ipld_amt::Amt;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::{strict_bytes, to_vec, tuple::*, CborStore, DAG_CBOR};
+use fvm_shared::address::Address;
+use multihash_codetable::{Code, MultihashDigest};
+use num_derive::FromPrimitive;
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+
+pub const TIMEHUB_ACTOR_NAME: &str = "timehub";
+const BIT_WIDTH: u32 = 3;
+
+fn state_error(e: fvm_ipld_amt::Error) -> ActorError {
+    ActorError::illegal_state(e.to_string())
+}
+
+fn store_error(e: anyhow::Error) -> ActorError {
+    ActorError::illegal_state(e.to_string())
+}
+
+#[derive(FromPrimitive)]
+#[repr(u64)]
+pub enum Method {
+    Constructor = METHOD_CONSTRUCTOR,
+    Init = INIT_METHOD,
+    GetAddress = GET_ADDRESS_METHOD,
+    GetMetadata = GET_METADATA_METHOD,
+    Push = frc42_dispatch::method_hash!("Push"),
+    Get = frc42_dispatch::method_hash!("Get"),
+    Root = frc42_dispatch::method_hash!("Root"),
+    Peaks = frc42_dispatch::method_hash!("Peaks"),
+    Count = frc42_dispatch::method_hash!("Count"),
+    // EVM Interop
+    InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"),
+}
+
+/// Bytes of a CID to add.
+#[derive(Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct PushParams(#[serde(with = "strict_bytes")] pub Vec<u8>);
+
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct PushReturn {
+    /// The new root of the Timehub MMR after the object was pushed into it.
+    pub root: Cid,
+    /// The index of the object that was just pushed into the Timehub.
+    pub index: u64,
+}
+
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct Leaf {
+    /// Timestamp of the witness in seconds since the UNIX epoch.
+    pub timestamp: u64,
+    /// Witnessed CID.
+    pub witnessed: Cid,
+}
+
+/// Compute the hash of a pair of CIDs.
+/// The hash is the CID of a new block containing the concatenation of the two CIDs.
+/// We do not include the index of the element(s) because incoming data should already be "nonced".
+fn hash_pair(left: Option<&Cid>, right: Option<&Cid>) -> anyhow::Result<Cid, ActorError> {
+    if let (Some(left), Some(right)) = (left, right) {
+        // Encode the CIDs into a binary format.
+        let data = to_vec(&[left, right])?;
+        // Compute the CID for the block.
+        let mh_code = Code::Blake2b256;
+        let mh = mh_code.digest(&data);
+        let cid = Cid::new_v1(DAG_CBOR, mh);
+        Ok(cid)
+    } else {
+        Err(ActorError::illegal_argument(
+            "hash_pair requires two CIDs".into(),
+        ))
+    }
+}
+
+/// Compute and store the hash of a pair of CIDs.
+/// The hash is the CID of a new block containing the concatenation of the two CIDs.
+/// We do not include the index of the element(s) because incoming data should already be "nonced".
+fn hash_and_put_pair<BS: Blockstore>(
+    store: &BS,
+    left: Option<&Cid>,
+    right: Option<&Cid>,
+) -> anyhow::Result<Cid, ActorError> {
+    if let (Some(left), Some(right)) = (left, right) {
+        // Compute the CID for the block and store the block.
+        store
+            .put_cbor(&[left, right], Code::Blake2b256)
+            .map_err(store_error)
+    } else {
+        Err(ActorError::illegal_argument(
+            "hash_and_put_pair requires two CIDs".into(),
+        ))
+    }
+}
+
+/// Push `obj` as a new leaf and return the new root of the peaks AMT.
+fn push<BS: Blockstore, S: Serialize>(
+    store: &BS,
+    leaf_count: u64,
+    peaks: &mut Amt<Cid, &BS>,
+    obj: S,
+) -> anyhow::Result<Cid, ActorError> {
+    // Create the new leaf.
+    let leaf = store
+        .put_cbor(&obj, Code::Blake2b256)
+        .map_err(store_error)?;
+    // Push the new leaf onto the peaks.
+    peaks.set(peaks.count(), leaf).map_err(state_error)?;
+    // Count the trailing ones in the binary representation of the previous leaf_count.
+    // This works because adding a leaf fills the next available spot,
+    // and the binary representation of this index will have trailing ones
+    // where merges are required.
+    let mut new_peaks = (!leaf_count).trailing_zeros();
+    while new_peaks > 0 {
+        // Pop the last two peaks and push their hash.
+        let right = peaks.delete(peaks.count() - 1).map_err(state_error)?;
+        let left = peaks.delete(peaks.count() - 1).map_err(state_error)?;
+        // Push the new peak onto the peak array.
+        peaks
+            .set(
+                peaks.count(),
+                hash_and_put_pair(store, left.as_ref(), right.as_ref())?,
+            )
+            .map_err(state_error)?;
+        new_peaks -= 1;
+    }
+    peaks.flush().map_err(state_error)
+}
+
+/// Collect the peaks and combine them to compute the root commitment.
+fn bag_peaks<BS: Blockstore>(peaks: &Amt<Cid, &BS>) -> anyhow::Result<Cid, ActorError> {
+    let peaks_count = peaks.count();
+    // Handle the special cases where we have no peaks or only one peak.
+    if peaks_count == 0 {
+        return Ok(Cid::default());
+    }
+    // If there is only one peak, we simply "promote" it to be the root.
+    if peaks_count == 1 {
+        return Ok(peaks.get(0).map_err(state_error)?.unwrap().to_owned());
+    }
+    // Walk backward through the peaks, combining them pairwise.
+    let mut root = hash_pair(
+        peaks.get(peaks_count - 2).map_err(state_error)?,
+        peaks.get(peaks_count - 1).map_err(state_error)?,
+    )?;
+    for i in 2..peaks_count {
+        root = hash_pair(
+            peaks.get(peaks_count - 1 - i).map_err(state_error)?,
+            Some(&root),
+        )?;
+    }
+    Ok(root)
+}
+
+/// Given the size of the MMR and an index into the MMR, returns a tuple whose first element
+/// is the path through the subtree (eigentree) that the leaf node lives in, and whose second
+/// element is the index of the peak whose eigentree contains the leaf node.
+fn path_for_eigen_root(leaf_index: u64, leaf_count: u64) -> anyhow::Result<Option<(u64, u64)>> {
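+    // Worked example: for leaf_count = 11 (0b1011) and leaf_index = 5 (0b0101),
+    // diff = 5 ^ 11 = 14 (0b1110), so the paths diverge at bit 3:
+    // eigentree_height = 3 and merge_height = 8. The MMR has
+    // popcount(11) = 3 eigentrees and offset = popcount(11 & 7) = 2, giving
+    // eigen_index = 3 - 2 - 1 = 0 (the eight-leaf eigentree). The in-tree path
+    // is local_path = (5 & 7) + 8 = 13 (0b1101).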
+    // Ensure `leaf_index` is within bounds.
+    if leaf_index >= leaf_count {
+        return Ok(None);
+    }
+    // XOR turns matching bits into zeros and differing bits into ones, so to determine where
+    // the two "paths" converge, we simply look for the most significant 1 bit...
+    let diff = leaf_index ^ leaf_count;
+    // ...and the merge height of `leaf_index` and `leaf_count` occurs at ⌊log2(x ⊕ y)⌋.
+    let eigentree_height = u64::BITS - diff.leading_zeros() - 1;
+    let merge_height = 1 << eigentree_height;
+    // Compute a bitmask (all the lower bits set to 1).
+    let bitmask = merge_height - 1;
+    // The Hamming weight of leaf_count is the number of eigentrees in the structure.
+    let eigentree_count = leaf_count.count_ones();
+    // Isolate the lower bits of leaf_count up to the merge_height, and count the one-bits.
+    // This is essentially the offset to the eigentree containing leaf_index.
+    let offset = (leaf_count & bitmask).count_ones();
+    // The index is simply the total eigentree count minus the offset (minus one).
+    let eigen_index = eigentree_count - offset - 1;
+    // Now that we have the offset, we need to determine the path within the local eigentree.
+    let local_offset = leaf_index & bitmask;
+    // The local path is the local_offset plus the merge_height for the local eigentree.
+    let local_path = local_offset + merge_height;
+    Ok(Some((local_path, eigen_index as u64)))
+}
+
+/// Returns None when the index doesn't point to a leaf.
+/// If the index is valid, it will return a value or an error.
+fn get_at<BS: Blockstore, T: DeserializeOwned>(
+    store: &BS,
+    leaf_index: u64,
+    leaf_count: u64,
+    peaks: &Amt<Cid, &BS>,
+) -> anyhow::Result<Option<T>> {
+    let (path, eigen_index) = match path_for_eigen_root(leaf_index, leaf_count)? {
+        None => return Ok(None),
+        Some(res) => res,
+    };
+    let cid = match peaks.get(eigen_index)? {
+        Some(cid) => cid,
+        None => return Ok(None),
+    };
+    // Special case where the eigentree has a height of one.
+    if path == 1 {
+        return Ok(Some(store.get_cbor::<T>(cid)?.ok_or_else(|| {
+            anyhow::anyhow!("failed to get leaf for cid {}", cid)
+        })?));
+    }
+
+    let mut pair = match store.get_cbor::<[Cid; 2]>(cid)? {
+        Some(value) => value,
+        None => anyhow::bail!("failed to get eigentree root node for cid {}", cid),
+    };
+
+    let leading_zeros = path.leading_zeros();
+    let significant_bits = 64 - leading_zeros;
+
+    // Walk down the eigentree, consuming one path bit per level,
+    // from the most significant bit to the least.
+    for i in 1..(significant_bits - 1) {
+        let bit = ((path >> (significant_bits - i - 1)) & 1) as usize;
+        let cid = &pair[bit];
+        pair = store.get_cbor(cid)?.ok_or_else(|| {
+            anyhow::anyhow!("failed to get eigentree intermediate node for cid {}", cid)
+        })?;
+    }
+
+    let bit = (path & 1) as usize;
+    let cid = &pair[bit];
+    let leaf = store
+        .get_cbor::<T>(cid)?
+        .ok_or_else(|| anyhow::anyhow!("failed to get leaf for cid {}", cid))?;
+
+    Ok(Some(leaf))
+}
+
+/// The state represents an MMR with peaks stored in an AMT.
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct State {
+    /// The machine address set by the init actor.
+    pub address: MachineAddress,
+    /// The machine's robust owner address.
+    pub owner: Address,
+    /// Root of the AMT that is storing the peaks of the MMR.
+    pub peaks: Cid,
+    /// Number of leaf nodes in the Timehub MMR.
+    pub leaf_count: u64,
+    /// User-defined metadata.
+    pub metadata: HashMap<String, String>,
+}
+
+impl MachineState for State {
+    fn new<BS: Blockstore>(
+        store: &BS,
+        owner: Address,
+        metadata: HashMap<String, String>,
+    ) -> anyhow::Result<Self, ActorError> {
+        let peaks = match Amt::<(), _>::new_with_bit_width(store, BIT_WIDTH).flush() {
+            Ok(cid) => cid,
+            Err(e) => {
+                return Err(ActorError::illegal_state(format!(
+                    "timehub actor failed to create empty Amt: {}",
+                    e
+                )));
+            }
+        };
+        Ok(Self {
+            address: Default::default(),
+            owner,
+            peaks,
+            leaf_count: 0,
+            metadata,
+        })
+    }
+
+    fn init(&mut self, address: Address) -> anyhow::Result<(), ActorError> {
+        self.address.set(address)
+    }
+
+    fn address(&self) -> MachineAddress {
+        self.address.clone()
+    }
+
+    fn kind(&self) -> Kind {
+        Kind::Timehub
+    }
+
+    fn owner(&self) -> Address {
+        self.owner
+    }
+
+    fn metadata(&self) -> HashMap<String, String> {
+        self.metadata.clone()
+    }
+}
+
+impl State {
+    pub fn peak_count(&self) -> u32 {
+        self.leaf_count.count_ones()
+    }
+
+    pub fn leaf_count(&self) -> u64 {
+        self.leaf_count
+    }
+
+    pub fn push<BS: Blockstore, S: Serialize>(
+        &mut self,
+        store: &BS,
+        obj: S,
+    ) -> anyhow::Result<PushReturn, ActorError> {
+        let mut amt = Amt::<Cid, _>::load(&self.peaks, store).map_err(state_error)?;
+        self.peaks = push(store, self.leaf_count, &mut amt, obj)?;
+        self.leaf_count += 1;
+
+        let root = bag_peaks(&amt)?;
+        Ok(PushReturn {
+            root,
+            index: self.leaf_count - 1,
+        })
+    }
+
+    pub fn get_root<BS: Blockstore>(&self, store: &BS) -> anyhow::Result<Cid, ActorError> {
+        let amt = Amt::<Cid, _>::load(&self.peaks, store).map_err(state_error)?;
+        bag_peaks(&amt)
+    }
+
+    pub fn get_peaks<BS: Blockstore>(&self, store: &BS) -> anyhow::Result<Vec<Cid>, ActorError> {
+        let amt = Amt::<Cid, _>::load(&self.peaks, store).map_err(state_error)?;
+        let mut peaks = Vec::new();
+        amt.for_each(|_, cid| {
+            peaks.push(cid.to_owned());
+            Ok(())
+        })
+        .map_err(state_error)?;
+        Ok(peaks)
+    }
+
+    pub fn get_leaf_at<BS: Blockstore, T: DeserializeOwned>(
+        &self,
+        store: &BS,
+        index: u64,
+    ) -> anyhow::Result<Option<T>, ActorError> {
+        let amt = Amt::<Cid, _>::load(&self.peaks, store).map_err(state_error)?;
+        get_at::<BS, T>(store, index, self.leaf_count, &amt)
+            .map_err(|e| ActorError::serialization(e.to_string()))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::str::FromStr;
+
+    #[test]
+    fn test_constructor() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let state = State::new(&store, Address::new_id(100), HashMap::new());
+        assert!(state.is_ok());
+        let state = state.unwrap();
+        assert_eq!(
+            state.peaks,
+            Cid::from_str("bafy2bzacedijw74yui7otvo63nfl3hdq2vdzuy7wx2tnptwed6zml4vvz7wee")
+                .unwrap()
+        );
+        assert_eq!(state.leaf_count(), 0);
+    }
+
+    #[test]
+    fn test_hash_and_put_pair() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+
+        let obj1 = vec![1, 2, 3];
+        let obj2 = vec![1, 2, 3];
+        let cid1 = state.push(&store, obj1).expect("push1 failed").root;
+        let cid2 = state.push(&store, obj2).expect("push2 failed").root;
+
+        let pair_cid =
+            hash_and_put_pair(&store, Some(&cid1), Some(&cid2)).expect("hash_and_put_pair failed");
+        let merkle_node = store
+            .get_cbor::<[Cid; 2]>(&pair_cid)
+            .expect("get_cbor failed")
+            .expect("get_cbor returned None");
+        let expected = [cid1, cid2];
+        assert_eq!(merkle_node, expected);
+    }
+
+    #[test]
+    fn test_hash_pair() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+
+        let obj1 = vec![1, 2, 3];
+        let obj2 = vec![1, 2, 3];
+        let cid1 = state.push(&store, obj1).expect("push1 failed").root;
+        let cid2 = state.push(&store, obj2).expect("push2 failed").root;
+
+        // Compare hash_pair and hash_and_put_pair and make sure they result in the same CID.
+        let hash1 = hash_pair(Some(&cid1), Some(&cid2)).expect("hash_pair failed");
+        let hash2 =
+            hash_and_put_pair(&store, Some(&cid1), Some(&cid2)).expect("hash_and_put_pair failed");
+        assert_eq!(hash1, hash2);
+    }
+
+    #[test]
+    fn test_push_simple() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+        let obj = vec![1, 2, 3];
+        let res = state.push(&store, obj).expect("push failed");
+        assert_eq!(res.root, state.get_root(&store).expect("get_root failed"));
+        assert_eq!(res.index, 0);
+        assert_eq!(state.leaf_count(), 1);
+    }
+
+    #[test]
+    fn test_get_peaks() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+        let obj = vec![1, 2, 3];
+        assert!(state.push(&store, obj).is_ok());
+        assert_eq!(state.leaf_count(), 1);
+        let peaks = state.get_peaks(&store);
+        assert!(peaks.is_ok());
+        let peaks = peaks.unwrap();
+        assert_eq!(peaks.len(), 1);
+        assert_eq!(
+            peaks[0],
+            Cid::from_str("bafy2bzacebltuz74cvzod3x7cx3eledj4gn5vjcer7znymoq56htf2e3cclok")
+                .unwrap()
+        );
+    }
+
+    #[test]
+    fn test_bag_peaks() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+        let mut root = Cid::default();
+        for i in 1..=11 {
+            let res = state.push(&store, vec![i]).unwrap();
+            root = res.root;
+            assert_eq!(res.index, i - 1);
+        }
+        let peaks = state.get_peaks(&store).unwrap();
+        assert_eq!(peaks.len(), 3);
+        assert_eq!(state.leaf_count(), 11);
+        assert_eq!(root, state.get_root(&store).expect("get_root failed"));
+    }
+
+    #[test]
+    fn test_get_obj_basic() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+
+        state.push(&store, vec![0]).unwrap();
+        assert_eq!(state.peak_count(), 1);
+        assert_eq!(state.leaf_count(), 1);
+        let item0 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 0u64)
+            .unwrap()
+            .unwrap();
+        assert_eq!(item0, vec![0]);
+
+        state.push(&store, vec![1]).unwrap();
+        assert_eq!(state.peak_count(), 1);
+        assert_eq!(state.leaf_count(), 2);
+        let item0 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 0u64)
+            .unwrap()
+            .unwrap();
+        let item1 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 1u64)
+            .unwrap()
+            .unwrap();
+        assert_eq!(item0, vec![0]);
+        assert_eq!(item1, vec![1]);
+
+        state.push(&store, vec![2]).unwrap();
+        assert_eq!(state.peak_count(), 2);
+        assert_eq!(state.leaf_count(), 3);
+        let item0 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 0u64)
+            .unwrap()
+            .unwrap();
+        let item1 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 1u64)
+            .unwrap()
+            .unwrap();
+        let item2 = state
+            .get_leaf_at::<_, Vec<u8>>(&store, 2u64)
+            .unwrap()
+            .unwrap();
+        assert_eq!(item0, vec![0]);
+        assert_eq!(item1, vec![1]);
+        assert_eq!(item2, vec![2]);
+    }
+
+    #[test]
+    fn test_get_obj() {
+        let store = fvm_ipld_blockstore::MemoryBlockstore::default();
+        let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap();
+        for i in 0..31 {
+            state.push(&store, vec![i]).unwrap();
+            assert_eq!(state.leaf_count(), i + 1);
+
+            // As more items are added to the Timehub, ensure each item remains gettable at
+            // each phase of the growth of the inner tree structures.
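+            // (leaf_count runs through every pattern up to 31 = 0b11111, so each
+            // merge shape is exercised; the final assertion below checks the
+            // expected five peaks, one eigentree per set bit of leaf_count.)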
+            for j in 0..i {
+                let item = state
+                    .get_leaf_at::<_, Vec<u64>>(&store, j)
+                    .unwrap()
+                    .unwrap();
+                assert_eq!(item, vec![j]);
+            }
+        }
+        assert_eq!(state.peak_count(), 5);
+    }
+}
diff --git a/fendermint/actors/timehub/src/sol_facade.rs b/fendermint/actors/timehub/src/sol_facade.rs
new file mode 100644
index 0000000000..a5c5bf1257
--- /dev/null
+++ b/fendermint/actors/timehub/src/sol_facade.rs
@@ -0,0 +1,115 @@
+// Copyright 2022-2024 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use anyhow::Error;
+use cid::Cid;
+use fil_actors_runtime::{actor_error, ActorError};
+use recall_actor_sdk::declare_abi_call;
+use recall_actor_sdk::evm::{InputData, TryIntoEVMEvent};
+use recall_sol_facade::primitives::U256;
+use recall_sol_facade::timehub as sol;
+use recall_sol_facade::types::{SolCall, SolInterface};
+
+use crate::{Leaf, PushParams, PushReturn};
+
+pub struct EventPushed {
+    index: u64,
+    timestamp: u64,
+    cid: Cid,
+}
+impl EventPushed {
+    pub fn new(index: u64, timestamp: u64, cid: Cid) -> Self {
+        Self {
+            index,
+            timestamp,
+            cid,
+        }
+    }
+}
+impl TryIntoEVMEvent for EventPushed {
+    type Target = sol::Events;
+
+    fn try_into_evm_event(self) -> Result<Self::Target, Error> {
+        Ok(sol::Events::EventPushed(sol::EventPushed {
+            index: U256::from(self.index),
+            timestamp: U256::from(self.timestamp),
+            cid: self.cid.to_bytes().into(),
+        }))
+    }
+}
+
+// ----- Calls ----- //
+
+declare_abi_call!();
+
+pub fn can_handle(input_data: &InputData) -> bool {
+    sol::Calls::valid_selector(input_data.selector())
+}
+
+pub fn parse_input(input: &InputData) -> Result<sol::Calls, ActorError> {
+    sol::Calls::abi_decode_raw(input.selector(), input.calldata(), true)
+        .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e)))
+}
+
+impl AbiCall for sol::pushCall {
+    type Params = PushParams;
+    type Returns = PushReturn;
+    type Output = Vec<u8>;
+    fn params(&self) -> Self::Params {
+        PushParams(self.cid.0.iter().as_slice().to_vec())
+    }
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let root = returns.root.to_bytes();
+        let index = returns.index;
+        Self::abi_encode_returns(&(root, index))
+    }
+}
+
+impl AbiCall for sol::getLeafAtCall {
+    type Params = u64;
+    type Returns = Option<Leaf>;
+    type Output = Vec<u8>;
+    fn params(&self) -> Self::Params {
+        self.index
+    }
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let (timestamp, witnessed) = if let Some(leaf) = returns {
+            (leaf.timestamp, leaf.witnessed.to_bytes())
+        } else {
+            (u64::default(), Vec::default())
+        };
+        Self::abi_encode_returns(&(timestamp, witnessed))
+    }
+}
+
+impl AbiCall for sol::getCountCall {
+    type Params = ();
+    type Returns = u64;
+    type Output = Vec<u8>;
+    fn params(&self) -> Self::Params {}
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&(returns,))
+    }
+}
+
+impl AbiCall for sol::getPeaksCall {
+    type Params = ();
+    type Returns = Vec<Cid>;
+    type Output = Vec<u8>;
+    fn params(&self) -> Self::Params {}
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        let cids = returns.iter().map(|cid| cid.to_bytes()).collect::<Vec<_>>();
+        Self::abi_encode_returns(&(cids,))
+    }
+}
+
+impl AbiCall for sol::getRootCall {
+    type Params = ();
+    type Returns = Cid;
+    type Output = Vec<u8>;
+    fn params(&self) -> Self::Params {}
+    fn returns(&self, returns: Self::Returns) -> Self::Output {
+        Self::abi_encode_returns(&(returns.to_bytes(),))
+    }
+}
diff --git a/fendermint/app/Cargo.toml b/fendermint/app/Cargo.toml
index 33ba6fad21..9a7c67e85d 100644
---
a/fendermint/app/Cargo.toml +++ b/fendermint/app/Cargo.toml @@ -26,6 +26,18 @@ openssl = { workspace = true } paste = { workspace = true } prometheus = { workspace = true } prometheus_exporter = { workspace = true } +# Objects/Recall HTTP API dependencies +warp = { workspace = true } +uuid = { workspace = true } +mime_guess = { workspace = true } +urlencoding = { workspace = true } +entangler = { workspace = true } +entangler_storage = { workspace = true } +iroh_manager = { path = "../../recall/iroh_manager" } +iroh = { workspace = true } +iroh-blobs = { workspace = true } +thiserror = { workspace = true } +futures-util = { workspace = true } prost = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } @@ -48,6 +60,7 @@ url = { workspace = true } fendermint_abci = { path = "../abci" } actors-custom-api = { path = "../actors/api" } +fendermint_actor_bucket = { path = "../actors/bucket" } fendermint_actor_f3_light_client = { path = "../actors/f3-light-client" } fendermint_app_options = { path = "./options" } fendermint_app_settings = { path = "./settings" } @@ -59,6 +72,7 @@ fendermint_rpc = { path = "../rpc" } fendermint_storage = { path = "../storage" } fendermint_tracing = { path = "../tracing" } fendermint_actor_gas_market_eip1559 = { path = "../actors/gas_market/eip1559" } +fendermint_actor_blobs_shared = { path = "../actors/blobs/shared" } fendermint_vm_actor_interface = { path = "../vm/actor_interface" } fendermint_vm_core = { path = "../vm/core" } fendermint_vm_encoding = { path = "../vm/encoding" } @@ -71,6 +85,10 @@ fendermint_vm_message = { path = "../vm/message" } fendermint_vm_resolver = { path = "../vm/resolver" } fendermint_vm_snapshot = { path = "../vm/snapshot" } fendermint_vm_topdown = { path = "../vm/topdown" } +fendermint_vm_iroh_resolver = { path = "../vm/iroh_resolver" } + +# Recall actors needed for objects command +# fendermint_actor_bucket = { path = "../actors/bucket" } # TODO: depends on machine/ADM (not in main) ipc_actors_abis = { path = "../../contract-bindings" } ethers = {workspace = true} diff --git a/fendermint/app/config/default.toml b/fendermint/app/config/default.toml index 1aa0174248..3b1b2684f4 100644 --- a/fendermint/app/config/default.toml +++ b/fendermint/app/config/default.toml @@ -258,6 +258,47 @@ rate_limit_bytes = 0 # Length of the time period at which the consumption limit fills. 0 means no limit. rate_limit_period = 0 +# Iroh Blob Storage Configuration +[resolver.iroh_resolver_config] +# IPv4 address for Iroh node (UDP). Leave blank to use defaults. +# Default: 0.0.0.0:11204 +# v4_addr = "0.0.0.0:11204" + +# IPv6 address for Iroh node (UDP). Leave blank to disable IPv6. 
+# Default: None
+# v6_addr = "[::]:11205"
+
+# Data directory for Iroh blob storage
+iroh_data_dir = "data/iroh_resolver"
+
+# RPC address for Iroh client communication (TCP, local only)
+rpc_addr = "127.0.0.1:4444"
+
+# Objects HTTP API Configuration (for blob upload/download)
+[objects]
+# Maximum allowed object/file size for uploads (in bytes)
+# Default: 100MB
+max_object_size = 104857600
+
+# HTTP API listen address
+[objects.listen]
+host = "127.0.0.1"
+port = 8080
+
+# Tracing configuration for Objects API
+[objects.tracing]
+[objects.tracing.console]
+enabled = true
+[objects.tracing.file]
+enabled = false
+
+# Metrics configuration for Objects API
+[objects.metrics]
+enabled = true
+[objects.metrics.listen]
+host = "127.0.0.1"
+port = 9186
+
 # IPC related configuration parameters
 [ipc]
 # Default subnet ID, which basically means IPC is disabled.
diff --git a/fendermint/app/options/src/lib.rs b/fendermint/app/options/src/lib.rs
index ac44c2069a..7f27afd6a0 100644
--- a/fendermint/app/options/src/lib.rs
+++ b/fendermint/app/options/src/lib.rs
@@ -10,10 +10,9 @@ use fvm_shared::address::Network;
 use lazy_static::lazy_static;
 
 use self::{
-    eth::EthArgs, genesis::GenesisArgs, key::KeyArgs, materializer::MaterializerArgs, rpc::RpcArgs,
-    run::RunArgs,
+    eth::EthArgs, genesis::GenesisArgs, key::KeyArgs, materializer::MaterializerArgs,
+    rpc::RpcArgs, run::RunArgs,
 };
-
 pub mod config;
 pub mod debug;
 pub mod eth;
diff --git a/fendermint/app/settings/src/resolver.rs b/fendermint/app/settings/src/resolver.rs
index 4aa4d545c4..958357de2d 100644
--- a/fendermint/app/settings/src/resolver.rs
+++ b/fendermint/app/settings/src/resolver.rs
@@ -1,7 +1,11 @@
 // Copyright 2022-2024 Protocol Labs
 // SPDX-License-Identifier: Apache-2.0, MIT
 
-use std::{path::PathBuf, time::Duration};
+use std::{
+    net::{SocketAddr, SocketAddrV4, SocketAddrV6},
+    path::PathBuf,
+    time::Duration,
+};
 
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DurationSeconds};
@@ -22,6 +26,7 @@ pub struct ResolverSettings {
     pub membership: MembershipSettings,
     pub connection: ConnectionSettings,
     pub content: ContentSettings,
+    pub iroh_resolver_config: IrohResolverSettings,
 }
 
 impl Default for ResolverSettings {
@@ -33,6 +38,7 @@
             membership: Default::default(),
             connection: Default::default(),
             content: Default::default(),
+            iroh_resolver_config: Default::default(),
         }
     }
 }
@@ -174,3 +180,27 @@ impl Default for ContentSettings {
         }
     }
 }
+
+/// Configuration for Iroh blob storage and transfer
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct IrohResolverSettings {
+    /// IPv4 address for Iroh node
+    pub v4_addr: Option<SocketAddrV4>,
+    /// IPv6 address for Iroh node
+    pub v6_addr: Option<SocketAddrV6>,
+    /// Data directory for Iroh
+    pub iroh_data_dir: PathBuf,
+    /// RPC address for Iroh
+    pub rpc_addr: SocketAddr,
+}
+
+impl Default for IrohResolverSettings {
+    fn default() -> Self {
+        Self {
+            v4_addr: None,
+            v6_addr: None,
+            iroh_data_dir: PathBuf::from("data/iroh_resolver"),
+            rpc_addr: "127.0.0.1:4444".parse().unwrap(),
+        }
+    }
+}
diff --git a/fendermint/app/src/cmd/rpc.rs b/fendermint/app/src/cmd/rpc.rs
index b06e67563b..37ca94ebc4 100644
--- a/fendermint/app/src/cmd/rpc.rs
+++ b/fendermint/app/src/cmd/rpc.rs
@@ -287,7 +287,7 @@ async fn fevm_estimate_gas(
 ///
 /// People can use `jq` to turn it into compact form if they want to save the results to a `.jsonline`
 /// file, but the default of having human readable output seems more useful.
-fn print_json<T: Serialize>(value: &T) -> anyhow::Result<()> {
+pub fn print_json<T: Serialize>(value: &T) -> anyhow::Result<()> {
     let json = serde_json::to_string_pretty(&value)?;
     println!("{}", json);
     Ok(())
diff --git a/fendermint/app/src/ipc.rs b/fendermint/app/src/ipc.rs
index dad0a74b67..61292b2985 100644
--- a/fendermint/app/src/ipc.rs
+++ b/fendermint/app/src/ipc.rs
@@ -13,7 +13,7 @@ use fendermint_vm_interpreter::fvm::state::{FvmExecState, FvmStateParams};
 use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore;
 use fendermint_vm_interpreter::MessagesInterpreter;
 use fendermint_vm_topdown::sync::ParentFinalityStateQuery;
-use fendermint_vm_topdown::IPCParentFinality;
+use fendermint_vm_topdown::{IPCBlobFinality, IPCParentFinality, IPCReadRequestClosed};
 use fvm_ipld_blockstore::Blockstore;
 use ipc_actors_abis::subnet_actor_checkpointing_facet::{
     AppHashBreakdown, Commitment, CompressedActivityRollup,
@@ -57,6 +57,10 @@ pub fn derive_subnet_app_hash(state: &SubnetAppState) -> tendermint::hash::AppHa
 pub enum AppVote {
     /// The validator considers a certain block final on the parent chain.
     ParentFinality(IPCParentFinality),
+    /// The validator considers a certain blob final.
+    BlobFinality(IPCBlobFinality),
+    /// The validator considers a certain read request completed.
+    ReadRequestClosed(IPCReadRequestClosed),
 }
 
 /// Queries the LATEST COMMITTED parent finality from the storage
diff --git a/fendermint/app/src/service/node.rs b/fendermint/app/src/service/node.rs
index d2baffacd8..e564a4f18c 100644
--- a/fendermint/app/src/service/node.rs
+++ b/fendermint/app/src/service/node.rs
@@ -9,16 +9,21 @@ use fendermint_rocksdb::{blockstore::NamespaceBlockstore, namespaces, RocksDb, R
 use fendermint_vm_actor_interface::eam::EthAddress;
 use fendermint_vm_interpreter::fvm::interpreter::FvmMessagesInterpreter;
 use fendermint_vm_interpreter::fvm::observe::register_metrics as register_interpreter_metrics;
+use fendermint_vm_interpreter::fvm::recall_env::{BlobPool, ReadRequestPool};
 use fendermint_vm_interpreter::fvm::topdown::TopDownManager;
 use fendermint_vm_interpreter::fvm::upgrades::UpgradeScheduler;
+use fendermint_vm_iroh_resolver::iroh::IrohResolver;
+use fendermint_vm_iroh_resolver::pool::ResolvePool;
 use fendermint_vm_snapshot::{SnapshotManager, SnapshotParams};
 use fendermint_vm_topdown::observe::register_metrics as register_topdown_metrics;
 use fendermint_vm_topdown::proxy::{IPCProviderProxy, IPCProviderProxyWithLatency};
 use fendermint_vm_topdown::sync::launch_polling_syncer;
 use fendermint_vm_topdown::voting::{publish_vote_loop, Error as VoteError, VoteTally};
-use fendermint_vm_topdown::{CachedFinalityProvider, IPCParentFinality, Toggle};
+use fendermint_vm_topdown::{
+    CachedFinalityProvider, IPCBlobFinality, IPCParentFinality, IPCReadRequestClosed, Toggle,
+};
 use fvm_shared::address::{current_network, Address, Network};
-use ipc_ipld_resolver::{Event as ResolverEvent, VoteRecord};
+use ipc_ipld_resolver::{Event as ResolverEvent, IrohConfig, VoteRecord};
 use ipc_observability::observe::register_metrics as register_default_metrics;
 use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig};
 use ipc_provider::IpcProvider;
@@ -123,12 +128,16 @@ pub async fn run(
 
     let parent_finality_votes = VoteTally::empty();
 
+    // Create Recall blob and read request resolution pools early so they can be used by the IrohResolver.
+    let blob_pool: BlobPool = ResolvePool::new();
+    let read_request_pool: ReadRequestPool = ResolvePool::new();
+
     let topdown_enabled = settings.topdown_enabled();
 
     // If enabled, start a resolver that communicates with the application through the resolve pool.
     if settings.resolver_enabled() {
         let mut service =
-            make_resolver_service(&settings, db.clone(), state_store.clone(), ns.bit_store)?;
+            make_resolver_service(&settings, db.clone(), state_store.clone(), ns.bit_store).await?;
 
         // Register all metrics from the IPLD resolver stack
         if let Some(ref registry) = metrics_registry {
@@ -146,8 +155,11 @@
             .context("error adding own provided subnet.")?;
 
         if topdown_enabled {
-            if let Some(key) = validator_keypair {
+            if let Some(ref key) = validator_keypair {
                 let parent_finality_votes = parent_finality_votes.clone();
+                let key = key.clone();
+                let client_for_voting = client.clone();
+                let subnet_id_for_voting = own_subnet_id.clone();
 
                 tracing::info!("starting the parent finality vote gossip loop...");
                 tokio::spawn(async move {
@@ -156,8 +168,8 @@
                         settings.ipc.vote_interval,
                         settings.ipc.vote_timeout,
                         key,
-                        own_subnet_id,
-                        client,
+                        subnet_id_for_voting,
+                        client_for_voting,
                         |height, block_hash| {
                             AppVote::ParentFinality(IPCParentFinality { height, block_hash })
                         },
@@ -169,6 +181,41 @@
            tracing::info!("parent finality vote gossip disabled");
        }
 
+        // Spawn Iroh resolvers for blob and read request resolution.
+        if let Some(ref key) = validator_keypair {
+            // Blob resolver
+            let iroh_resolver = IrohResolver::new(
+                client.clone(),
+                blob_pool.queue(),
+                settings.resolver.retry_delay,
+                parent_finality_votes.clone(),
+                key.clone(),
+                own_subnet_id.clone(),
+                |hash, success| AppVote::BlobFinality(IPCBlobFinality::new(hash, success)),
+                blob_pool.results(),
+            );
+
+            tracing::info!("starting the Iroh blob resolver...");
+            tokio::spawn(async move { iroh_resolver.run().await });
+
+            // Read request resolver
+            let read_request_resolver = IrohResolver::new(
+                client.clone(),
+                read_request_pool.queue(),
+                settings.resolver.retry_delay,
+                parent_finality_votes.clone(),
+                key.clone(),
+                own_subnet_id.clone(),
+                |hash, _| AppVote::ReadRequestClosed(IPCReadRequestClosed::new(hash)),
+                read_request_pool.results(),
+            );
+
+            tracing::info!("starting the Iroh read request resolver...");
+            tokio::spawn(async move { read_request_resolver.run().await });
+        } else {
+            tracing::info!("Iroh resolvers disabled (no validator key).");
+        }
+
         tracing::info!("subscribing to gossip...");
         let rx = service.subscribe();
         let parent_finality_votes = parent_finality_votes.clone();
@@ -370,7 +417,7 @@ fn open_db(settings: &Settings, ns: &Namespaces) -> anyhow::Result<RocksDb> {
     Ok(db)
 }
 
-fn make_resolver_service(
+async fn make_resolver_service(
     settings: &Settings,
     db: RocksDb,
     state_store: NamespaceBlockstore,
@@ -385,6 +432,7 @@
     let config = to_resolver_config(settings).context("error creating resolver config")?;
 
     let service = ipc_ipld_resolver::Service::new(config, bitswap_store)
+        .await
         .context("error creating IPLD Resolver Service")?;
 
     Ok(service)
@@ -465,6 +513,12 @@ fn to_resolver_config(settings: &Settings) -> anyhow::Result<Config> {
+            AppVote::BlobFinality(blob) => {
+                let res = atomically_or_err(|| {
+                    parent_finality_votes.add_blob_vote(
+                        vote.public_key.clone(),
+                        blob.hash.as_bytes().to_vec(),
+                        blob.success,
+                    )
+                })
+                .await;
+
+                match res {
+                    Ok(_) => tracing::debug!(hash = %blob.hash, "blob vote handled"),
+                    Err(e) => {
+                        tracing::debug!(hash = %blob.hash, error = %e, "failed to handle blob vote")
+                    }
+                };
+            }
+            AppVote::ReadRequestClosed(read_req) => {
+                let res = atomically_or_err(|| {
+                    parent_finality_votes.add_blob_vote(
+                        vote.public_key.clone(),
+                        read_req.hash.as_bytes().to_vec(),
+                        true, // the read request completed successfully
+                    )
+                })
+                .await;
+
+                match res {
+                    Ok(_) => tracing::debug!(hash = %read_req.hash, "read request vote handled"),
+                    Err(e) => {
+                        tracing::debug!(hash = %read_req.hash, error = %e, "failed to handle read request vote")
+                    }
+                };
+            }
         }
     }
diff --git a/fendermint/rpc/Cargo.toml b/fendermint/rpc/Cargo.toml
index 834a591802..8748c5a0a0 100644
--- a/fendermint/rpc/Cargo.toml
+++ b/fendermint/rpc/Cargo.toml
@@ -24,6 +24,8 @@ cid = { workspace = true }
 fvm_ipld_encoding = { workspace = true }
 fvm_shared = { workspace = true }
 
+fendermint_actor_blobs_shared = { path = "../actors/blobs/shared" }
+fendermint_actor_bucket = { path = "../actors/bucket" }
 fendermint_crypto = { path = "../crypto" }
 fendermint_vm_actor_interface = { path = "../vm/actor_interface" }
 fendermint_vm_message = { path = "../vm/message" }
diff --git a/fendermint/rpc/src/message.rs b/fendermint/rpc/src/message.rs
index a3996f76aa..08389c39a9 100644
--- a/fendermint/rpc/src/message.rs
+++ b/fendermint/rpc/src/message.rs
@@ -6,6 +6,7 @@ use std::path::Path;
 use anyhow::Context;
 use base64::Engine;
 use bytes::Bytes;
+use fendermint_actor_bucket::{GetParams, Method::GetObject};
 use fendermint_crypto::SecretKey;
 use fendermint_vm_actor_interface::{eam, evm};
 use fendermint_vm_message::{chain::ChainMessage, signed::SignedMessage};
@@ -116,6 +117,33 @@ impl MessageFactory {
 
         Ok(msg)
     }
+
+    /// Get an object from a bucket.
+    pub fn os_get(
+        &mut self,
+        address: Address,
+        params: GetParams,
+        value: TokenAmount,
+        gas_params: GasParams,
+    ) -> anyhow::Result<Message> {
+        let params = RawBytes::serialize(params)?;
+        Ok(self.transaction(address, GetObject as u64, params, value, gas_params))
+    }
+
+    pub fn blob_get(
+        &mut self,
+        blob_hash: fendermint_actor_blobs_shared::bytes::B256,
+        value: TokenAmount,
+        gas_params: GasParams,
+    ) -> anyhow::Result<Message> {
+        use fendermint_actor_blobs_shared::blobs::GetBlobParams;
+        use fendermint_actor_blobs_shared::method::Method::GetBlob;
+        use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR;
+
+        let params = GetBlobParams(blob_hash);
+        let params = RawBytes::serialize(params)?;
+        Ok(self.transaction(BLOBS_ACTOR_ADDR, GetBlob as u64, params, value, gas_params))
+    }
 }
 
 /// Wrapper for MessageFactory which generates signed messages
 ///
diff --git a/fendermint/rpc/src/query.rs b/fendermint/rpc/src/query.rs
index 930606229e..a61f832b80 100644
--- a/fendermint/rpc/src/query.rs
+++ b/fendermint/rpc/src/query.rs
@@ -19,7 +19,11 @@ use fendermint_vm_message::query::{
     ActorState, BuiltinActors, FvmQuery, FvmQueryHeight, GasEstimate, StateParams,
 };
 
-use crate::response::encode_data;
+use crate::message::{GasParams, MessageFactory};
+use crate::response::{decode_blob_get, decode_os_get, encode_data};
+use fendermint_actor_bucket::{GetParams, Object};
+use fendermint_vm_actor_interface::system;
+use fvm_shared::econ::TokenAmount;
 
 #[derive(Serialize, Debug, Clone)]
 /// The parsed value from a query, along with the height at which the query was performed.
@@ -128,6 +132,50 @@ pub trait QueryClient: Sync {
         Ok(QueryResponse { height, value })
     }
 
+    /// Get an object in a bucket without including a transaction on the blockchain.
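+    ///
+    /// Example (illustrative only; assumes `client` is some implementation of
+    /// this trait and `params` a valid `GetParams` for the bucket):
+    ///
+    /// ```ignore
+    /// let object = client
+    ///     .os_get_call(bucket, params, TokenAmount::default(), gas, FvmQueryHeight::Committed)
+    ///     .await?;
+    /// ```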
+    async fn os_get_call(
+        &mut self,
+        address: Address,
+        params: GetParams,
+        value: TokenAmount,
+        gas_params: GasParams,
+        height: FvmQueryHeight,
+    ) -> anyhow::Result<Option<Object>> {
+        let msg = MessageFactory::new(system::SYSTEM_ACTOR_ADDR, 0)
+            .os_get(address, params, value, gas_params)?;
+
+        let response = self.call(msg, height).await?;
+        if response.value.code.is_err() {
+            return Err(anyhow!("{}", response.value.info));
+        }
+        let return_data = decode_os_get(&response.value)
+            .context("error decoding data from deliver_tx in call")?;
+
+        Ok(return_data)
+    }
+
+    /// Get a blob from the blobs actor without including a transaction on the blockchain.
+    async fn blob_get_call(
+        &mut self,
+        blob_hash: fendermint_actor_blobs_shared::bytes::B256,
+        value: TokenAmount,
+        gas_params: GasParams,
+        height: FvmQueryHeight,
+    ) -> anyhow::Result<Option<fendermint_actor_blobs_shared::blobs::Blob>> {
+        let msg = MessageFactory::new(system::SYSTEM_ACTOR_ADDR, 0)
+            .blob_get(blob_hash, value, gas_params)?;
+
+        let response = self.call(msg, height).await?;
+        if response.value.code.is_err() {
+            return Err(anyhow!("{}", response.value.info));
+        }
+        let return_data = decode_blob_get(&response.value)
+            .context("error decoding blob data from deliver_tx in call")?;
+
+        Ok(return_data)
+    }
+
     /// Run an ABCI query.
     async fn perform(&self, query: FvmQuery, height: FvmQueryHeight) -> anyhow::Result<AbciQuery>;
 }
diff --git a/fendermint/rpc/src/response.rs b/fendermint/rpc/src/response.rs
index f6ed6d567d..6f356513d0 100644
--- a/fendermint/rpc/src/response.rs
+++ b/fendermint/rpc/src/response.rs
@@ -3,6 +3,7 @@
 use anyhow::{anyhow, Context};
 use base64::Engine;
 use bytes::Bytes;
+use fendermint_actor_bucket::Object;
 use fendermint_vm_actor_interface::eam::{self, CreateReturn};
 use fvm_ipld_encoding::{BytesDe, RawBytes};
 use tendermint::abci::response::DeliverTx;
@@ -58,3 +59,18 @@ pub fn decode_fevm_return_data(data: RawBytes) -> anyhow::Result<Vec<u8>> {
         .map(|bz| bz.0)
         .map_err(|e| anyhow!("failed to deserialize bytes returned by FEVM method invocation: {e}"))
 }
+
+/// Decode the result of a bucket GetObject call.
+pub fn decode_os_get(deliver_tx: &DeliverTx) -> anyhow::Result<Option<Object>> {
+    let data = decode_data(&deliver_tx.data)?;
+    fvm_ipld_encoding::from_slice::<Option<Object>>(&data)
+        .map_err(|e| anyhow!("error parsing as Option<Object>: {e}"))
+}
+
+pub fn decode_blob_get(
+    deliver_tx: &DeliverTx,
+) -> anyhow::Result<Option<fendermint_actor_blobs_shared::blobs::Blob>> {
+    let data = decode_data(&deliver_tx.data)?;
+    fvm_ipld_encoding::from_slice::<Option<fendermint_actor_blobs_shared::blobs::Blob>>(&data)
+        .map_err(|e| anyhow!("error parsing as Option<Blob>: {e}"))
+}
diff --git a/fendermint/vm/actor_interface/src/adm.rs b/fendermint/vm/actor_interface/src/adm.rs
new file mode 100644
index 0000000000..4f08d564c8
--- /dev/null
+++ b/fendermint/vm/actor_interface/src/adm.rs
@@ -0,0 +1,76 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
+use fvm_shared::{address::Address, ActorID, METHOD_CONSTRUCTOR};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fmt::Display;
+
+define_singleton!(ADM {
+    id: 17,
+    code_id: 17
+});
+
+pub const ADM_ACTOR_NAME: &str = "adm";
+
+/// ADM actor methods available.
+#[repr(u64)]
+pub enum Method {
+    Constructor = METHOD_CONSTRUCTOR,
+    CreateExternal = 1214262202,
+    UpdateDeployers = 1768606754,
+    ListMetadata = 2283215593,
+    GetMachineCode = 2892692559,
+}
+
+/// The kinds of machines available.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum Kind {
+    /// A bucket with S3-like key semantics.
+    Bucket,
+    /// An MMR accumulator, used for timestamping data.
+    Timehub,
+}
+
+impl Display for Kind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let str = match self {
+            Self::Bucket => "bucket",
+            Self::Timehub => "timehub",
+        };
+        write!(f, "{}", str)
+    }
+}
+
+/// Machine metadata.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct Metadata {
+    /// Machine kind.
+    pub kind: Kind,
+    /// Machine ID address.
+    pub address: Address,
+    /// User-defined metadata.
+    pub metadata: HashMap<String, String>,
+}
+
+/// Helper for machine creation.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct CreateExternalParams {
+    pub owner: Address,
+    pub kind: Kind,
+    pub metadata: HashMap<String, String>,
+}
+
+/// Helper to read the return value from machine creation.
+#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)]
+pub struct CreateExternalReturn {
+    pub actor_id: ActorID,
+    pub robust_address: Option<Address>
,
+}
+
+/// Helper for listing machine metadata by owner.
+#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct ListMetadataParams {
+    pub owner: Address,
+}
diff --git a/fendermint/vm/actor_interface/src/blob_reader.rs b/fendermint/vm/actor_interface/src/blob_reader.rs
new file mode 100644
index 0000000000..94bce68b41
--- /dev/null
+++ b/fendermint/vm/actor_interface/src/blob_reader.rs
@@ -0,0 +1,4 @@
+// Copyright 2021-2023 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+define_id!(BLOB_READER { id: 67 });
diff --git a/fendermint/vm/actor_interface/src/blobs.rs b/fendermint/vm/actor_interface/src/blobs.rs
new file mode 100644
index 0000000000..7eaf992bca
--- /dev/null
+++ b/fendermint/vm/actor_interface/src/blobs.rs
@@ -0,0 +1,4 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+define_id!(BLOBS { id: 66 });
diff --git a/fendermint/vm/actor_interface/src/bucket.rs b/fendermint/vm/actor_interface/src/bucket.rs
new file mode 100644
index 0000000000..4353840af6
--- /dev/null
+++ b/fendermint/vm/actor_interface/src/bucket.rs
@@ -0,0 +1,5 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+// Note: See this thread about choosing the ids https://filecoinproject.slack.com/archives/C04JR5R1UL8/p1706638112395409
+define_code!(BUCKET { code_id: 68 });
diff --git a/fendermint/vm/actor_interface/src/init.rs b/fendermint/vm/actor_interface/src/init.rs
index 858a13faf8..b6245c9953 100644
--- a/fendermint/vm/actor_interface/src/init.rs
+++ b/fendermint/vm/actor_interface/src/init.rs
@@ -18,6 +18,8 @@ pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100;
 
 define_singleton!(INIT { id: 1, code_id: 2 });
 
+pub const INIT_ACTOR_NAME: &str = "init";
+
 pub type AddressMap = BTreeMap<ActorID, Address>;
 
 /// Delegated address of an Ethereum built-in actor.
diff --git a/fendermint/vm/actor_interface/src/lib.rs b/fendermint/vm/actor_interface/src/lib.rs
index dea7cd1b70..033a45367a 100644
--- a/fendermint/vm/actor_interface/src/lib.rs
+++ b/fendermint/vm/actor_interface/src/lib.rs
@@ -44,6 +44,9 @@ macro_rules!
define_singleton { pub mod account; pub mod activity; +pub mod adm; +pub mod blob_reader; +pub mod blobs; pub mod burntfunds; pub mod chainmetadata; pub mod cron; @@ -57,5 +60,6 @@ pub mod init; pub mod ipc; pub mod multisig; pub mod placeholder; +pub mod recall_config; pub mod reward; pub mod system; diff --git a/fendermint/vm/actor_interface/src/recall_config.rs b/fendermint/vm/actor_interface/src/recall_config.rs new file mode 100644 index 0000000000..0e18bd50aa --- /dev/null +++ b/fendermint/vm/actor_interface/src/recall_config.rs @@ -0,0 +1,4 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +define_id!(RECALL_CONFIG { id: 70 }); diff --git a/fendermint/vm/interpreter/Cargo.toml b/fendermint/vm/interpreter/Cargo.toml index b364e3c5f0..dd1a3aa166 100644 --- a/fendermint/vm/interpreter/Cargo.toml +++ b/fendermint/vm/interpreter/Cargo.toml @@ -29,9 +29,22 @@ fendermint_actor_activity_tracker = { path = "../../actors/activity-tracker" } fendermint_actor_f3_light_client = { path = "../../actors/f3-light-client" } fendermint_actor_gas_market_eip1559 = { path = "../../actors/gas_market/eip1559" } fendermint_actor_eam = { path = "../../actors/eam" } +fendermint_actor_init = { path = "../../actors/init" } +fendermint_actor_adm = { path = "../../actors/adm" } +fendermint_actor_blobs = { path = "../../actors/blobs" } +fendermint_actor_blobs_shared = { path = "../../actors/blobs/shared" } +fendermint_actor_blob_reader = { path = "../../actors/blob_reader" } +fendermint_actor_recall_config = { path = "../../actors/recall_config" } +fendermint_actor_recall_config_shared = { path = "../../actors/recall_config/shared" } +fil_actor_adm = { workspace = true } fil_actor_evm = { workspace = true } fendermint_testing = { path = "../../testing", optional = true } ipc_actors_abis = { path = "../../../contract-bindings" } +recall_executor = { path = "../../../recall/executor" } +recall_kernel = { path = "../../../recall/kernel" } +fendermint_vm_iroh_resolver = { path = "../iroh_resolver" } +iroh = { workspace = true } +iroh-blobs = { workspace = true } fil_actor_eam = { workspace = true } ipc-api = { path = "../../../ipc/api" } ipc-observability = { path = "../../../ipc/observability" } diff --git a/fendermint/vm/interpreter/src/fvm/interpreter.rs b/fendermint/vm/interpreter/src/fvm/interpreter.rs index 5a3cb5bc52..aa134fe220 100644 --- a/fendermint/vm/interpreter/src/fvm/interpreter.rs +++ b/fendermint/vm/interpreter/src/fvm/interpreter.rs @@ -1,24 +1,15 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::{Context, Result}; -use cid::Cid; -use fendermint_vm_message::chain::ChainMessage; -use fendermint_vm_message::ipc::IpcMessage; -use fendermint_vm_message::query::{FvmQuery, StateParams}; -use fendermint_vm_message::signed::SignedMessage; -use fvm_ipld_blockstore::Blockstore; -use fvm_ipld_encoding::{self}; -use fvm_shared::{address::Address, error::ExitCode}; -use std::sync::Arc; -use std::time::Instant; - use crate::errors::*; use crate::fvm::end_block_hook::{EndBlockManager, PowerUpdates}; use crate::fvm::executions::{ execute_cron_message, execute_signed_message, push_block_to_chainmeta_actor_if_possible, }; use crate::fvm::gas_estimation::{estimate_gassed_msg, gas_search}; +use crate::fvm::recall_helpers::{ + close_read_request, read_request_callback, set_read_request_pending, +}; use crate::fvm::topdown::TopDownManager; use crate::fvm::{ activity::ValidatorActivityTracker, @@ -33,10 +24,21 @@ use crate::selectors::{ }; 
 use crate::types::*;
 use crate::MessagesInterpreter;
+use anyhow::{Context, Result};
+use cid::Cid;
+use fendermint_vm_message::chain::ChainMessage;
+use fendermint_vm_message::ipc::IpcMessage;
+use fendermint_vm_message::query::{FvmQuery, StateParams};
+use fendermint_vm_message::signed::SignedMessage;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding;
 use fvm_shared::state::ActorState;
 use fvm_shared::ActorID;
+use fvm_shared::{address::Address, error::ExitCode};
 use ipc_observability::emit;
 use std::convert::TryInto;
+use std::sync::Arc;
+use std::time::Instant;
 
 struct Actor {
     id: ActorID,
@@ -267,8 +269,8 @@
             })
            .collect::<Vec<_>>();
 
-        let signed_msgs =
-            select_messages_above_base_fee(signed_msgs, state.block_gas_tracker().base_fee());
+        // let signed_msgs =
+        //     select_messages_above_base_fee(signed_msgs, state.block_gas_tracker().base_fee());
 
         let total_gas_limit = state.block_gas_tracker().available();
         let signed_msgs_iter = select_messages_by_gas_limit(signed_msgs, total_gas_limit)
@@ -281,8 +283,11 @@
             .await
             .into_iter();
 
-        let mut all_msgs = top_down_iter
-            .chain(signed_msgs_iter)
+        let chain_msgs: Vec<ChainMessage> = top_down_iter.chain(signed_msgs_iter).collect();
+
+        // Encode all chain messages to IPLD.
+        let mut all_msgs = chain_msgs
+            .into_iter()
             .map(|msg| fvm_ipld_encoding::to_vec(&msg).context("failed to encode message as IPLD"))
            .collect::<anyhow::Result<Vec<Vec<u8>>>>()?;
@@ -338,6 +343,14 @@
                     return Ok(AttestMessagesResponse::Reject);
                 }
             }
+            ChainMessage::Ipc(IpcMessage::ReadRequestPending(_)) => {
+                // Read request pending messages are validated in prepare_messages_for_block;
+                // just accept them here.
+            }
+            ChainMessage::Ipc(IpcMessage::ReadRequestClosed(_)) => {
+                // Read request closed messages are validated in prepare_messages_for_block;
+                // just accept them here.
+            }
             ChainMessage::Signed(signed) => {
                 if signed.message.gas_fee_cap < *base_fee {
                     tracing::warn!(
@@ -467,6 +480,42 @@
                     domain_hash: None,
                 })
             }
+            IpcMessage::ReadRequestPending(read_request) => {
+                // Set the read request to the "pending" state.
+                let ret = set_read_request_pending(state, read_request.id)?;
+
+                tracing::debug!(
+                    request_id = %read_request.id,
+                    "chain interpreter has set read request to pending"
+                );
+
+                Ok(ApplyMessageResponse {
+                    applied_message: ret.into(),
+                    domain_hash: None,
+                })
+            }
+            IpcMessage::ReadRequestClosed(read_request) => {
+                // Send the data to the callback address.
+                // If this fails (e.g., the callback address is not reachable),
+                // we will still close the request.
+                //
+                // We MUST use a non-privileged actor (BLOB_READER_ACTOR_ADDR) to call the callback.
+                // This prevents a malicious user from accessing unauthorized APIs.
+                read_request_callback(state, &read_request)?;
+
+                // Set the status of the request to closed.
+                let ret = close_read_request(state, read_request.id)?;
+
+                tracing::debug!(
+                    request_id = %read_request.id,
+                    "chain interpreter has closed read request"
+                );
+
+                Ok(ApplyMessageResponse {
+                    applied_message: ret.into(),
+                    domain_hash: None,
+                })
+            }
         },
     }
 }
diff --git a/fendermint/vm/interpreter/src/fvm/mod.rs b/fendermint/vm/interpreter/src/fvm/mod.rs
index 762c8b696a..2c28c52b12 100644
--- a/fendermint/vm/interpreter/src/fvm/mod.rs
+++ b/fendermint/vm/interpreter/src/fvm/mod.rs
@@ -6,6 +6,8 @@ mod executions;
 mod externs;
 pub mod interpreter;
 pub mod observe;
+pub mod recall_env;
+pub mod recall_helpers;
 pub mod state;
 pub mod store;
 pub mod topdown;
diff --git a/fendermint/vm/interpreter/src/fvm/recall_env.rs b/fendermint/vm/interpreter/src/fvm/recall_env.rs
new file mode 100644
index 0000000000..9e82a4f924
--- /dev/null
+++ b/fendermint/vm/interpreter/src/fvm/recall_env.rs
@@ -0,0 +1,70 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Recall environment types for blob and read request resolution.
+
+use fendermint_actor_blobs_shared::blobs::SubscriptionId;
+use fendermint_vm_iroh_resolver::pool::{
+    ResolveKey as IrohResolveKey, ResolvePool as IrohResolvePool,
+    ResolveSource as IrohResolveSource, TaskType as IrohTaskType,
+};
+use fvm_shared::{address::Address, MethodNum};
+use iroh::NodeId;
+use iroh_blobs::Hash;
+
+pub type BlobPool = IrohResolvePool<BlobPoolItem>;
+pub type ReadRequestPool = IrohResolvePool<ReadRequestPoolItem>;
+
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub struct BlobPoolItem {
+    pub subscriber: Address,
+    pub hash: Hash,
+    pub size: u64,
+    pub id: SubscriptionId,
+    pub source: NodeId,
+}
+
+impl From<&BlobPoolItem> for IrohResolveKey {
+    fn from(value: &BlobPoolItem) -> Self {
+        Self { hash: value.hash }
+    }
+}
+
+impl From<&BlobPoolItem> for IrohTaskType {
+    fn from(value: &BlobPoolItem) -> Self {
+        Self::ResolveBlob {
+            source: IrohResolveSource { id: value.source },
+            size: value.size,
+        }
+    }
+}
+
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub struct ReadRequestPoolItem {
+    /// The unique id of the read request.
+    pub id: Hash,
+    /// The hash of the blob that the read request is for.
+    pub blob_hash: Hash,
+    /// The offset of the read request.
+    pub offset: u32,
+    /// The length of the read request.
+    pub len: u32,
+    /// The address and method to call back when the read request is closed.
+    pub callback: (Address, MethodNum),
+}
+
+impl From<&ReadRequestPoolItem> for IrohResolveKey {
+    fn from(value: &ReadRequestPoolItem) -> Self {
+        Self { hash: value.id }
+    }
+}
+
+impl From<&ReadRequestPoolItem> for IrohTaskType {
+    fn from(value: &ReadRequestPoolItem) -> Self {
+        Self::CloseReadRequest {
+            blob_hash: value.blob_hash,
+            offset: value.offset,
+            len: value.len,
+        }
+    }
+}
diff --git a/fendermint/vm/interpreter/src/fvm/recall_helpers.rs b/fendermint/vm/interpreter/src/fvm/recall_helpers.rs
new file mode 100644
index 0000000000..7b03f825ab
--- /dev/null
+++ b/fendermint/vm/interpreter/src/fvm/recall_helpers.rs
@@ -0,0 +1,376 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Helper functions for Recall blob and read request operations.
+
+use crate::fvm::constants::BLOCK_GAS_LIMIT;
+use anyhow::{anyhow, Result};
+use fendermint_actor_blob_reader::{
+    CloseReadRequestParams, GetOpenReadRequestsParams, GetPendingReadRequestsParams,
+    GetReadRequestStatusParams,
+    Method::{
+        CloseReadRequest, GetOpenReadRequests, GetPendingReadRequests, GetReadRequestStatus,
+        SetReadRequestPending,
+    },
+    ReadRequestStatus, SetReadRequestPendingParams, BLOB_READER_ACTOR_ADDR,
+};
+use fendermint_actor_blobs_shared::blobs::{
+    BlobStatus, GetAddedBlobsParams, GetBlobStatusParams, GetPendingBlobsParams, SubscriptionId,
+};
+use fendermint_actor_blobs_shared::bytes::B256;
+use fendermint_actor_blobs_shared::method::Method::{
+    GetAddedBlobs, GetBlobStatus, GetPendingBlobs, GetStats,
+};
+use fendermint_actor_blobs_shared::{GetStatsReturn, BLOBS_ACTOR_ADDR};
+use fendermint_vm_actor_interface::system;
+use fendermint_vm_message::ipc::ClosedReadRequest;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::RawBytes;
+use fvm_shared::{address::Address, message::Message, MethodNum};
+use iroh_blobs::Hash;
+use std::collections::HashSet;
+
+use super::state::FvmExecState;
+use super::store::ReadOnlyBlockstore;
+use crate::fvm::state::FvmApplyRet;
+
+type BlobItem = (Hash, u64, HashSet<(Address, SubscriptionId, iroh::NodeId)>);
+type ReadRequestItem = (Hash, Hash, u32, u32, Address, MethodNum);
+
+/// Get added blobs from on-chain state.
+pub fn get_added_blobs<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    size: u32,
+) -> Result<Vec<BlobItem>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = GetAddedBlobsParams(size);
+    let params = RawBytes::serialize(params)?;
+    let msg = create_implicit_message(
+        BLOBS_ACTOR_ADDR,
+        GetAddedBlobs as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Vec<BlobItem>>(&data)
+        .map_err(|e| anyhow!("error parsing added blobs: {e}"))
+}
+
+/// Get pending blobs from on-chain state.
+pub fn get_pending_blobs<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    size: u32,
+) -> Result<Vec<BlobItem>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = GetPendingBlobsParams(size);
+    let params = RawBytes::serialize(params)?;
+    let msg = create_implicit_message(
+        BLOBS_ACTOR_ADDR,
+        GetPendingBlobs as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Vec<BlobItem>>(&data)
+        .map_err(|e| anyhow!("error parsing pending blobs: {e}"))
+}
+
+/// Helper function to check blob status by reading its on-chain state.
+pub fn get_blob_status<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    subscriber: Address,
+    hash: Hash,
+    id: SubscriptionId,
+) -> Result<Option<BlobStatus>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let hash = B256(*hash.as_bytes());
+    let params = GetBlobStatusParams {
+        subscriber,
+        hash,
+        id,
+    };
+    let params = RawBytes::serialize(params)?;
+    let msg = create_implicit_message(
+        BLOBS_ACTOR_ADDR,
+        GetBlobStatus as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Option<BlobStatus>>(&data)
+        .map_err(|e| anyhow!("error parsing blob status: {e}"))
+}
+
+/// Check if a blob is in the added state, by reading its on-chain state.
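+///
+/// A blob that has just been added reports `BlobStatus::Added`; once resolution
+/// completes it is finalized as either `Resolved` or `Failed` (see
+/// `is_blob_finalized` below).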
+pub fn is_blob_added<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    subscriber: Address,
+    hash: Hash,
+    id: SubscriptionId,
+) -> Result<(bool, Option<BlobStatus>)>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let status = get_blob_status(state, subscriber, hash, id)?;
+    let added = if let Some(status) = status.clone() {
+        matches!(status, BlobStatus::Added)
+    } else {
+        false
+    };
+    Ok((added, status))
+}
+
+/// Check if a blob is finalized (resolved or failed), by reading its on-chain state.
+pub fn is_blob_finalized<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    subscriber: Address,
+    hash: Hash,
+    id: SubscriptionId,
+) -> Result<(bool, Option<BlobStatus>)>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let status = get_blob_status(state, subscriber, hash, id)?;
+    let finalized = if let Some(status) = status.clone() {
+        matches!(status, BlobStatus::Resolved | BlobStatus::Failed)
+    } else {
+        false
+    };
+    Ok((finalized, status))
+}
+
+/// Returns credit and blob stats from on-chain state.
+pub fn get_blobs_stats<DB>(state: &mut FvmExecState<DB>) -> Result<GetStatsReturn>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let msg = create_implicit_message(
+        BLOBS_ACTOR_ADDR,
+        GetStats as u64,
+        Default::default(),
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<GetStatsReturn>(&data)
+        .map_err(|e| anyhow!("error parsing stats: {e}"))
+}
+
+/// Get open read requests from on-chain state.
+pub fn get_open_read_requests<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    size: u32,
+) -> Result<Vec<ReadRequestItem>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = RawBytes::serialize(GetOpenReadRequestsParams(size))?;
+    let msg = create_implicit_message(
+        BLOB_READER_ACTOR_ADDR,
+        GetOpenReadRequests as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Vec<ReadRequestItem>>(&data)
+        .map_err(|e| anyhow!("error parsing read requests: {e}"))
+}
+
+/// Get pending read requests from on-chain state.
+pub fn get_pending_read_requests<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    size: u32,
+) -> Result<Vec<ReadRequestItem>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = RawBytes::serialize(GetPendingReadRequestsParams(size))?;
+    let msg = create_implicit_message(
+        BLOB_READER_ACTOR_ADDR,
+        GetPendingReadRequests as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Vec<ReadRequestItem>>(&data)
+        .map_err(|e| anyhow!("error parsing read requests: {e}"))
+}
+
+/// Get the status of a read request from on-chain state.
+pub fn get_read_request_status<DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    id: Hash,
+) -> Result<Option<ReadRequestStatus>>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let request_id = B256(*id.as_bytes());
+    let params = RawBytes::serialize(GetReadRequestStatusParams(request_id))?;
+    let msg = create_implicit_message(
+        BLOB_READER_ACTOR_ADDR,
+        GetReadRequestStatus as u64,
+        params,
+        BLOCK_GAS_LIMIT,
+    );
+
+    let (apply_ret, _) = state.execute_implicit(msg)?;
+    let data = apply_ret.msg_receipt.return_data.to_vec();
+    fvm_ipld_encoding::from_slice::<Option<ReadRequestStatus>>(&data)
+        .map_err(|e| anyhow!("error parsing read request status: {e}"))
+}
+
+/// Set the on-chain state of a read request to pending.
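+///
+/// Read requests move through a simple lifecycle: discovered while open
+/// (`GetOpenReadRequests`), marked pending here while they are being resolved,
+/// and finally removed via `CloseReadRequest` once the callback has been sent.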
+/// Set the on-chain state of a read request to pending.
+pub fn set_read_request_pending<DB>(state: &mut FvmExecState<DB>, id: Hash) -> Result<FvmApplyRet>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = RawBytes::serialize(SetReadRequestPendingParams(B256(*id.as_bytes())))?;
+    let gas_limit = BLOCK_GAS_LIMIT;
+    let msg = create_implicit_message(
+        BLOB_READER_ACTOR_ADDR,
+        SetReadRequestPending as u64,
+        params,
+        gas_limit,
+    );
+
+    let (apply_ret, emitters) = state.execute_implicit(msg)?;
+    Ok(FvmApplyRet {
+        apply_ret,
+        from: system::SYSTEM_ACTOR_ADDR,
+        to: BLOB_READER_ACTOR_ADDR,
+        method_num: SetReadRequestPending as u64,
+        gas_limit,
+        emitters,
+    })
+}
+
+/// Execute the callback for a read request.
+pub fn read_request_callback<DB>(
+    state: &mut FvmExecState<DB>,
+    read_request: &ClosedReadRequest,
+) -> Result<()>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let ClosedReadRequest {
+        id,
+        blob_hash: _,
+        offset: _,
+        len: _,
+        callback: (to, method_num),
+        response,
+    } = read_request.clone();
+
+    let params = RawBytes::serialize((id, response))?;
+    let msg = Message {
+        version: Default::default(),
+        from: BLOB_READER_ACTOR_ADDR,
+        to,
+        sequence: 0,
+        value: Default::default(),
+        method_num,
+        params,
+        gas_limit: BLOCK_GAS_LIMIT,
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+    let result = state.execute_implicit(msg);
+    match result {
+        Ok((apply_ret, _)) => {
+            tracing::debug!(
+                "callback delivered for id: {:?}, exit code: {:?}",
+                id,
+                apply_ret.msg_receipt.exit_code
+            );
+        }
+        Err(e) => {
+            tracing::error!(
+                "failed to execute read request callback for id: {:?}, error: {}",
+                id,
+                e
+            );
+        }
+    }
+
+    Ok(())
+}
+
+/// Remove a read request from on-chain state.
+pub fn close_read_request<DB>(state: &mut FvmExecState<DB>, id: Hash) -> Result<FvmApplyRet>
+where
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    let params = RawBytes::serialize(CloseReadRequestParams(B256(*id.as_bytes())))?;
+    let gas_limit = BLOCK_GAS_LIMIT;
+    let msg = create_implicit_message(
+        BLOB_READER_ACTOR_ADDR,
+        CloseReadRequest as u64,
+        params,
+        gas_limit,
+    );
+
+    let (apply_ret, emitters) = state.execute_implicit(msg)?;
+    Ok(FvmApplyRet {
+        apply_ret,
+        from: system::SYSTEM_ACTOR_ADDR,
+        to: BLOB_READER_ACTOR_ADDR,
+        method_num: CloseReadRequest as u64,
+        gas_limit,
+        emitters,
+    })
+}
+
+/// Creates a standard implicit message with default values.
+pub fn create_implicit_message(
+    to: Address,
+    method_num: u64,
+    params: RawBytes,
+    gas_limit: u64,
+) -> Message {
+    Message {
+        version: Default::default(),
+        from: system::SYSTEM_ACTOR_ADDR,
+        to,
+        sequence: 0,
+        value: Default::default(),
+        method_num,
+        params,
+        gas_limit,
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    }
+}
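
Editor's note: extracted on its own, the implicit-call pattern that every helper in this file shares looks like this (a sketch; `state` is any `FvmExecState` as in the functions above):

```rust
// Sketch only: a parameterless, system-signed call to the blobs actor.
// Implicit messages skip signature verification and are not charged to a
// user account, which is why the system actor is the sender.
let msg = create_implicit_message(
    BLOBS_ACTOR_ADDR,
    GetStats as u64,
    Default::default(), // empty params
    BLOCK_GAS_LIMIT,
);
let (apply_ret, _emitters) = state.execute_implicit(msg)?;
let stats: GetStatsReturn =
    fvm_ipld_encoding::from_slice(&apply_ret.msg_receipt.return_data.to_vec())?;
```
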
+/// Calls a function inside a state transaction.
+pub fn with_state_transaction<F, T, DB>(
+    state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
+    f: F,
+) -> Result<T>
+where
+    F: FnOnce(&mut FvmExecState<ReadOnlyBlockstore<DB>>) -> Result<T>,
+    DB: Blockstore + Clone + 'static + Send + Sync,
+{
+    state.state_tree_mut().begin_transaction();
+    let result = f(state);
+    state
+        .state_tree_mut()
+        .end_transaction(true)
+        .expect("interpreter failed to end state transaction");
+    result
+}
diff --git a/fendermint/vm/interpreter/src/fvm/state/exec.rs b/fendermint/vm/interpreter/src/fvm/state/exec.rs
index 7a6372ffa3..eae27b769c 100644
--- a/fendermint/vm/interpreter/src/fvm/state/exec.rs
+++ b/fendermint/vm/interpreter/src/fvm/state/exec.rs
@@ -18,17 +18,18 @@ use fendermint_vm_genesis::PowerScale;
 use fvm::{
     call_manager::DefaultCallManager,
     engine::MultiEngine,
-    executor::{ApplyFailure, ApplyKind, ApplyRet, DefaultExecutor, Executor},
+    executor::{ApplyFailure, ApplyKind, ApplyRet, Executor},
     machine::{DefaultMachine, Machine, Manifest, NetworkConfig},
     state_tree::StateTree,
-    DefaultKernel,
 };
 use fvm_ipld_blockstore::Blockstore;
 use fvm_ipld_encoding::RawBytes;
 use fvm_shared::{
     address::Address, chainid::ChainID, clock::ChainEpoch, econ::TokenAmount, error::ExitCode,
-    message::Message, receipt::Receipt, version::NetworkVersion, ActorID,
+    message::Message, receipt::Receipt, version::NetworkVersion, ActorID, MethodNum,
 };
+use recall_executor::RecallExecutor;
+use recall_kernel::RecallKernel;
 use serde::{Deserialize, Serialize};
 use serde_with::serde_as;
 use std::fmt;
@@ -42,6 +43,33 @@ pub type ActorAddressMap = HashMap<ActorID, Address>;
 /// The result of the message application bundled with any delegated addresses of event emitters.
 pub type ExecResult = anyhow::Result<(ApplyRet, ActorAddressMap)>;
 
+/// The return value extended with some things from the message that
+/// might not be available to the caller, because of the message lookups
+/// and transformations that happen along the way, e.g. where we need
+/// a field, we might just have a CID.
+pub struct FvmApplyRet {
+    pub apply_ret: ApplyRet,
+    pub from: Address,
+    pub to: Address,
+    pub method_num: MethodNum,
+    pub gas_limit: u64,
+    /// Delegated addresses of event emitters, if they have one.
+    pub emitters: HashMap<ActorID, Address>,
+}
+
+impl From<FvmApplyRet> for crate::types::AppliedMessage {
+    fn from(ret: FvmApplyRet) -> Self {
+        Self {
+            apply_ret: ret.apply_ret,
+            from: ret.from,
+            to: ret.to,
+            method_num: ret.method_num,
+            gas_limit: ret.gas_limit,
+            emitters: ret.emitters,
+        }
+    }
+}
+
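Editor's note: the `From` impl above lets a handler pass the enriched return value straight to the interpreter's message-result type. A sketch, where `ret` stands for an `FvmApplyRet` such as the one produced by `close_read_request` in the helpers earlier in this diff:

```rust
// Sketch only: converting the enriched return value. The sender, receiver,
// and method number travel with the receipt, so consumers need not
// re-derive them from the original message.
let applied: crate::types::AppliedMessage = ret.into();
tracing::debug!(
    from = %applied.from,
    to = %applied.to,
    method = applied.method_num,
    exit_code = ?applied.apply_ret.msg_receipt.exit_code,
    "implicit call applied"
);
```
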
 /// Parts of the state which evolve during the lifetime of the chain.
 #[serde_as]
 #[derive(Serialize, Deserialize, Clone, Eq, PartialEq)]
@@ -133,9 +161,8 @@
 where
     DB: Blockstore + Clone + 'static,
 {
     #[allow(clippy::type_complexity)]
-    executor: DefaultExecutor<
-        DefaultKernel<DefaultCallManager<DefaultMachine<DB, FendermintExterns<DB>>>>,
-    >,
+    executor:
+        RecallExecutor<RecallKernel<DefaultCallManager<DefaultMachine<DB, FendermintExterns<DB>>>>>,
 
     /// Hash of the block currently being executed. For queries and checks this is empty.
     ///
     /// The main motivation to add it here was to make it easier to pass in data to the
@@ -186,7 +213,7 @@
         let engine = multi_engine.get(&nc)?;
         let externs = FendermintExterns::new(blockstore.clone(), params.state_root);
         let machine = DefaultMachine::new(&mc, blockstore.clone(), externs)?;
-        let mut executor = DefaultExecutor::new(engine.clone(), machine)?;
+        let mut executor = RecallExecutor::new(engine.clone(), machine)?;
 
         let block_gas_tracker = BlockGasTracker::create(&mut executor)?;
         let base_fee = block_gas_tracker.base_fee().clone();
@@ -291,8 +318,8 @@
     pub fn execute_with_executor<F, T>(&mut self, exec_func: F) -> anyhow::Result<T>
     where
         F: FnOnce(
-            &mut DefaultExecutor<
-                DefaultKernel<DefaultCallManager<DefaultMachine<DB, FendermintExterns<DB>>>>,
+            &mut RecallExecutor<
+                RecallKernel<DefaultCallManager<DefaultMachine<DB, FendermintExterns<DB>>>>,
             >,
         ) -> anyhow::Result<T>,
     {
diff --git a/fendermint/vm/interpreter/src/fvm/state/mod.rs b/fendermint/vm/interpreter/src/fvm/state/mod.rs
index ba601f0a2e..5e398a788f 100644
--- a/fendermint/vm/interpreter/src/fvm/state/mod.rs
+++ b/fendermint/vm/interpreter/src/fvm/state/mod.rs
@@ -20,5 +20,7 @@ pub use query::FvmQueryState;
 
 use super::store::ReadOnlyBlockstore;
 
+pub use exec::FvmApplyRet;
+
 /// We use full state even for checking, to support certain client scenarios.
 pub type CheckStateRef<DB> = Arc<tokio::sync::Mutex<Option<FvmExecState<ReadOnlyBlockstore<DB>>>>>;
diff --git a/fendermint/vm/interpreter/src/genesis.rs b/fendermint/vm/interpreter/src/genesis.rs
index eac39915cb..c2211b7b98 100644
--- a/fendermint/vm/interpreter/src/genesis.rs
+++ b/fendermint/vm/interpreter/src/genesis.rs
@@ -5,6 +5,7 @@ use std::collections::{BTreeSet, HashMap};
 use std::io::{Cursor, Read, Write};
 use std::marker::PhantomData;
 use std::path::{Path, PathBuf};
+use std::str::FromStr;
 use std::sync::Arc;
 
 use anyhow::{anyhow, Context};
@@ -18,8 +19,8 @@ use fendermint_eth_hardhat::{ContractSourceAndName, Hardhat, FQN};
 use fendermint_vm_actor_interface::diamond::{EthContract, EthContractMap};
 use fendermint_vm_actor_interface::eam::EthAddress;
 use fendermint_vm_actor_interface::{
-    account, activity, burntfunds, chainmetadata, cron, eam, f3_light_client, gas_market, init,
-    ipc, reward, system, EMPTY_ARR,
+    account, activity, adm, blob_reader, blobs, burntfunds, chainmetadata, cron, eam,
+    f3_light_client, gas_market, init, ipc, recall_config, reward, system, EMPTY_ARR,
 };
 use fendermint_vm_core::Timestamp;
 use fendermint_vm_genesis::{ActorMeta, Collateral, Genesis, Power, PowerScale, Validator};
@@ -302,14 +303,17 @@ impl<'a> GenesisBuilder<'a> {
             .context("failed to create system actor")?;
 
         // Init actor
+        // Add Blobs actor ID to eth_builtin_ids so its delegated address is registered
+        let mut eth_builtin_ids: BTreeSet<_> =
+            ipc_entrypoints.values().map(|c| c.actor_id).collect();
+        eth_builtin_ids.insert(blobs::BLOBS_ACTOR_ID);
+        eth_builtin_ids.insert(adm::ADM_ACTOR_ID);
+
         let (init_state, addr_to_id) = init::State::new(
             state.store(),
             genesis.chain_name.clone(),
             &genesis.accounts,
-            &ipc_entrypoints
-                .values()
-                .map(|c| c.actor_id)
-                .collect::<BTreeSet<_>>(),
+            &eth_builtin_ids,
             all_ipc_contracts.len() as u64,
         )
         .context("failed to create init state")?;
@@ -376,6 +380,34 @@
         )
         .context("failed to create reward actor")?;
 
+        // Address manager (ADM) actor
+        let mut machine_codes = std::collections::HashMap::new();
+        for machine_name in &["bucket", "timehub"] {
+            if let Some(cid) = state.custom_actor_manifest.code_by_name(machine_name) {
+                let kind = fendermint_actor_adm::Kind::from_str(machine_name)
+                    .expect("failed to parse adm machine name");
+                tracing::info!(machine_name, cid = cid.to_string(), "registered machine");
machine"); + machine_codes.insert(kind, *cid); + } + } + let adm_state = fendermint_actor_adm::State::new( + state.store(), + machine_codes, + fendermint_actor_adm::PermissionModeParams::Unrestricted, + )?; + let eth_addr = init::builtin_actor_eth_addr(adm::ADM_ACTOR_ID); + let f4_addr = fvm_shared::address::Address::from(eth_addr); + tracing::info!("!!!!!!!! SETUP adm ACTOR !!!!!!!!: {eth_addr}, {eth_addr:?}"); + state + .create_custom_actor( + fendermint_vm_actor_interface::adm::ADM_ACTOR_NAME, + adm::ADM_ACTOR_ID, + &adm_state, + TokenAmount::zero(), + Some(f4_addr), + ) + .context("failed to create adm actor")?; + // STAGE 1b: Then we initialize the in-repo custom actors. // Initialize the chain metadata actor which handles saving metadata about the chain @@ -394,6 +426,47 @@ impl<'a> GenesisBuilder<'a> { ) .context("failed to create chainmetadata actor")?; + // Initialize the recall config actor. + let recall_config_state = fendermint_actor_recall_config::State { + admin: None, + config: fendermint_actor_recall_config_shared::RecallConfig::default(), + }; + state + .create_custom_actor( + fendermint_actor_recall_config::ACTOR_NAME, + recall_config::RECALL_CONFIG_ACTOR_ID, + &recall_config_state, + TokenAmount::zero(), + None, + ) + .context("failed to create recall config actor")?; + + // Initialize the blob actor with delegated address for Ethereum/Solidity access. + let blobs_state = fendermint_actor_blobs::State::new(&state.store())?; + let blobs_eth_addr = init::builtin_actor_eth_addr(blobs::BLOBS_ACTOR_ID); + let blobs_f4_addr = fvm_shared::address::Address::from(blobs_eth_addr); + state + .create_custom_actor( + fendermint_actor_blobs::BLOBS_ACTOR_NAME, + blobs::BLOBS_ACTOR_ID, + &blobs_state, + TokenAmount::zero(), + Some(blobs_f4_addr), + ) + .context("failed to create blobs actor")?; + tracing::info!("!!!!!!!! SETUP BLOB ACTOR !!!!!!!!: {blobs_eth_addr}, {blobs_eth_addr:?}"); + + // Initialize the blob reader actor. + state + .create_custom_actor( + fendermint_actor_blob_reader::BLOB_READER_ACTOR_NAME, + blob_reader::BLOB_READER_ACTOR_ID, + &fendermint_actor_blob_reader::State::new(&state.store())?, + TokenAmount::zero(), + None, + ) + .context("failed to create blob reader actor")?; + let eam_state = fendermint_actor_eam::State::new( state.store(), PermissionModeParams::from(genesis.eam_permission_mode), @@ -409,6 +482,18 @@ impl<'a> GenesisBuilder<'a> { ) .context("failed to replace built in eam actor")?; + // Replace Init actor with our custom version that allows ADM to spawn actors + state + .replace_builtin_actor( + init::INIT_ACTOR_NAME, + init::INIT_ACTOR_ID, + fendermint_actor_init::IPC_INIT_ACTOR_NAME, + &init_state, + TokenAmount::zero(), + None, + ) + .context("failed to replace built in init actor")?; + // Currently hardcoded for now, once genesis V2 is implemented, should be taken // from genesis parameters. 
 //
diff --git a/fendermint/vm/iroh_resolver/Cargo.toml b/fendermint/vm/iroh_resolver/Cargo.toml
new file mode 100644
index 0000000000..6bc15c73b5
--- /dev/null
+++ b/fendermint/vm/iroh_resolver/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "fendermint_vm_iroh_resolver"
+description = "Resolve iroh content in messages"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+anyhow = { workspace = true }
+async-stm = { workspace = true }
+hex = { workspace = true }
+im = { workspace = true }
+iroh = { workspace = true }
+iroh-blobs = { workspace = true }
+libp2p = { workspace = true }
+prometheus = { workspace = true }
+serde = { workspace = true }
+tracing = { workspace = true }
+tokio = { workspace = true }
+
+ipc-api = { path = "../../../ipc/api" }
+ipc_ipld_resolver = { path = "../../../ipld/resolver" }
+ipc-observability = { path = "../../../ipc/observability" }
+
+fendermint_vm_topdown = { path = "../topdown" }
+
+[dev-dependencies]
+rand = { workspace = true }
+tokio = { workspace = true }
diff --git a/fendermint/vm/iroh_resolver/src/iroh.rs b/fendermint/vm/iroh_resolver/src/iroh.rs
new file mode 100644
index 0000000000..ea3ebfec13
--- /dev/null
+++ b/fendermint/vm/iroh_resolver/src/iroh.rs
@@ -0,0 +1,294 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::time::Duration;
+
+use crate::observe::{
+    BlobsFinalityVotingFailure, BlobsFinalityVotingSuccess, ReadRequestsCloseVoting,
+};
+use async_stm::{atomically, atomically_or_err, queues::TQueueLike};
+use fendermint_vm_topdown::voting::VoteTally;
+use ipc_api::subnet_id::SubnetID;
+use ipc_ipld_resolver::{Client, ResolverIroh, ResolverIrohReadRequest, ValidatorKey, VoteRecord};
+use ipc_observability::emit;
+
+use iroh_blobs::Hash;
+use libp2p::identity::Keypair;
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+
+use crate::pool::{ResolveKey, ResolveQueue, ResolveResults, ResolveTask, TaskType};
+
+/// The iroh resolver takes resolution tasks from the [ResolvePool] and
+/// uses the [ipc_ipld_resolver] to fetch the content from the local iroh node.
+pub struct IrohResolver<V> {
+    client: Client<V>,
+    queue: ResolveQueue,
+    retry_delay: Duration,
+    vote_tally: VoteTally,
+    key: Keypair,
+    subnet_id: SubnetID,
+    to_vote: fn(Hash, bool) -> V,
+    results: ResolveResults,
+}
+
+impl<V> IrohResolver<V>
+where
+    V: Clone + Send + Sync + Serialize + DeserializeOwned + 'static,
+{
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        client: Client<V>,
+        queue: ResolveQueue,
+        retry_delay: Duration,
+        vote_tally: VoteTally,
+        key: Keypair,
+        subnet_id: SubnetID,
+        to_vote: fn(Hash, bool) -> V,
+        results: ResolveResults,
+    ) -> Self {
+        Self {
+            client,
+            queue,
+            retry_delay,
+            vote_tally,
+            key,
+            subnet_id,
+            to_vote,
+            results,
+        }
+    }
+
+    /// Start taking tasks from the resolver pool and resolving them using the iroh resolver.
+    pub async fn run(self) {
+        loop {
+            let task = atomically(|| {
+                let task = self.queue.read()?;
+                Ok(task)
+            })
+            .await;
+
+            start_resolve(
+                task,
+                self.client.clone(),
+                self.queue.clone(),
+                self.retry_delay,
+                self.vote_tally.clone(),
+                self.key.clone(),
+                self.subnet_id.clone(),
+                self.to_vote,
+                self.results.clone(),
+            );
+        }
+    }
+}
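
Editor's note: wiring the resolver into a node amounts to constructing it against the shared vote tally and a `ResolvePool`'s handles, then spawning `run`. A sketch; `client`, `vote_tally`, `keypair`, `subnet_id`, and the `MyVote` constructor are placeholders for whatever the embedding node has in scope:

```rust
// Sketch only: spawning the resolver loop against an existing ResolvePool.
let resolver = IrohResolver::new(
    client,
    pool.queue(),
    Duration::from_secs(10), // retry delay; tune to taste
    vote_tally,
    keypair,
    subnet_id,
    |hash, resolved| MyVote { hash, resolved }, // the to_vote constructor
    pool.results(),
);
tokio::spawn(resolver.run());
```
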
+
+/// Run task resolution in the background, so as not to block items from other
+/// subnets being tried.
+#[allow(clippy::too_many_arguments)]
+fn start_resolve<V>(
+    task: ResolveTask,
+    client: Client<V>,
+    queue: ResolveQueue,
+    retry_delay: Duration,
+    vote_tally: VoteTally,
+    key: Keypair,
+    subnet_id: SubnetID,
+    to_vote: fn(Hash, bool) -> V,
+    results: ResolveResults,
+) where
+    V: Clone + Send + Sync + Serialize + DeserializeOwned + 'static,
+{
+    tokio::spawn(async move {
+        tracing::debug!(hash = %task.hash(), "starting iroh blob resolve");
+        match task.task_type() {
+            TaskType::ResolveBlob { source, size } => {
+                match client
+                    .resolve_iroh(task.hash(), size, source.id.into())
+                    .await
+                {
+                    Ok(Ok(())) => {
+                        tracing::debug!(hash = %task.hash(), "iroh blob resolved");
+                        atomically(|| task.set_resolved()).await;
+                        if add_own_vote(
+                            task.hash(),
+                            client,
+                            vote_tally,
+                            key,
+                            subnet_id,
+                            true,
+                            to_vote,
+                        )
+                        .await
+                        {
+                            emit(BlobsFinalityVotingSuccess {
+                                blob_hash: Some(task.hash().to_string()),
+                            });
+                        }
+                    }
+                    Err(e) | Ok(Err(e)) => {
+                        tracing::error!(
+                            hash = %task.hash(),
+                            error = e.to_string(),
+                            "iroh blob resolution failed, attempting retry"
+                        );
+                        // If we fail to re-enqueue the task, cast a "failure" vote
+                        // and emit a failure event.
+                        if !reenqueue(task.clone(), queue, retry_delay).await
+                            && add_own_vote(
+                                task.hash(),
+                                client,
+                                vote_tally,
+                                key,
+                                subnet_id,
+                                false,
+                                to_vote,
+                            )
+                            .await
+                        {
+                            emit(BlobsFinalityVotingFailure {
+                                blob_hash: Some(task.hash().to_string()),
+                            });
+                        }
+                    }
+                };
+            }
+            TaskType::CloseReadRequest {
+                blob_hash,
+                offset,
+                len,
+            } => {
+                match client.close_read_request(blob_hash, offset, len).await {
+                    Ok(Ok(response)) => {
+                        let hash = task.hash();
+                        tracing::debug!(hash = %hash, "iroh read request resolved");
+
+                        atomically(|| task.set_resolved()).await;
+                        atomically(|| {
+                            results.update(|mut results| {
+                                results.insert(ResolveKey { hash }, response.to_vec());
+                                results
+                            })
+                        })
+                        .await;
+
+                        // Extend the task hash with the response data to use as the vote hash.
+                        // This ensures that all validators are voting
+                        // on the same response from iroh.
+ let mut task_id = task.hash().as_bytes().to_vec(); + task_id.extend(response.to_vec()); + let vote_hash = Hash::new(task_id); + if add_own_vote( + vote_hash, client, vote_tally, key, subnet_id, true, to_vote, + ) + .await + { + emit(ReadRequestsCloseVoting { + read_request_id: Some(vote_hash.to_string()), + }); + } + } + Err(e) | Ok(Err(e)) => { + tracing::error!( + hash = %task.hash(), + error = e.to_string(), + "iroh read request failed" + ); + if !reenqueue(task.clone(), queue, retry_delay).await { + tracing::error!( + hash = %task.hash(), + "failed to re-enqueue read request" + ); + } + } + }; + } + }; + }); +} + +async fn add_own_vote( + vote_hash: Hash, + client: Client, + vote_tally: VoteTally, + key: Keypair, + subnet_id: SubnetID, + resolved: bool, + to_vote: fn(Hash, bool) -> V, +) -> bool +where + V: Clone + Send + Sync + Serialize + DeserializeOwned + 'static, +{ + let vote = to_vote(vote_hash, resolved); + match VoteRecord::signed(&key, subnet_id, vote) { + Ok(vote) => { + let validator_key = ValidatorKey::from(key.public()); + let res = atomically_or_err(|| { + vote_tally.add_blob_vote( + validator_key.clone(), + vote_hash.as_bytes().to_vec(), + resolved, + ) + }) + .await; + + match res { + Ok(added) => { + if added { + // Send our own vote to peers + if let Err(e) = client.publish_vote(vote) { + tracing::error!(error = e.to_string(), "failed to publish vote"); + return false; + } + } + true + } + Err(e) => { + tracing::error!(error = e.to_string(), "failed to handle own vote"); + false + } + } + } + Err(e) => { + tracing::error!(error = e.to_string(), "failed to sign vote"); + false + } + } +} + +async fn reenqueue(task: ResolveTask, queue: ResolveQueue, retry_delay: Duration) -> bool { + if atomically(|| task.add_attempt()).await { + tracing::error!( + hash = %task.hash(), + "iroh task failed; retrying later" + ); + schedule_retry(task, queue, retry_delay).await; + true + } else { + tracing::error!( + hash = %task.hash(), + "iroh task failed; no attempts remaining" + ); + atomically(|| task.add_failure()).await; + false + } +} + +/// Part of error handling. +/// +/// In our case, we added the task from transaction processing, +/// which will not happen again, so there is no point further +/// propagating this error back to the sender to deal with. +/// Rather, we should retry until we can conclude whether it will +/// ever complete. Some errors raised by the service are transitive, +/// such as having no peers currently, but that might change. +/// +/// For now, let's retry the same task later. 
+async fn schedule_retry(task: ResolveTask, queue: ResolveQueue, retry_delay: Duration) { + tokio::spawn(async move { + tokio::time::sleep(retry_delay).await; + tracing::debug!(hash = %task.hash(), "retrying iroh task after sleep"); + atomically(|| queue.write(task.clone())).await; + }); +} diff --git a/fendermint/vm/iroh_resolver/src/lib.rs b/fendermint/vm/iroh_resolver/src/lib.rs new file mode 100644 index 0000000000..c08ab65321 --- /dev/null +++ b/fendermint/vm/iroh_resolver/src/lib.rs @@ -0,0 +1,7 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +pub mod iroh; +pub mod observe; +pub mod pool; diff --git a/fendermint/vm/iroh_resolver/src/observe.rs b/fendermint/vm/iroh_resolver/src/observe.rs new file mode 100644 index 0000000000..d3eeb15d2f --- /dev/null +++ b/fendermint/vm/iroh_resolver/src/observe.rs @@ -0,0 +1,172 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use ipc_observability::{ + impl_traceable, impl_traceables, lazy_static, register_metrics, Recordable, TraceLevel, + Traceable, +}; +use prometheus::{register_int_counter_vec, register_int_gauge, IntCounterVec, IntGauge, Registry}; + +register_metrics! { + BLOBS_FINALITY_VOTING_SUCCESS: IntCounterVec + = register_int_counter_vec!( + "blobs_finality_voting_success", + "Blobs finality: number of votes for successful blob resolution", + &["blob_hash"] + ); + BLOBS_FINALITY_VOTING_FAILURE: IntCounterVec + = register_int_counter_vec!( + "blobs_finality_voting_failure", + "Blobs finality: number of votes for failed blob resolution", + &["blob_hash"] + ); + BLOBS_FINALITY_PENDING_BLOBS: IntGauge + = register_int_gauge!( + "blobs_finality_pending_blobs", + "Blobs finality: current count of pending blobs" + ); + BLOBS_FINALITY_PENDING_BYTES: IntGauge + = register_int_gauge!("blobs_finality_pending_bytes", "Blobs finality: current count of pending bytes"); + + BLOBS_FINALITY_ADDED_BLOBS: IntGauge + = register_int_gauge!("blobs_finality_added_blobs", "Blobs finality: current count of added blobs"); + + BLOBS_FINALITY_ADDED_BYTES: IntGauge + = register_int_gauge!("blobs_finality_added_bytes", "Blobs finality: current count of added bytes"); + + READ_REQUESTS_VOTING_CLOSE: IntCounterVec + = register_int_counter_vec!( + "read_requests_voting_close", + "Read requests finality: number of votes for closing read request", + &["read_request_id"] + ); +} + +impl_traceables!( + TraceLevel::Debug, + "IrohResolver", + BlobsFinalityVotingFailure, + BlobsFinalityVotingSuccess, + BlobsFinalityPendingBlobs, + BlobsFinalityPendingBytes, + BlobsFinalityAddedBlobs, + BlobsFinalityAddedBytes, + ReadRequestsCloseVoting +); + +#[derive(Debug)] +pub struct BlobsFinalityVotingSuccess { + pub blob_hash: Option, +} + +impl Recordable for BlobsFinalityVotingSuccess { + fn record_metrics(&self) { + let hash = self.blob_hash.as_deref().unwrap_or(""); + BLOBS_FINALITY_VOTING_SUCCESS + .with_label_values(&[hash]) + .inc(); + } +} + +#[derive(Debug)] +pub struct BlobsFinalityVotingFailure { + pub blob_hash: Option, +} + +impl Recordable for BlobsFinalityVotingFailure { + fn record_metrics(&self) { + let hash = self.blob_hash.as_deref().unwrap_or(""); + BLOBS_FINALITY_VOTING_FAILURE + .with_label_values(&[hash]) + .inc(); + } +} + +#[derive(Debug)] +pub struct BlobsFinalityPendingBlobs(pub u64); + +impl Recordable for BlobsFinalityPendingBlobs { + fn record_metrics(&self) { + 
BLOBS_FINALITY_PENDING_BLOBS.set(self.0 as i64); + } +} + +#[derive(Debug)] +pub struct BlobsFinalityPendingBytes(pub u64); + +impl Recordable for BlobsFinalityPendingBytes { + fn record_metrics(&self) { + BLOBS_FINALITY_PENDING_BYTES.set(self.0 as i64); + } +} + +#[derive(Debug)] +pub struct BlobsFinalityAddedBlobs(pub u64); + +impl Recordable for BlobsFinalityAddedBlobs { + fn record_metrics(&self) { + BLOBS_FINALITY_ADDED_BLOBS.set(self.0 as i64); + } +} + +#[derive(Debug)] +pub struct BlobsFinalityAddedBytes(pub u64); + +impl Recordable for BlobsFinalityAddedBytes { + fn record_metrics(&self) { + BLOBS_FINALITY_ADDED_BYTES.set(self.0 as i64); + } +} + +#[derive(Debug)] +pub struct ReadRequestsCloseVoting { + pub read_request_id: Option, +} + +impl Recordable for ReadRequestsCloseVoting { + fn record_metrics(&self) { + let id = self.read_request_id.as_deref().unwrap_or(""); + READ_REQUESTS_VOTING_CLOSE.with_label_values(&[id]).inc(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ipc_observability::emit; + + #[test] + fn test_metrics() { + let registry = Registry::new(); + register_metrics(®istry).unwrap(); + } + + #[test] + fn test_metric_increase() { + let registry = Registry::new(); + register_metrics(®istry).unwrap(); + + emit(BlobsFinalityPendingBlobs(1)); + emit(BlobsFinalityPendingBytes(1)); + emit(BlobsFinalityAddedBlobs(1)); + emit(BlobsFinalityAddedBytes(1)); + emit(ReadRequestsCloseVoting { + read_request_id: Some(String::from("id")), + }); + } + + #[test] + fn test_emit() { + emit(BlobsFinalityVotingSuccess { + blob_hash: Some(String::from("hash")), + }); + emit(BlobsFinalityVotingFailure { + blob_hash: Some(String::from("hash")), + }); + emit(BlobsFinalityPendingBlobs(1)); + emit(BlobsFinalityPendingBytes(1)); + emit(BlobsFinalityAddedBlobs(1)); + emit(BlobsFinalityAddedBytes(1)); + } +} diff --git a/fendermint/vm/iroh_resolver/src/pool.rs b/fendermint/vm/iroh_resolver/src/pool.rs new file mode 100644 index 0000000000..a723a758cd --- /dev/null +++ b/fendermint/vm/iroh_resolver/src/pool.rs @@ -0,0 +1,411 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use async_stm::{ + queues::{tchan::TChan, TQueueLike}, + Stm, TVar, +}; +use iroh::NodeId; +use iroh_blobs::Hash; + +/// The maximum number of times a task can be attempted. +/// TODO: make configurable +const MAX_RESOLVE_ATTEMPTS: u64 = 3; + +/// Hashes we need to resolve. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ResolveKey { + pub hash: Hash, +} + +/// Hashes we need to resolve. +#[derive(Debug, Copy, Clone)] +pub struct ResolveSource { + pub id: NodeId, +} + +/// Ongoing status of a resolution. +/// +/// The status also keeps track of which original items mapped to the same resolution key. +/// Once resolved, they all become available at the same time. +/// TODO: include failure mechanism +#[derive(Clone)] +pub struct ResolveStatus { + /// The collection of items that all resolve to the same hash. + items: TVar>, + /// Indicate whether the content has been resolved. + is_resolved: TVar, + /// Counter added to by items if they fail. 
+ num_failures: TVar, +} + +impl ResolveStatus +where + T: Clone + std::hash::Hash + Eq + PartialEq + Sync + Send + 'static, +{ + pub fn new(item: T) -> Self { + let mut items = im::HashSet::new(); + items.insert(item); + Self { + is_resolved: TVar::new(false), + num_failures: TVar::new(0), + items: TVar::new(items), + } + } + + pub fn is_resolved(&self) -> Stm { + self.is_resolved.read_clone() + } + + pub fn is_failed(&self) -> Stm { + let num_failures = self.num_failures.read_clone()?; + let num_tasks = self.items.read_clone()?.len() as u64; + Ok(num_failures == num_tasks) + } +} + +/// Tasks emitted by the pool for background resolution. +#[derive(Clone)] +pub struct ResolveTask { + /// Content to resolve. + key: ResolveKey, + /// Flag to flip when the task is done. + is_resolved: TVar, + /// Current number of resolve attempts. + num_attempts: TVar, + /// Counter to add to if all attempts are used. + num_failures: TVar, + /// Type of task + task_type: TaskType, +} + +#[derive(Clone, Debug)] +pub enum TaskType { + ResolveBlob { + source: ResolveSource, + size: u64, + }, + CloseReadRequest { + blob_hash: Hash, + offset: u32, + len: u32, + }, +} + +impl ResolveTask { + pub fn hash(&self) -> Hash { + self.key.hash + } + + pub fn set_resolved(&self) -> Stm<()> { + self.is_resolved.write(true) + } + + /// Adds an attempt and return whether a retry is available. + pub fn add_attempt(&self) -> Stm { + let attempts = self.num_attempts.modify(|mut a| { + a += 1; + (a, a) + })?; + Ok(attempts < MAX_RESOLVE_ATTEMPTS) + } + + /// Increments failures on the parent status. + pub fn add_failure(&self) -> Stm<()> { + self.num_failures.update(|a| a + 1) + } + + pub fn task_type(&self) -> TaskType { + self.task_type.clone() + } +} + +pub type ResolveQueue = TChan; +pub type ResolveResults = TVar>>; + +/// A data structure used to communicate resolution requirements and outcomes +/// between the resolver running in the background and the application waiting +/// for the results. +/// +/// It is designed to resolve a single hash, per item, +/// with the possibility of multiple items mapping to the same hash. +#[derive(Clone, Default)] +pub struct ResolvePool +where + T: Clone + Sync + Send + 'static, +{ + /// The resolution status of each item. + items: TVar>>, + /// Items queued for resolution. + queue: ResolveQueue, + /// Results of resolved items. + results: ResolveResults, +} + +impl ResolvePool +where + for<'a> ResolveKey: From<&'a T>, + for<'a> TaskType: From<&'a T>, + T: Sync + Send + Clone + std::hash::Hash + Eq + PartialEq + 'static, +{ + pub fn new() -> Self { + Self { + items: Default::default(), + queue: Default::default(), + results: Default::default(), + } + } + + /// Queue to consume for task items. + /// + /// Exposed as-is to allow re-queueing items. + pub fn queue(&self) -> ResolveQueue { + self.queue.clone() + } + + /// Results of resolved items. + pub fn results(&self) -> ResolveResults { + self.results.clone() + } + + /// Add an item to the resolution targets. + /// + /// If the item is new, enqueue it from background resolution, otherwise return its existing status. 
+ pub fn add(&self, item: T) -> Stm> { + let key = ResolveKey::from(&item); + let task_type = TaskType::from(&item); + let mut items = self.items.read_clone()?; + + if items.contains_key(&key) { + let status = items.get(&key).cloned().unwrap(); + status.items.update_mut(|items| { + items.insert(item); + })?; + Ok(status) + } else { + let status = ResolveStatus::new(item); + items.insert(key, status.clone()); + self.items.write(items)?; + self.queue.write(ResolveTask { + key, + is_resolved: status.is_resolved.clone(), + num_attempts: TVar::new(0), + num_failures: status.num_failures.clone(), + task_type, + })?; + Ok(status) + } + } + + /// Return the status of an item. It can be queried for completion. + pub fn get_status(&self, item: &T) -> Stm>> { + let key = ResolveKey::from(item); + Ok(self.items.read()?.get(&key).cloned()) + } + + /// Collect total item count and resolved and failed items, ready for execution. + /// + /// The items collected are not removed, in case they need to be proposed again. + pub fn collect(&self) -> Stm<(usize, HashSet)> { + let mut count = 0; + let mut done = HashSet::new(); + let items = self.items.read()?; + for item in items.values() { + let item_items = item.items.read()?; + count += item_items.len(); + if item.is_resolved()? || item.is_failed()? { + done.extend(item_items.iter().cloned()); + } + } + Ok((count, done)) + } + + /// Count all items and resolved and failed items. + pub fn collect_counts(&self) -> Stm<(usize, usize)> { + let mut count = 0; + let mut done_count = 0; + let items = self.items.read()?; + for item in items.values() { + let item_items_count = item.items.read()?.len(); + count += item_items_count; + if item.is_resolved()? || item.is_failed()? { + done_count += item_items_count; + } + } + Ok((count, done_count)) + } + + /// Return capacity from the limit, not including done items. + pub fn get_capacity(&self, limit: usize) -> Stm { + self.collect_counts() + .map(|(count, done_count)| limit.saturating_sub(count - done_count)) + } + + /// Remove an item from the resolution targets. + pub fn remove_task(&self, item: &T) -> Stm<()> { + let key = ResolveKey::from(item); + self.items.update_mut(|items| { + items.remove(&key); + }) + } + + /// Get the result of a resolved item. + pub fn get_result(&self, item: &T) -> Stm>> { + let key = ResolveKey::from(item); + self.results + .read() + .map(|results| results.get(&key).cloned()) + } + + /// Remove the result of a resolved item. 
+ pub fn remove_result(&self, item: &T) -> Stm<()> { + let key = ResolveKey::from(item); + self.results.update(|mut results| { + results.remove(&key); + results + }) + } +} + +#[cfg(test)] +mod tests { + use super::{ResolveKey, ResolvePool, ResolveSource, TaskType}; + + use async_stm::{atomically, queues::TQueueLike}; + use iroh::{NodeId, SecretKey}; + use iroh_blobs::Hash; + use rand::Rng; + + #[derive(Clone, Hash, Eq, PartialEq, Debug)] + struct TestItem { + hash: Hash, + source: NodeId, + size: u64, + } + + impl TestItem { + pub fn dummy() -> Self { + let mut rng = rand::thread_rng(); + let mut data = [0u8; 256]; + rng.fill(&mut data); + let hash = Hash::new(data); + + let source = SecretKey::generate(&mut rng).public(); + Self { + hash, + source, + size: 256, + } + } + } + + impl From<&TestItem> for ResolveKey { + fn from(value: &TestItem) -> Self { + Self { hash: value.hash } + } + } + + impl From<&TestItem> for TaskType { + fn from(value: &TestItem) -> Self { + Self::ResolveBlob { + source: ResolveSource { id: value.source }, + size: value.size, + } + } + } + + #[tokio::test] + async fn add_new_item() { + let pool = ResolvePool::new(); + let item = TestItem::dummy(); + + atomically(|| pool.add(item.clone())).await; + atomically(|| { + assert!(pool.get_status(&item)?.is_some()); + assert!(!pool.queue.is_empty()?); + assert_eq!(pool.queue.read()?.key, ResolveKey::from(&item)); + Ok(()) + }) + .await; + } + + #[tokio::test] + async fn add_existing_item() { + let pool = ResolvePool::new(); + let item = TestItem::dummy(); + + // Add once. + atomically(|| pool.add(item.clone())).await; + + // Consume it from the queue. + atomically(|| { + assert!(!pool.queue.is_empty()?); + let _ = pool.queue.read()?; + Ok(()) + }) + .await; + + // Add again. + atomically(|| pool.add(item.clone())).await; + + // Should not be queued a second time. + atomically(|| { + let status = pool.get_status(&item)?; + assert!(status.is_some()); + assert!(pool.queue.is_empty()?); + Ok(()) + }) + .await; + } + + #[tokio::test] + async fn get_status() { + let pool = ResolvePool::new(); + let item = TestItem::dummy(); + + let status1 = atomically(|| pool.add(item.clone())).await; + let status2 = atomically(|| pool.get_status(&item)) + .await + .expect("status exists"); + + // Complete the item. + atomically(|| { + assert!(!pool.queue.is_empty()?); + let task = pool.queue.read()?; + task.is_resolved.write(true) + }) + .await; + + // Check status. 
+ atomically(|| { + assert!(status1.items.read()?.contains(&item)); + assert!(status1.is_resolved()?); + assert!(status2.is_resolved()?); + Ok(()) + }) + .await; + } + + #[tokio::test] + async fn collect_resolved() { + let pool = ResolvePool::new(); + let item = TestItem::dummy(); + + atomically(|| { + let status = pool.add(item.clone())?; + status.is_resolved.write(true)?; + + let (count1, resolved1) = pool.collect()?; + let (count2, resolved2) = pool.collect()?; + assert_eq!(count1, 1); + assert_eq!(count2, 1); + assert_eq!(resolved1, resolved2); + assert!(resolved1.contains(&item)); + Ok(()) + }) + .await; + } +} diff --git a/fendermint/vm/message/Cargo.toml b/fendermint/vm/message/Cargo.toml index 34459becbb..6371fd9276 100644 --- a/fendermint/vm/message/Cargo.toml +++ b/fendermint/vm/message/Cargo.toml @@ -17,6 +17,10 @@ serde_tuple = { workspace = true } serde_with = { workspace = true } num-traits = { workspace = true } +iroh-blobs = { workspace = true } +iroh-base = { workspace = true } +fendermint_actor_blobs_shared = { path = "../../actors/blobs/shared" } + arbitrary = { workspace = true, optional = true } quickcheck = { workspace = true, optional = true } rand = { workspace = true, optional = true } diff --git a/fendermint/vm/message/src/ipc.rs b/fendermint/vm/message/src/ipc.rs index 8f275a1c24..1e3fa6c6ea 100644 --- a/fendermint/vm/message/src/ipc.rs +++ b/fendermint/vm/message/src/ipc.rs @@ -1,7 +1,10 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use fvm_shared::clock::ChainEpoch; +use fendermint_actor_blobs_shared::blobs::SubscriptionId; +use fvm_shared::{address::Address, clock::ChainEpoch, MethodNum}; +use iroh_base::NodeId; +use iroh_blobs::Hash; use serde::{Deserialize, Serialize}; /// Messages involved in InterPlanetary Consensus. @@ -11,6 +14,12 @@ pub enum IpcMessage { /// A top-down checkpoint parent finality proposal. This proposal should contain the latest parent /// state that to be checked and voted by validators. TopDownExec(ParentFinality), + + /// Proposed by validators when a read request has been enqueued for resolution. + ReadRequestPending(PendingReadRequest), + + /// Proposed by validators when a read request has been closed. + ReadRequestClosed(ClosedReadRequest), } /// A proposal of the parent view that validators will be voting on. @@ -22,6 +31,70 @@ pub struct ParentFinality { pub block_hash: Vec, } +/// A blob resolution target that the validators will be voting on. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct FinalizedBlob { + /// The address that requested the blob. + pub subscriber: Address, + /// The blake3 hash of the blob. + pub hash: Hash, + /// The size of the blob. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// The node ID of the source node serving validators the blob. + pub source: NodeId, + /// Whether the blob was resolved or failed. + pub succeeded: bool, +} + +/// A blob that has been added but not yet queued for resolution. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct PendingBlob { + /// The address that requested the blob. + pub subscriber: Address, + /// The blake3 hash of the blob. + pub hash: Hash, + /// The size of the blob. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// The node ID of the source node serving validators the blob. 
+ pub source: NodeId, +} + +/// A read request that the validators will be voting on. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct ClosedReadRequest { + /// The request ID. + pub id: Hash, + /// The hash of the blob to read from. + pub blob_hash: Hash, + /// The offset in the blob to read from. + pub offset: u32, + /// The length of the read request. + pub len: u32, + /// The address and method to callback when the read request is closed. + pub callback: (Address, MethodNum), + /// The data read from the blob. + pub response: Vec, +} + +/// A read request that is pending resolution. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct PendingReadRequest { + /// The request ID. + pub id: Hash, + /// The hash of the blob to read from. + pub blob_hash: Hash, + /// The offset in the blob to read from. + pub offset: u32, + /// The length of the read request. + pub len: u32, + /// The address and method to callback when the read request is closed. + pub callback: (Address, MethodNum), +} + #[cfg(feature = "arb")] mod arb { diff --git a/fendermint/vm/topdown/Cargo.toml b/fendermint/vm/topdown/Cargo.toml index 47b176eb18..daecc8970a 100644 --- a/fendermint/vm/topdown/Cargo.toml +++ b/fendermint/vm/topdown/Cargo.toml @@ -21,6 +21,7 @@ ipc_actors_abis = { path = "../../../contract-bindings" } ipc_ipld_resolver = { path = "../../../ipld/resolver" } ipc-api = { path = "../../../ipc/api" } ipc-provider = { path = "../../../ipc/provider" } +iroh-blobs = { workspace = true } libp2p = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index cbf4ab2c12..ae56e98d69 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -31,6 +31,7 @@ pub use crate::toggle::Toggle; pub type BlockHeight = u64; pub type Bytes = Vec; pub type BlockHash = Bytes; +pub type Blob = Bytes; /// The null round error message pub(crate) const NULL_ROUND_ERR_MSG: &str = "requested epoch was a null round"; @@ -136,6 +137,47 @@ impl Display for IPCParentFinality { } } +/// The finality view for IPC blob resolution +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct IPCBlobFinality { + pub hash: iroh_blobs::Hash, + pub success: bool, +} + +impl IPCBlobFinality { + pub fn new(hash: iroh_blobs::Hash, success: bool) -> Self { + Self { hash, success } + } +} + +impl Display for IPCBlobFinality { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "IPCBlobFinality(hash: {}, success: {})", + self.hash, self.success + ) + } +} + +/// The finality view for IPC read request resolution +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct IPCReadRequestClosed { + pub hash: iroh_blobs::Hash, +} + +impl IPCReadRequestClosed { + pub fn new(hash: iroh_blobs::Hash) -> Self { + Self { hash } + } +} + +impl Display for IPCReadRequestClosed { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "IPCReadRequestClosed(hash: {})", self.hash) + } +} + #[async_trait] pub trait ParentViewProvider { /// Obtain the genesis epoch of the current subnet in the parent diff --git a/fendermint/vm/topdown/src/voting.rs b/fendermint/vm/topdown/src/voting.rs index 793c2ab243..711e0cd12f 100644 --- a/fendermint/vm/topdown/src/voting.rs +++ b/fendermint/vm/topdown/src/voting.rs @@ -11,7 +11,7 @@ use crate::observe::{ ParentFinalityCommitted, ParentFinalityPeerQuorumReached, 
ParentFinalityPeerVoteReceived, ParentFinalityPeerVoteSent, }; -use crate::{BlockHash, BlockHeight}; +use crate::{Blob, BlockHash, BlockHeight}; use ipc_observability::{emit, serde::HexEncodableBlockHash}; // Usign this type because it's `Hash`, unlike the normal `libsecp256k1::PublicKey`. @@ -44,15 +44,15 @@ pub enum Error = BlockHash> { /// so that we can ask for proposals that are not going to be voted /// down. #[derive(Clone)] -pub struct VoteTally { +pub struct VoteTally { /// Current validator weights. These are the ones who will vote on the blocks, - /// so these are the weights which need to form a quorum. + /// so these are the weights that need to form a quorum. power_table: TVar>, /// The *finalized mainchain* of the parent as observed by this node. /// /// These are assumed to be final because IIRC that's how the syncer works, - /// only fetching the info about blocks which are already sufficiently deep. + /// only fetching the info about blocks which are already deep enough. /// /// When we want to propose, all we have to do is walk back this chain and /// tally the votes we collected for the block hashes until we reach a quorum. @@ -60,7 +60,7 @@ pub struct VoteTally { /// The block hash is optional to allow for null blocks on Filecoin rootnet. chain: TVar>>, - /// Index votes received by height and hash, which makes it easy to look up + /// Index votes received by height and hash. This makes it easy to look up /// all the votes for a given block hash and also to verify that a validator /// isn't equivocating by trying to vote for two different things at the /// same height. @@ -69,14 +69,22 @@ pub struct VoteTally { /// Adding votes can be paused if we observe that looking for a quorum takes too long /// and is often retried due to votes being added. pause_votes: TVar, + + /// Index votes received by blob. + blob_votes: TVar>>, + + /// Adding votes can be paused if we observe that looking for a quorum takes too long + /// and is often retried due to votes being added. + pause_blob_votes: TVar, } -impl VoteTally +impl VoteTally where K: Clone + Hash + Eq + Sync + Send + 'static + Debug + Display, V: AsRef<[u8]> + Clone + Hash + Eq + Sync + Send + 'static, + O: AsRef<[u8]> + Clone + Hash + Eq + Sync + Send + 'static, { - /// Create an uninitialized instance. Before blocks can be added to it + /// Create an uninitialized instance. Before blocks can be added to it, /// we will have to set the last finalized block. /// /// The reason this exists is so that we can delay initialization until @@ -87,6 +95,8 @@ where chain: TVar::default(), votes: TVar::default(), pause_votes: TVar::new(false), + blob_votes: TVar::default(), + pause_blob_votes: TVar::new(false), } } @@ -99,13 +109,20 @@ where chain: TVar::new(im::OrdMap::from_iter([(height, Some(hash))])), votes: TVar::default(), pause_votes: TVar::new(false), + blob_votes: TVar::default(), + pause_blob_votes: TVar::new(false), } } /// Check that a validator key is currently part of the power table. pub fn has_power(&self, validator_key: &K) -> Stm { let pt = self.power_table.read()?; - // For consistency consider validators without power unknown. + // If the power table is empty, we're in a parentless subnet without a topdown view. + // This kind of setup is only useful for local dev / testing. + if pt.is_empty() { + return Ok(true); + } + // For consistency, consider validators without power unknown. 
match pt.get(validator_key) { None => Ok(false), Some(weight) => Ok(*weight > 0), @@ -149,7 +166,7 @@ where /// /// Returns an error unless it's exactly the next expected height, /// so the caller has to call this in every epoch. If the parent - /// chain produced no blocks in that epoch then pass `None` to + /// chain produced no blocks in that epoch, then pass `None` to /// represent that null-round in the tally. pub fn add_block( &self, @@ -243,7 +260,8 @@ where self.pause_votes.write(true) } - /// Find a block on the (from our perspective) finalized chain that gathered enough votes from validators. + /// Find a block on the (from our perspective) finalized chain that gathered enough votes from + /// validators. pub fn find_quorum(&self) -> Stm> { self.pause_votes.write(false)?; @@ -311,7 +329,7 @@ where /// Call when a new finalized block is added to the ledger, to clear out all preceding blocks. /// - /// After this operation the minimum item in the chain will the new finalized block. + /// After this operation the minimum item in the chain will be the new finalized block. pub fn set_finalized( &self, parent_block_height: BlockHeight, @@ -325,6 +343,8 @@ where chain })?; + // The votes' TVar will be updated such that the only key in the + // map of block heights to validator votes per block is the newest finalized block self.votes .update(|votes| votes.split(&parent_block_height).1)?; @@ -338,12 +358,126 @@ where Ok(()) } + /// When a blob is finalized in the parent, we can remove it from the blob votes tally. + /// Note: Ensure this is called with `atomically`. + pub fn clear_blob(&self, blob: O) -> Stm<()> { + self.blob_votes.update_mut(|votes| { + votes.remove(&blob); + })?; + Ok(()) + } + + /// Add a vote for a blob we received. + /// + /// Returns `true` if this vote was added, `false` if it was ignored as a duplicate, + /// and an error if it's an equivocation or from a validator we don't know. + pub fn add_blob_vote( + &self, + validator_key: K, + blob: O, + resolved: bool, + ) -> StmResult> { + if *self.pause_blob_votes.read()? { + retry()?; + } + + if !self.has_power(&validator_key)? { + return abort(Error::UnpoweredValidator(validator_key)); + } + + let mut votes = self.blob_votes.read_clone()?; + let votes_for_blob = votes.entry(blob).or_default(); + + if let Some(existing_vote) = votes_for_blob.get(&validator_key) { + if *existing_vote { + // A vote for "resolved" was already made, ignore later votes + return Ok(false); + } + } + votes_for_blob.insert(validator_key, resolved); + + self.blob_votes.write(votes)?; + + Ok(true) + } + + /// Pause adding more votes until we are finished calling `find_quorum` which + /// automatically re-enables them. + pub fn pause_blob_votes_until_find_quorum(&self) -> Stm<()> { + self.pause_blob_votes.write(true) + } + + /// Determine if a blob has (from our perspective) gathered enough votes from validators. + /// Returns two bools. The first indicates whether the blob has reached quorum, + /// and the second indicates if the quorum deems the blob resolved or failed. + pub fn find_blob_quorum(&self, blob: &O) -> Stm<(bool, bool)> { + self.pause_blob_votes.write(false)?; + + let votes = self.blob_votes.read()?; + let power_table = self.power_table.read()?; + + // If the power table is empty, we're in a parentless subnet without a topdown view. + // This kind of setup is only useful for local dev / testing. + // + // There's no way to know how many validators are voting, and therefore no way to calculate + // a quorum threshold. 
+ // The best we can do is say that at least one vote (yea/nay) is necessary. + let quorum_threshold = if power_table.is_empty() { + 1 as Weight + } else { + self.quorum_threshold()? + }; + + let mut resolved_weight = 0; + let mut failed_weight = 0; + let mut voters = im::HashSet::new(); + + let Some(votes_for_blob) = votes.get(blob) else { + return Ok((false, false)); + }; + + for (vk, resolved) in votes_for_blob { + if voters.insert(vk.clone()).is_none() { + // New voter, get their current weight; it might be 0 if they have been removed. + let power = if power_table.is_empty() { + 1 + } else { + power_table.get(vk).cloned().unwrap_or_default() + }; + + tracing::debug!("voter; key={}, power={}", vk.to_string(), power); + + if *resolved { + resolved_weight += power; + } else { + failed_weight += power; + } + } + } + + tracing::debug!( + resolved_weight, + failed_weight, + quorum_threshold, + "blob quorum; votes={}", + votes_for_blob.len() + ); + + if resolved_weight >= quorum_threshold { + Ok((true, true)) + } else if failed_weight >= quorum_threshold { + Ok((true, false)) + } else { + Ok((false, false)) + } + } + /// Overwrite the power table after it has changed to a new snapshot. /// /// This method expects absolute values, it completely replaces the existing powers. pub fn set_power_table(&self, power_table: Vec<(K, Weight)>) -> Stm<()> { let power_table = im::HashMap::from_iter(power_table); - // We don't actually have to remove the votes of anyone who is no longer a validator, + // We don't have to remove the votes of anyone who is no longer a validator, // we just have to make sure to handle the case when they are not in the power table. self.power_table.write(power_table) } @@ -355,7 +489,7 @@ where if power_updates.is_empty() { return Ok(()); } - // We don't actually have to remove the votes of anyone who is no longer a validator, + // We don't have to remove the votes of anyone who is no longer a validator, // we just have to make sure to handle the case when they are not in the power table. self.power_table.update_mut(|pt| { for (vk, w) in power_updates { @@ -468,10 +602,10 @@ pub async fn publish_vote_loop( } } - // Throttle vote gossiping at periods of fast syncing. For example if we create a subnet contract on Friday + // Throttle vote gossiping at periods of fast syncing. For example, if we create a subnet contract on Friday // and bring up a local testnet on Monday, all nodes would be ~7000 blocks behind a Lotus parent. CometBFT // would be in-sync, and they could rapidly try to gossip votes on previous heights. GossipSub might not like - // that, and we can just cast our votes every now and then to finalize multiple blocks. + // that, and we can just cast our votes now and then to finalize multiple blocks. 
vote_interval.tick().await; } diff --git a/infra/fendermint/scripts/genesis.toml b/infra/fendermint/scripts/genesis.toml index a182836be7..6ca95d7ea0 100644 --- a/infra/fendermint/scripts/genesis.toml +++ b/infra/fendermint/scripts/genesis.toml @@ -1,6 +1,7 @@ [tasks.genesis-new] extend = "fendermint-tool" -env = { "CMD" = "genesis --genesis-file /data/genesis.json new --chain-name ${NETWORK_NAME} --base-fee ${BASE_FEE} --timestamp ${TIMESTAMP} --power-scale ${POWER_SCALE}" } +# Use placeholder Ethereum address for IPC contracts owner (can be changed later) +env = { "CMD" = "genesis --genesis-file /data/genesis.json new --chain-name ${NETWORK_NAME} --base-fee ${BASE_FEE} --timestamp ${TIMESTAMP} --power-scale ${POWER_SCALE} --ipc-contracts-owner 0x0000000000000000000000000000000000000001" } ## Create the validator key pair ## Takes: diff --git a/ipc-decentralized-storage/Cargo.toml b/ipc-decentralized-storage/Cargo.toml new file mode 100644 index 0000000000..7c24671541 --- /dev/null +++ b/ipc-decentralized-storage/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "ipc-decentralized-storage" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +thiserror.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +tracing.workspace = true +futures.workspace = true +futures-util.workspace = true +bytes.workspace = true + +# HTTP server dependencies +warp.workspace = true +hex.workspace = true +lazy_static.workspace = true +prometheus.workspace = true +prometheus_exporter.workspace = true +uuid.workspace = true +mime_guess.workspace = true +urlencoding.workspace = true + +# Entanglement dependencies +entangler.workspace = true +entangler_storage.workspace = true + +# HTTP client dependencies +reqwest = { version = "0.11", features = ["json"] } + +# CLI dependencies +clap = { workspace = true, features = ["derive"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +# Iroh dependencies for decentralized storage +iroh.workspace = true +iroh-base.workspace = true +iroh-blobs.workspace = true +iroh_manager = { path = "../recall/iroh_manager" } + +# Fendermint dependencies for RPC client +fendermint_rpc = { path = "../fendermint/rpc" } +fendermint_vm_message = { path = "../fendermint/vm/message" } +fendermint_vm_actor_interface = { path = "../fendermint/vm/actor_interface" } +fendermint_actor_blobs_shared = { path = "../fendermint/actors/blobs/shared" } +fendermint_actor_bucket = { path = "../fendermint/actors/bucket" } +fendermint_crypto = { path = "../fendermint/crypto" } + +# IPC dependencies for address parsing +ipc-api = { path = "../ipc/api" } +ethers.workspace = true + +# FVM dependencies +fvm_shared.workspace = true +fvm_ipld_encoding.workspace = true + +# Tendermint +tendermint-rpc.workspace = true + +# BLS signatures +bls-signatures = { version = "0.13.1", default-features = false, features = ["blst"] } +blake2b_simd.workspace = true +rand = "0.8" + +[[bin]] +name = "gateway" +path = "src/bin/gateway.rs" + +[[bin]] +name = "node" +path = "src/bin/node.rs" + +[dev-dependencies] +tokio = { workspace = true, features = ["test-util"] } +tempfile.workspace = true diff --git a/ipc-decentralized-storage/src/bin/gateway.rs b/ipc-decentralized-storage/src/bin/gateway.rs new file mode 100644 index 0000000000..abd76ae060 --- /dev/null +++ b/ipc-decentralized-storage/src/bin/gateway.rs @@ -0,0 +1,238 @@ +// Copyright 2022-2024 
Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! CLI for running the blob gateway with objects API + +use anyhow::{anyhow, Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use clap::Parser; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::QueryClient; +use fendermint_rpc::FendermintClient; +use fvm_shared::address::{set_current_network, Address, Network}; +use fvm_shared::chainid::ChainID; +use fendermint_vm_message::query::FvmQueryHeight; +use ipc_decentralized_storage::gateway::BlobGateway; +use ipc_decentralized_storage::gateway::objects_service; +use ipc_decentralized_storage::objects::ObjectsConfig; +use iroh_manager::IrohNode; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::path::PathBuf; +use std::time::Duration; +use tendermint_rpc::Url; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; + +#[derive(Parser, Debug)] +#[command(name = "gateway")] +#[command(about = "Run the blob gateway with objects API to query pending blobs and handle object uploads")] +struct Args { + /// Set the FVM Address Network: "mainnet" (f) or "testnet" (t) + #[arg(short, long, default_value = "testnet", env = "FM_NETWORK")] + network: String, + + /// Path to file containing the secp256k1 secret key in Base64 format (for signing transactions) + #[arg(long, env = "SECRET_KEY_FILE", required = true)] + secret_key_file: PathBuf, + + /// Path to file containing BLS private key in hex format (96 characters) + /// If not provided, a new key will be generated and saved to this path + #[arg(long, env = "BLS_KEY_FILE")] + bls_key_file: Option, + + /// Tendermint RPC URL + #[arg(short, long, default_value = "http://localhost:26657")] + rpc_url: Url, + + /// Number of pending blobs to fetch per query + #[arg(short, long, default_value = "10")] + batch_size: u32, + + /// Polling interval in seconds + #[arg(short = 'i', long, default_value = "5")] + poll_interval_secs: u64, + + // Objects service arguments + /// Enable objects HTTP API service + #[arg(long, default_value = "true")] + enable_objects: bool, + + /// Objects service listen address + #[arg(long, default_value = "127.0.0.1:8080", env = "OBJECTS_LISTEN_ADDR")] + objects_listen_addr: SocketAddr, + + /// Maximum object size in bytes (default 100MB) + #[arg(long, default_value = "104857600", env = "MAX_OBJECT_SIZE")] + max_object_size: u64, + + /// Path to Iroh data directory + #[arg(long, env = "IROH_PATH")] + iroh_path: PathBuf, + + /// Iroh IPv4 bind address + #[arg(long, env = "IROH_V4_ADDR")] + iroh_v4_addr: Option, + + /// Iroh IPv6 bind address + #[arg(long, env = "IROH_V6_ADDR")] + iroh_v6_addr: Option, +} + +/// Get the next sequence number (nonce) of an account. 
+/// Get the next sequence number (nonce) of an account.
+async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result<u64> {
+    let state = client
+        .actor_state(addr, FvmQueryHeight::default())
+        .await
+        .context("failed to get actor state")?;
+
+    match state.value {
+        Some((_id, state)) => Ok(state.sequence),
+        None => Err(anyhow!("cannot find actor {addr}")),
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    // Initialize tracing
+    tracing_subscriber::registry()
+        .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
+        .with(tracing_subscriber::fmt::layer())
+        .init();
+
+    let args = Args::parse();
+
+    // Set the network for address display (f for mainnet, t for testnet)
+    let network = match args.network.to_lowercase().as_str() {
+        "main" | "mainnet" | "f" => Network::Mainnet,
+        "test" | "testnet" | "t" => Network::Testnet,
+        _ => {
+            anyhow::bail!("Invalid network: {}. Use 'mainnet' or 'testnet'", args.network);
+        }
+    };
+    set_current_network(network);
+    tracing::info!("Using network: {:?}", network);
+
+    // Read secp256k1 secret key for signing transactions
+    tracing::info!(
+        "Reading secret key from: {}",
+        args.secret_key_file.display()
+    );
+    let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file)
+        .context("failed to read secret key")?;
+
+    let pk = sk.public_key();
+    // Use f1 address (secp256k1) for signing native FVM actor transactions
+    let from_addr = Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?;
+    tracing::info!("Gateway sender address: {}", from_addr);
+
+    // Parse or generate BLS private key if provided
+    let _bls_private_key = if let Some(key_file) = &args.bls_key_file {
+        if key_file.exists() {
+            tracing::info!("Reading BLS private key from: {}", key_file.display());
+            let key_hex = std::fs::read_to_string(key_file)
+                .context("failed to read BLS private key file")?
+                .trim()
+                .to_string();
+
+            let key_bytes = hex::decode(&key_hex)
+                .context("failed to decode BLS private key hex string from file")?;
+
+            let key = BlsPrivateKey::from_bytes(&key_bytes)
+                .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))?;
+
+            tracing::info!("Loaded BLS private key successfully");
+            tracing::info!("BLS Public key: {}", hex::encode(key.public_key().as_bytes()));
+            Some(key)
+        } else {
+            tracing::info!("BLS key file not found, generating a new BLS private key");
+            let key = BlsPrivateKey::generate(&mut rand::thread_rng());
+            let key_hex = hex::encode(key.as_bytes());
+
+            // Save the key to the file
+            std::fs::write(key_file, &key_hex)
+                .context("failed to write BLS private key to file")?;
+
+            tracing::info!(
+                "Generated and saved new BLS private key to: {}",
+                key_file.display()
+            );
+            tracing::info!("BLS Public key: {}", hex::encode(key.public_key().as_bytes()));
+            Some(key)
+        }
+    } else {
+        tracing::info!("No BLS private key file provided");
+        None
+    };
+
+    tracing::info!("Starting blob gateway");
+    tracing::info!("RPC URL: {}", args.rpc_url);
+    tracing::info!("Batch size: {}", args.batch_size);
+    tracing::info!("Poll interval: {}s", args.poll_interval_secs);
+
+    // Start Iroh node for objects service
+    tracing::info!("Starting Iroh node at: {}", args.iroh_path.display());
+    let iroh_node = IrohNode::persistent(args.iroh_v4_addr, args.iroh_v6_addr, &args.iroh_path)
+        .await
+        .context("failed to start Iroh node")?;
+
+    let node_addr = iroh_node.endpoint().node_addr().await?;
+    tracing::info!("Iroh node started: {}", node_addr.node_id);
+
+    // Start objects service if enabled (upload only)
+    if args.enable_objects {
+        let objects_config = ObjectsConfig {
+            listen_addr: args.objects_listen_addr,
+            tendermint_url: args.rpc_url.clone(),
+            max_object_size: args.max_object_size,
+            metrics_enabled: false,
+            metrics_listen: None,
+        };
+
+        // Use the gateway's own Iroh blobs client for uploads
+        let iroh_blobs = iroh_node.blobs_client().clone();
+
+        let _objects_handle = objects_service::start_objects_service(
+            objects_config,
+            iroh_node.clone(),
+            iroh_blobs,
+        );
+        tracing::info!("Objects upload service started on {}", args.objects_listen_addr);
+    }
+
+    // Create the Fendermint RPC client
+    let client = FendermintClient::new_http(args.rpc_url, None)
+        .context("failed to create Fendermint client")?;
+
+    // Query the account nonce from the state
+    let sequence = get_sequence(&client, &from_addr)
+        .await
+        .context("failed to get account sequence")?;
+
+    // Query the chain ID
+    let chain_id = client
+        .state_params(FvmQueryHeight::default())
+        .await
+        .context("failed to get state params")?
+        .value
+        .chain_id;
+
+    tracing::info!("Chain ID: {}", chain_id);
+    tracing::info!("Account sequence: {}", sequence);
+
+    // Create signed message factory
+    let mf = SignedMessageFactory::new(sk, from_addr, sequence, ChainID::from(chain_id));
+
+    // Bind the client with the message factory for transaction signing
+    let bound_client = client.bind(mf);
+
+    // Create the gateway with the bound client
+    let mut gateway = BlobGateway::new(
+        bound_client,
+        args.batch_size,
+        Duration::from_secs(args.poll_interval_secs),
+    );
+
+    // Run the gateway
+    gateway.run().await?;
+
+    Ok(())
+}
diff --git a/ipc-decentralized-storage/src/bin/node.rs b/ipc-decentralized-storage/src/bin/node.rs
new file mode 100644
index 0000000000..a1a5f8854e
--- /dev/null
+++ b/ipc-decentralized-storage/src/bin/node.rs
@@ -0,0 +1,585 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Binary for running a decentralized storage node
+
+use anyhow::{anyhow, Context, Result};
+use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize};
+use clap::{Parser, Subcommand};
+use ethers::types::Address as EthAddress;
+use fendermint_actor_blobs_shared::method::Method;
+use fendermint_actor_blobs_shared::operators::RegisterNodeOperatorParams;
+use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR;
+use fendermint_rpc::message::{GasParams, SignedMessageFactory};
+use fendermint_rpc::tx::{TxClient, TxCommit};
+use fendermint_rpc::FendermintClient;
+use fendermint_rpc::QueryClient;
+use fendermint_vm_message::query::FvmQueryHeight;
+use fvm_ipld_encoding::RawBytes;
+use fvm_shared::address::{set_current_network, Address, Network};
+use fvm_shared::chainid::ChainID;
+use fvm_shared::econ::TokenAmount;
+use ipc_decentralized_storage::node::{launch, NodeConfig};
+use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
+use std::path::PathBuf;
+use std::str::FromStr;
+use std::time::Duration;
+use tendermint_rpc::Url;
+use tracing::info;
+
+#[derive(Parser, Debug)]
+#[command(name = "ipc-storage-node")]
+#[command(about = "Decentralized storage node CLI", long_about = None)]
+struct Cli {
+    /// Set the FVM Address Network: "mainnet" (f) or "testnet" (t)
+    #[arg(short, long, default_value = "testnet", env = "FM_NETWORK")]
+    network: String,
+
+    #[command(subcommand)]
+    command: Commands,
+}
+
+#[derive(Subcommand, Debug)]
+enum Commands {
+    /// Run the storage node
+    Run(RunArgs),
+    /// Register as a node operator
+    RegisterOperator(RegisterOperatorArgs),
+    /// Generate a new BLS private key
+    GenerateBlsKey(GenerateBlsKeyArgs),
+    /// Query a blob by its hash
+    QueryBlob(QueryBlobArgs),
+    /// Query an object from a bucket by key
+    QueryObject(QueryObjectArgs),
+}
+
+#[derive(Parser, Debug)]
+struct RunArgs {
+    /// Path to file containing the BLS private key in hex format (64 characters)
+    /// If not provided, a new key will be generated and saved to this path
+    #[arg(long, env = "BLS_KEY_FILE")]
+    secret_key_file: Option<PathBuf>,
+
+    /// Path to store Iroh data
+    #[arg(long, default_value = "./iroh_data")]
+    iroh_path: PathBuf,
+
+    /// IPv4 bind address for Iroh (e.g., 0.0.0.0:11204)
+    #[arg(long)]
+    iroh_v4_addr: Option<SocketAddrV4>,
+
+    /// IPv6 bind address for Iroh (e.g., [::]:11204)
+    #[arg(long)]
+    iroh_v6_addr: Option<SocketAddrV6>,
+
+    /// Tendermint RPC URL
+    #[arg(long, default_value = "http://localhost:26657")]
+    rpc_url: String,
+
+    /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint)
+    #[arg(long, default_value = "http://localhost:8545")]
+    eth_rpc_url: String,
+
+    /// Blobs actor address for event filtering (hex format with 0x prefix)
+    #[arg(long, default_value = "0xff00000000000000000000000000000000000064")]
+    blobs_actor_address: String,
+
+    /// Number of blobs to fetch per query
+    #[arg(long, default_value = "10")]
+    batch_size: u32,
+
+    /// Polling interval in seconds
+    #[arg(long, default_value = "5")]
+    poll_interval_secs: u64,
+
+    /// Maximum concurrent blob downloads
+    #[arg(long, default_value = "10")]
+    max_concurrent_downloads: usize,
+
+    /// Address to bind the RPC server for signature queries
+    #[arg(long, default_value = "127.0.0.1:8080")]
+    rpc_bind_addr: SocketAddr,
+}
+
+#[derive(Parser, Debug)]
+struct RegisterOperatorArgs {
+    /// Path to file containing the BLS private key in hex format (64 characters)
+    #[arg(long, env = "BLS_KEY_FILE", required = true)]
+    bls_key_file: PathBuf,
+
+    /// Path to file containing the secp256k1 secret key in Base64 format (for signing transactions)
+    #[arg(long, env = "SECRET_KEY_FILE", required = true)]
+    secret_key_file: PathBuf,
+
+    /// RPC URL where this operator's node will be listening (e.g., http://my-node.example.com:8080)
+    #[arg(long, required = true)]
+    operator_rpc_url: String,
+
+    /// Tendermint RPC URL for the chain
+    #[arg(long, default_value = "http://localhost:26657")]
+    chain_rpc_url: String,
+}
+
+#[derive(Parser, Debug)]
+struct GenerateBlsKeyArgs {
+    /// Path to save the generated BLS private key (hex format)
+    #[arg(long, short = 'o', default_value = "./bls_key.hex")]
+    output: PathBuf,
+
+    /// Overwrite existing file if it exists
+    #[arg(long, short = 'f')]
+    force: bool,
+}
+
+#[derive(Parser, Debug)]
+struct QueryBlobArgs {
+    /// Blob hash to query (hex string, with or without 0x prefix)
+    #[arg(long, required = true)]
+    hash: String,
+
+    /// Tendermint RPC URL for the chain
+    #[arg(long, default_value = "http://localhost:26657")]
+    rpc_url: String,
+
+    /// Block height to query at (default: latest committed)
+    #[arg(long)]
+    height: Option<u64>,
+}
+
+#[derive(Parser, Debug)]
+struct QueryObjectArgs {
+    /// Bucket address (f-address or eth-address format)
+    #[arg(long, required = true)]
+    bucket: String,
+
+    /// Object key/path within the bucket
+    #[arg(long, required = true)]
+    key: String,
+
+    /// Tendermint RPC URL for the chain
+    #[arg(long, default_value = "http://localhost:26657")]
+    rpc_url: String,
+
+    /// Block height to query at (default: latest committed)
+    #[arg(long)]
+    height: Option<u64>,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    // Initialize tracing
+    tracing_subscriber::fmt()
+        .with_env_filter(
+            tracing_subscriber::EnvFilter::try_from_default_env()
+                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
+        )
+        .init();
+
+    let cli = Cli::parse();
+
+    // Set the network for address display (f for mainnet, t for testnet)
+    let network = match cli.network.to_lowercase().as_str() {
+        "main" | "mainnet" | "f" => Network::Mainnet,
+        "test" | "testnet" | "t" => Network::Testnet,
+        _ => {
+            anyhow::bail!("Invalid network: {}. Use 'mainnet' or 'testnet'", cli.network);
+        }
+    };
+    set_current_network(network);
+    info!("Using network: {:?}", network);
+
+    match cli.command {
+        Commands::Run(args) => run_node(args).await,
+        Commands::RegisterOperator(args) => register_operator(args).await,
+        Commands::GenerateBlsKey(args) => generate_bls_key(args),
+        Commands::QueryBlob(args) => query_blob(args).await,
+        Commands::QueryObject(args) => query_object(args).await,
+    }
+}
+
+async fn run_node(args: RunArgs) -> Result<()> {
+    // Parse or generate BLS private key
+    let bls_private_key = if let Some(key_file) = &args.secret_key_file {
+        if key_file.exists() {
+            info!("Reading BLS private key from: {}", key_file.display());
+            let key_hex = std::fs::read_to_string(key_file)
+                .context("failed to read BLS private key file")?
+                .trim()
+                .to_string();
+
+            let key_bytes = hex::decode(&key_hex)
+                .context("failed to decode BLS private key hex string from file")?;
+
+            BlsPrivateKey::from_bytes(&key_bytes)
+                .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))?
+        } else {
+            info!("Key file not found, generating a new BLS private key");
+            let key = BlsPrivateKey::generate(&mut rand::thread_rng());
+            let key_hex = hex::encode(key.as_bytes());
+
+            // Save the key to the file
+            std::fs::write(key_file, &key_hex)
+                .context("failed to write BLS private key to file")?;
+
+            info!(
+                "Generated and saved new BLS private key to: {}",
+                key_file.display()
+            );
+            info!("Public key: {}", hex::encode(key.public_key().as_bytes()));
+
+            key
+        }
+    } else {
+        info!(
+            "No private key file provided, generating a new temporary key (will not be persisted)"
+        );
+        let key = BlsPrivateKey::generate(&mut rand::thread_rng());
+        info!("Generated temporary BLS private key");
+        info!("Public key: {}", hex::encode(key.public_key().as_bytes()));
+        info!("WARNING: This key will not be saved and will be lost when the node stops!");
+        key
+    };
+
+    // Parse RPC URL
+    let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?;
+
+    // Parse blobs actor address
+    let blobs_actor_address: EthAddress = args
+        .blobs_actor_address
+        .parse()
+        .context("failed to parse blobs actor address")?;
+
+    // Create node configuration
+    let config = NodeConfig {
+        iroh_path: args.iroh_path,
+        iroh_v4_addr: args.iroh_v4_addr,
+        iroh_v6_addr: args.iroh_v6_addr,
+        rpc_url,
+        eth_rpc_url: args.eth_rpc_url,
+        batch_size: args.batch_size,
+        poll_interval: Duration::from_secs(args.poll_interval_secs),
+        max_concurrent_downloads: args.max_concurrent_downloads,
+        bls_private_key,
+        rpc_bind_addr: args.rpc_bind_addr,
+        blobs_actor_address,
+    };
+
+    info!("Starting node with configuration: {:?}", config);
+
+    // Launch the node
+    launch(config).await
+}
+
+async fn register_operator(args: RegisterOperatorArgs) -> Result<()> {
+    info!("Registering as node operator");
+
+    // Read BLS private key
+    info!(
+        "Reading BLS private key from: {}",
+        args.bls_key_file.display()
+    );
+    let key_hex = std::fs::read_to_string(&args.bls_key_file)
+        .context("failed to read BLS private key file")?
+        .trim()
+        .to_string();
+
+    let key_bytes =
+        hex::decode(&key_hex).context("failed to decode BLS private key hex string from file")?;
+
+    let bls_private_key = BlsPrivateKey::from_bytes(&key_bytes)
+        .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))?;
+
+    // Get BLS public key
+    let bls_pubkey = bls_private_key.public_key().as_bytes().to_vec();
+
+    info!("BLS public key: {}", hex::encode(&bls_pubkey));
+    info!("Operator RPC URL: {}", args.operator_rpc_url);
+
+    // Read secp256k1 secret key for signing
+    info!(
+        "Reading secret key from: {}",
+        args.secret_key_file.display()
+    );
+    let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file)
+        .context("failed to read secret key")?;
+
+    let pk = sk.public_key();
+    // Use f1 address (secp256k1) instead of f410 (delegated/ethereum) because we're calling
+    // a native FVM actor with CBOR params, not an EVM contract with calldata
+    let from_addr = Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?;
+    info!("Sender address: {}", from_addr);
+
+    // Parse chain RPC URL
+    let chain_rpc_url =
+        Url::from_str(&args.chain_rpc_url).context("failed to parse chain RPC URL")?;
+
+    // Create Fendermint client
+    let client = FendermintClient::new_http(chain_rpc_url, None)
+        .context("failed to create Fendermint client")?;
+
+    // Query the account nonce from the state
+    let sequence = get_sequence(&client, &from_addr)
+        .await
+        .context("failed to get account sequence")?;
+
+    // Query the chain ID
+    let chain_id = client
+        .state_params(FvmQueryHeight::default())
+        .await
+        .context("failed to get state params")?
+        .value
+        .chain_id;
+
+    info!("Chain ID: {}", chain_id);
+    info!("Account sequence: {}", sequence);
+
+    // Create signed message factory
+    let mf = SignedMessageFactory::new(sk, from_addr, sequence, ChainID::from(chain_id));
+
+    // Bind the client with the message factory
+    let mut client = client.bind(mf);
+
+    // Prepare registration parameters
+    let params = RegisterNodeOperatorParams {
+        bls_pubkey: bls_pubkey.clone(),
+        rpc_url: args.operator_rpc_url.clone(),
+    };
+
+    let params_bytes =
+        RawBytes::serialize(params).context("failed to serialize RegisterNodeOperatorParams")?;
+
+    // Gas params
+    let gas_params = GasParams {
+        gas_limit: 10_000_000_000,
+        gas_fee_cap: TokenAmount::from_atto(100),
+        gas_premium: TokenAmount::from_atto(100),
+    };
+
+    info!("Sending RegisterNodeOperator transaction...");
+
+    // Send the transaction
+    let res = TxClient::<TxCommit>::transaction(
+        &mut client,
+        BLOBS_ACTOR_ADDR,
+        Method::RegisterNodeOperator as u64,
+        params_bytes,
+        TokenAmount::from_atto(0),
+        gas_params,
+    )
+    .await
+    .context("failed to send RegisterNodeOperator transaction")?;
+
+    if res.response.check_tx.code.is_err() {
+        anyhow::bail!(
+            "RegisterNodeOperator check_tx failed: {}",
+            res.response.check_tx.log
+        );
+    }
+
+    if res.response.deliver_tx.code.is_err() {
+        anyhow::bail!(
+            "RegisterNodeOperator deliver_tx failed: {}",
+            res.response.deliver_tx.log
+        );
+    }
+
+    info!("✓ Successfully registered as node operator!");
+    info!(
+        "  BLS Public key: {}",
+        hex::encode(bls_private_key.public_key().as_bytes())
+    );
+    info!("  RPC URL: {}", args.operator_rpc_url);
+    info!("  Tx hash: {}", res.response.hash);
+
+    Ok(())
+}
+
+/// Get the next sequence number (nonce) of an account.
+async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result<u64> {
+    let state = client
+        .actor_state(addr, FvmQueryHeight::default())
+        .await
+        .context("failed to get actor state")?;
+
+    match state.value {
+        Some((_id, state)) => Ok(state.sequence),
+        None => Err(anyhow!("cannot find actor {addr}")),
+    }
+}
+
+/// Generate a new BLS private key and save it to a file.
+fn generate_bls_key(args: GenerateBlsKeyArgs) -> Result<()> {
+    // Check if file already exists
+    if args.output.exists() && !args.force {
+        anyhow::bail!(
+            "File {} already exists. Use --force to overwrite.",
+            args.output.display()
+        );
+    }
+
+    info!("Generating new BLS private key...");
+
+    // Generate the key
+    let key = BlsPrivateKey::generate(&mut rand::thread_rng());
+    let key_hex = hex::encode(key.as_bytes());
+    let pubkey_hex = hex::encode(key.public_key().as_bytes());
+
+    // Save the key to the file
+    std::fs::write(&args.output, &key_hex).context("failed to write BLS private key to file")?;
+
+    info!("✓ BLS private key generated successfully!");
+    info!("  Private key saved to: {}", args.output.display());
+    info!("  Public key: {}", pubkey_hex);
+
+    Ok(())
+}
+
+/// Query a blob by its hash from the blobs actor.
+async fn query_blob(args: QueryBlobArgs) -> Result<()> {
+    use fendermint_actor_blobs_shared::bytes::B256;
+    use fendermint_rpc::message::GasParams;
+    use fvm_shared::econ::TokenAmount;
+
+    info!("Querying blob with hash: {}", args.hash);
+
+    // Parse blob hash - strip 0x prefix if present
+    let blob_hash_hex = args.hash.strip_prefix("0x").unwrap_or(&args.hash);
+
+    let blob_hash_bytes = hex::decode(blob_hash_hex)
+        .context("failed to decode blob hash hex string")?;
+
+    if blob_hash_bytes.len() != 32 {
+        anyhow::bail!(
+            "blob hash must be 32 bytes, got {} bytes",
+            blob_hash_bytes.len()
+        );
+    }
+
+    let mut hash_array = [0u8; 32];
+    hash_array.copy_from_slice(&blob_hash_bytes);
+    let blob_hash = B256(hash_array);
+
+    // Parse RPC URL
+    let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?;
+
+    // Create Fendermint client
+    let mut client = FendermintClient::new_http(rpc_url, None)
+        .context("failed to create Fendermint client")?;
+
+    // Set query height
+    let height = args
+        .height
+        .map(FvmQueryHeight::from)
+        .unwrap_or(FvmQueryHeight::Committed);
+
+    // Gas params for the query call
+    let gas_params = GasParams {
+        gas_limit: Default::default(),
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+
+    // Query the blob
+    let maybe_blob = client
+        .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height)
+        .await
+        .context("failed to query blob")?;
+
+    match maybe_blob {
+        Some(blob) => {
+            println!("Blob found!");
+            println!("  Hash: 0x{}", hex::encode(blob_hash.0));
+            println!("  Size: {} bytes", blob.size);
+            println!("  Metadata hash: 0x{}", hex::encode(blob.metadata_hash.0));
+            println!("  Status: {:?}", blob.status);
+            println!("  Subscribers: {}", blob.subscribers.len());
+
+            // Print subscriber details (subscription_id -> expiry epoch)
+            for (subscription_id, expiry) in &blob.subscribers {
+                println!("    - Subscription ID: {}", subscription_id);
+                println!("      Expiry epoch: {}", expiry);
+            }
+        }
+        None => {
+            println!("Blob not found with hash: 0x{}", hex::encode(blob_hash.0));
+        }
+    }
+
+    Ok(())
+}
+
+/// Query an object from a bucket by its key.
+async fn query_object(args: QueryObjectArgs) -> Result<()> {
+    use fendermint_actor_bucket::GetParams;
+    use fendermint_rpc::message::GasParams;
+    use fvm_shared::address::{Error as NetworkError, Network};
+    use fvm_shared::econ::TokenAmount;
+    use ipc_api::ethers_address_to_fil_address;
+
+    info!("Querying object from bucket: {} with key: {}", args.bucket, args.key);
+
+    // Parse bucket address (supports both f-address and eth-address formats)
+    let bucket_address = Network::Mainnet
+        .parse_address(&args.bucket)
+        .or_else(|e| match e {
+            NetworkError::UnknownNetwork => Network::Testnet.parse_address(&args.bucket),
+            _ => Err(e),
+        })
+        .or_else(|_| {
+            let addr = ethers::types::Address::from_str(&args.bucket)
+                .context("failed to parse as eth address")?;
+            ethers_address_to_fil_address(&addr)
+        })
+        .context("failed to parse bucket address")?;
+
+    info!("Parsed bucket address: {}", bucket_address);
+
+    // Parse RPC URL
+    let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?;
+
+    // Create Fendermint client
+    let mut client = FendermintClient::new_http(rpc_url, None)
+        .context("failed to create Fendermint client")?;
+
+    // Set query height
+    let height = args
+        .height
+        .map(FvmQueryHeight::from)
+        .unwrap_or(FvmQueryHeight::Committed);
+
+    // Gas params for the query call
+    let gas_params = GasParams {
+        gas_limit: Default::default(),
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+
+    // Query the object
+    let params = GetParams(args.key.as_bytes().to_vec());
+    let maybe_object = client
+        .os_get_call(bucket_address, params, TokenAmount::default(), gas_params, height)
+        .await
+        .context("failed to query object")?;
+
+    match maybe_object {
+        Some(object) => {
+            println!("Object found!");
+            println!("  Key: {}", args.key);
+            println!("  Hash: 0x{}", hex::encode(object.hash.0));
+            println!("  Recovery hash: 0x{}", hex::encode(object.recovery_hash.0));
+            println!("  Size: {} bytes", object.size);
+            println!("  Expiry epoch: {}", object.expiry);
+            if !object.metadata.is_empty() {
+                println!("  Metadata:");
+                for (key, value) in &object.metadata {
+                    println!("    {}: {}", key, value);
+                }
+            }
+        }
+        None => {
+            println!("Object not found with key: {}", args.key);
+        }
+    }
+
+    Ok(())
+}
diff --git a/ipc-decentralized-storage/src/gateway/mod.rs b/ipc-decentralized-storage/src/gateway/mod.rs
new file mode 100644
index 0000000000..117459b5e0
--- /dev/null
+++ b/ipc-decentralized-storage/src/gateway/mod.rs
@@ -0,0 +1,764 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Gateway module for querying pending blobs from the FVM blockchain
+//!
+//! This module provides a polling gateway that constantly queries the blobs actor
+//! for pending blobs that need to be resolved.
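+//!
+//! A minimal usage sketch (assuming a client already bound to a signing
+//! `SignedMessageFactory`, as set up in `bin/gateway.rs`):
+//!
+//! ```ignore
+//! let client = FendermintClient::new_http(rpc_url, None)?.bind(message_factory);
+//! let mut gateway = BlobGateway::new(client, 10, Duration::from_secs(5));
+//! gateway.run().await?;
+//! ```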
+
+pub mod objects_service;
+
+use anyhow::{Context, Result};
+use bls_signatures::{aggregate, Serialize as BlsSerialize, Signature as BlsSignature};
+use fendermint_actor_blobs_shared::blobs::{
+    BlobStatus, FinalizeBlobParams, GetAddedBlobsParams, SubscriptionId,
+};
+use fendermint_actor_blobs_shared::bytes::B256;
+use fendermint_actor_blobs_shared::method::Method::{
+    FinalizeBlob, GetActiveOperators, GetAddedBlobs, GetOperatorInfo,
+};
+use fendermint_actor_blobs_shared::operators::{
+    GetActiveOperatorsReturn, GetOperatorInfoParams, OperatorInfo,
+};
+use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR;
+use fendermint_rpc::message::GasParams;
+use fendermint_rpc::tx::{BoundClient, TxClient, TxCommit};
+use fendermint_vm_actor_interface::system;
+use fendermint_vm_message::query::FvmQueryHeight;
+use fvm_ipld_encoding::RawBytes;
+use fvm_shared::address::Address;
+use fvm_shared::bigint::Zero;
+use fvm_shared::econ::TokenAmount;
+use fvm_shared::message::Message;
+use std::collections::{HashMap, HashSet};
+use std::time::{Duration, Instant};
+use tokio::time::sleep;
+use tracing::{debug, error, info, warn};
+
+/// A blob item with its hash, size, and subscribers
+/// Note: We use B256 for both hash and source to match the actor's return type exactly.
+/// The actor returns BlobRequest = (B256, u64, HashSet<(Address, SubscriptionId, B256)>)
+pub type BlobItem = (B256, u64, HashSet<(Address, SubscriptionId, B256)>);
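+
+// A `BlobItem` destructures as in the resolution loops below:
+//   let (hash, size, sources) = blob_item;
+//   // each source is a (subscriber, subscription_id, source_node_id) triple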
+
+/// Cached operator information
+struct OperatorCache {
+    /// List of active operator addresses in order (for bitmap indexing)
+    operators: Vec<Address>,
+    /// Operator info by address (BLS pubkey, RPC URL)
+    operator_info: HashMap<Address, OperatorInfo>,
+    /// When this cache was last refreshed
+    last_refresh: Instant,
+}
+
+impl OperatorCache {
+    fn new() -> Self {
+        Self {
+            operators: Vec::new(),
+            operator_info: HashMap::new(),
+            // Set to a time far in the past to force refresh on first use
+            last_refresh: Instant::now() - Duration::from_secs(3600),
+        }
+    }
+
+    fn is_stale(&self, max_age: Duration) -> bool {
+        self.last_refresh.elapsed() > max_age
+    }
+}
+
+/// Signature collection state for a single blob
+struct BlobSignatureCollection {
+    /// When we first saw this blob
+    first_seen: Instant,
+    /// Number of collection attempts
+    retry_count: u32,
+    /// Signatures already collected: operator_index -> signature
+    collected_signatures: HashMap<usize, BlsSignature>,
+    /// Operator indices we've already attempted (to avoid re-querying)
+    attempted_operators: HashSet<usize>,
+    /// Blob metadata needed for finalization
+    blob_metadata: BlobMetadata,
+}
+
+/// Metadata about a blob needed for finalization
+#[derive(Clone)]
+pub struct BlobMetadata {
+    /// Subscriber address that requested the blob
+    subscriber: Address,
+    /// Blob size in bytes
+    size: u64,
+    /// Subscription ID
+    subscription_id: SubscriptionId,
+    /// Source Iroh node ID
+    source: B256,
+}
+
+impl BlobSignatureCollection {
+    fn new(metadata: BlobMetadata) -> Self {
+        Self {
+            first_seen: Instant::now(),
+            retry_count: 0,
+            collected_signatures: HashMap::new(),
+            attempted_operators: HashSet::new(),
+            blob_metadata: metadata,
+        }
+    }
+}
+
+/// Default gas parameters for transactions
+fn default_gas_params() -> GasParams {
+    GasParams {
+        gas_limit: 10_000_000_000,
+        gas_fee_cap: TokenAmount::from_atto(100),
+        gas_premium: TokenAmount::from_atto(100),
+    }
+}
+
+/// Gateway for polling added blobs from the chain
+///
+/// Uses the fendermint RPC client to query the blobs actor for newly added blobs
+/// and submit finalization transactions.
+pub struct BlobGateway<C> {
+    client: C,
+    /// How many added blobs to fetch per query
+    batch_size: u32,
+    /// Polling interval
+    poll_interval: Duration,
+    /// Cached operator data (refreshed periodically)
+    operator_cache: OperatorCache,
+    /// Track blobs awaiting signature collection and finalization
+    pending_finalization: HashMap<B256, BlobSignatureCollection>,
+}
+
+impl<C> BlobGateway<C>
+where
+    C: fendermint_rpc::QueryClient + Send + Sync,
+{
+    /// Create a new blob gateway
+    pub fn new(client: C, batch_size: u32, poll_interval: Duration) -> Self {
+        Self {
+            client,
+            batch_size,
+            poll_interval,
+            operator_cache: OperatorCache::new(),
+            pending_finalization: HashMap::new(),
+        }
+    }
+
+    /// Query added blobs from the chain once
+    pub async fn query_added_blobs(&self) -> Result<Vec<BlobItem>> {
+        debug!("Querying added blobs (batch_size: {})", self.batch_size);
+
+        // Create the query message to the blobs actor
+        let params = GetAddedBlobsParams(self.batch_size);
+        let params =
+            RawBytes::serialize(params).context("failed to serialize GetAddedBlobsParams")?;
+
+        let msg = Message {
+            version: Default::default(),
+            from: system::SYSTEM_ACTOR_ADDR,
+            to: BLOBS_ACTOR_ADDR,
+            sequence: 0,
+            value: TokenAmount::zero(),
+            method_num: GetAddedBlobs as u64,
+            params,
+            gas_limit: 10_000_000_000, // High gas limit for read-only query
+            gas_fee_cap: TokenAmount::zero(),
+            gas_premium: TokenAmount::zero(),
+        };
+
+        // Execute the query using the FendermintClient
+        let response = self
+            .client
+            .call(msg, FvmQueryHeight::default())
+            .await
+            .context("failed to execute GetAddedBlobs call")?;
+
+        if response.value.code.is_err() {
+            anyhow::bail!("GetAddedBlobs query failed: {}", response.value.info);
+        }
+
+        // Decode the return data
+        let return_data = fendermint_rpc::response::decode_data(&response.value.data)
+            .context("failed to decode response data")?;
+
+        let blobs = fvm_ipld_encoding::from_slice::<Vec<BlobItem>>(&return_data)
+            .context("failed to decode added blobs response")?;
+
+        info!("Found {} added blobs", blobs.len());
+        Ok(blobs)
+    }
+}
+
+/// Implementation for transaction-capable clients (can submit finalization transactions)
+impl<C> BlobGateway<C>
+where
+    C: fendermint_rpc::QueryClient + BoundClient + TxClient<TxCommit> + Send + Sync,
+{
+    /// Main entry point: run the gateway to monitor and finalize blobs
+    ///
+    /// This is an alias for run_signature_collection()
+    pub async fn run(&mut self) -> Result<()> {
+        self.run_signature_collection().await
+    }
+
+    /// Main entry point: collect signatures and finalize blobs
+    ///
+    /// This monitors pending blobs, collects signatures from operators,
+    /// aggregates them, and calls finalize_blob on-chain.
+    pub async fn run_signature_collection(&mut self) -> Result<()> {
+        info!(
+            "Starting signature collection loop (interval: {:?})",
+            self.poll_interval
+        );
+
+        loop {
+            if let Err(e) = self.signature_collection_loop().await {
+                error!("Signature collection error: {}", e);
+            }
+
+            sleep(self.poll_interval).await;
+        }
+    }
+
+    async fn signature_collection_loop(&mut self) -> Result<()> {
+        debug!("Starting signature collection loop iteration");
+
+        // Step 1: Refresh operator cache if stale (every 5 minutes)
+        let cache_refresh_interval = Duration::from_secs(300);
+        let needs_refresh = self.operator_cache.is_stale(cache_refresh_interval);
+        debug!(
+            "Operator cache status: {} operators, stale: {}",
+            self.operator_cache.operators.len(),
+            needs_refresh
+        );
+
+        if needs_refresh {
+            info!("Refreshing operator cache...");
+            match self.query_active_operators().await {
+                Ok(operators) => {
+                    self.operator_cache.operators = operators.clone();
+                    self.operator_cache.operator_info.clear();
+
+                    // Fetch operator info for each operator
+                    for operator_addr in &operators {
+                        match self.get_operator_info(*operator_addr).await {
+                            Ok(info) => {
+                                self.operator_cache
+                                    .operator_info
+                                    .insert(*operator_addr, info);
+                            }
+                            Err(e) => {
+                                warn!("Failed to get info for operator {}: {}", operator_addr, e);
+                            }
+                        }
+                    }
+
+                    self.operator_cache.last_refresh = Instant::now();
+                    info!("Operator cache refreshed: {} operators", operators.len());
+                }
+                Err(e) => {
+                    warn!("Failed to refresh operator cache: {}", e);
+                }
+            }
+        }
+
+        // Step 2: Query added blobs and track them
+        match self.query_added_blobs().await {
+            Ok(added_blobs) => {
+                for (hash, size, sources) in added_blobs {
+                    // Extract metadata from sources (pick first source)
+                    if let Some((subscriber, subscription_id, source)) = sources.iter().next() {
+                        // Skip if already tracked
+                        if self.pending_finalization.contains_key(&hash) {
+                            continue;
+                        }
+
+                        let metadata = BlobMetadata {
+                            subscriber: *subscriber,
+                            size,
+                            subscription_id: subscription_id.clone(),
+                            source: *source,
+                        };
+
+                        // Track the blob for signature collection
+                        // (blob will be finalized directly from Added status)
+                        self.pending_finalization
+                            .insert(hash, BlobSignatureCollection::new(metadata));
+                    } else {
+                        warn!("Blob {} has no sources, skipping", hash);
+                    }
+                }
+            }
+            Err(e) => {
+                warn!("Failed to query added blobs: {}", e);
+            }
+        }
+
+        // Step 3: Try to collect signatures for tracked blobs
+        let tracked_blobs: Vec<B256> = self.pending_finalization.keys().copied().collect();
+
+        debug!(
+            "Checking {} blobs for signature collection",
+            tracked_blobs.len()
+        );
+
+        for hash in tracked_blobs {
+            // Get collection once and check if we should skip
+            let Some(collection) = self.pending_finalization.get_mut(&hash) else {
+                continue;
+            };
+
+            // Skip if we just added this blob (give operators time to download)
+            // Use 10 seconds for faster testing
+            let elapsed = collection.first_seen.elapsed();
+            if elapsed < Duration::from_secs(10) {
+                debug!(
+                    "Blob {} waiting for operators to download ({:.1}s / 10s)",
+                    hash,
+                    elapsed.as_secs_f64()
+                );
+                continue;
+            }
+
+            info!(
+                "Blob {} ready for signature collection (waited {:.1}s)",
+                hash,
+                elapsed.as_secs_f64()
+            );
+
+            // Get operators from cache
+            let (operators, total_operators) = (
+                self.operator_cache.operators.clone(),
+                self.operator_cache.operators.len(),
+            );
+
+            if total_operators == 0 {
+                debug!("No operators available, skipping signature collection");
+                continue;
+            }
+
+            let threshold = (total_operators * 2 + 2) / 3; // Ceiling of 2/3
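+            // Integer-division form of ceil(2n / 3): for n >= 1, (2n + 2) / 3 == ceil(2n / 3),
+            // e.g. 4 operators -> threshold 3, 5 -> 4, 7 -> 5.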
+
+            // Collect signatures that aren't already attempted
+            let attempted_operators = collection.attempted_operators.clone();
+
+            // Build list of (index, operator_addr, rpc_url) for operators we need to query
+            let mut fetch_tasks = Vec::new();
+            for (index, operator_addr) in operators.iter().enumerate() {
+                // Skip if already collected
+                if attempted_operators.contains(&index) {
+                    continue;
+                }
+
+                // Get operator RPC URL from cache - skip if not found
+                let Some(operator_info) = self.operator_cache.operator_info.get(operator_addr)
+                else {
+                    warn!(
+                        "Operator {} not found in cache, skipping",
+                        operator_addr
+                    );
+                    continue;
+                };
+
+                fetch_tasks.push((index, *operator_addr, operator_info.rpc_url.clone()));
+            }
+
+            // Fetch signatures from all operators in parallel
+            let fetch_futures: Vec<_> = fetch_tasks
+                .into_iter()
+                .map(|(index, operator_addr, rpc_url)| async move {
+                    let result = Self::fetch_signature_static(&rpc_url, hash).await;
+                    (index, operator_addr, result)
+                })
+                .collect();
+
+            // Wait for all fetches to complete
+            let fetch_results = futures::future::join_all(fetch_futures).await;
+
+            // Collect successful signatures
+            let mut new_signatures: Vec<(usize, BlsSignature)> = Vec::new();
+            for (index, operator_addr, result) in fetch_results {
+                match result {
+                    Ok(signature) => {
+                        info!(
+                            "Got signature from operator {} (index {})",
+                            operator_addr, index
+                        );
+                        new_signatures.push((index, signature));
+                    }
+                    Err(e) => {
+                        warn!(
+                            "Failed to get signature from operator {}: {}",
+                            operator_addr, e
+                        );
+                        // Don't mark as attempted - we'll retry next iteration
+                    }
+                }
+            }
+
+            // Apply all collected signatures at once
+            let collection = self.pending_finalization.get_mut(&hash).unwrap();
+            for (index, signature) in new_signatures {
+                collection.collected_signatures.insert(index, signature);
+                collection.attempted_operators.insert(index);
+            }
+
+            // Get collection reference for final checks
+            let num_collected = collection.collected_signatures.len();
+
+            if num_collected >= threshold {
+                // Collect signatures and build bitmap
+                let sigs_vec: Vec<(usize, BlsSignature)> = collection
+                    .collected_signatures
+                    .iter()
+                    .map(|(idx, sig)| (*idx, *sig))
+                    .collect();
+
+                let mut bitmap: u128 = 0;
+                for idx in collection.collected_signatures.keys() {
+                    bitmap |= 1u128 << idx;
+                }
+
+                info!(
+                    "Collected {}/{} signatures for blob {} (threshold: {})",
+                    num_collected, total_operators, hash, threshold
+                );
+
+                // Get metadata before calling finalize_blob
+                let metadata = collection.blob_metadata.clone();
+
+                // Aggregate signatures
+                match self.aggregate_signatures(sigs_vec) {
+                    Ok(aggregated_sig) => {
+                        info!("Successfully aggregated signature for blob {}", hash);
+                        info!("Bitmap: 0b{:b}", bitmap);
+
+                        // Call finalize_blob with aggregated signature and bitmap
+                        match self
+                            .finalize_blob(hash, &metadata, aggregated_sig, bitmap)
+                            .await
+                        {
+                            Ok(()) => {
+                                // Remove from tracking after successful finalization
+                                self.pending_finalization.remove(&hash);
+                                info!("Blob {} finalized on-chain and removed from tracking", hash);
+                            }
+                            Err(e) => {
+                                warn!("Failed to finalize blob {} on-chain: {}", hash, e);
+                                // Keep in tracking to retry later
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        warn!("Failed to aggregate signatures for {}: {}", hash, e);
+                    }
+                }
+            } else {
+                // Update retry count
+                collection.retry_count += 1;
+
+                // Give up after too many retries or too much time
+                if collection.retry_count > 20
+                    || collection.first_seen.elapsed() > Duration::from_secs(600)
+                {
+                    warn!(
+                        "Giving up on blob {} after {} retries / {:?} (collected {}/{})",
+                        hash,
+                        collection.retry_count,
+                        collection.first_seen.elapsed(),
+                        num_collected,
+                        threshold
+                    );
+                } else {
+                    debug!(
+                        "Blob {} progress: {}/{} signatures (threshold: {})",
+                        hash, num_collected, total_operators, threshold
+                    );
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Additional query methods for all clients (read-only operations)
+impl<C> BlobGateway<C>
+where
+    C: fendermint_rpc::QueryClient + Send + Sync,
+{
+    /// Query the list of active node operators from the chain
+    pub async fn query_active_operators(&self) -> Result<Vec<Address>> {
+        debug!("Querying active operators");
+
+        let msg = Message {
+            version: Default::default(),
+            from: system::SYSTEM_ACTOR_ADDR,
+            to: BLOBS_ACTOR_ADDR,
+            sequence: 0,
+            value: TokenAmount::zero(),
+            method_num: GetActiveOperators as u64,
+            params: RawBytes::default(),
+            gas_limit: 10_000_000_000,
+            gas_fee_cap: TokenAmount::zero(),
+            gas_premium: TokenAmount::zero(),
+        };
+
+        let response = self
+            .client
+            .call(msg, FvmQueryHeight::default())
+            .await
+            .context("failed to execute GetActiveOperators call")?;
+
+        if response.value.code.is_err() {
+            anyhow::bail!("GetActiveOperators query failed: {}", response.value.info);
+        }
+
+        let return_data = fendermint_rpc::response::decode_data(&response.value.data)
+            .context("failed to decode response data")?;
+
+        let result = fvm_ipld_encoding::from_slice::<GetActiveOperatorsReturn>(&return_data)
+            .context("failed to decode active operators response")?;
+
+        info!("Found {} active operators", result.operators.len());
+        Ok(result.operators)
+    }
+
+    /// Get operator info by address
+    pub async fn get_operator_info(&self, address: Address) -> Result<OperatorInfo> {
+        debug!("Querying operator info for {}", address);
+
+        let params = GetOperatorInfoParams { address };
+        let params =
+            RawBytes::serialize(params).context("failed to serialize GetOperatorInfoParams")?;
+
+        let msg = Message {
+            version: Default::default(),
+            from: system::SYSTEM_ACTOR_ADDR,
+            to: BLOBS_ACTOR_ADDR,
+            sequence: 0,
+            value: TokenAmount::zero(),
+            method_num: GetOperatorInfo as u64,
+            params,
+            gas_limit: 10_000_000_000,
+            gas_fee_cap: TokenAmount::zero(),
+            gas_premium: TokenAmount::zero(),
+        };
+
+        let response = self
+            .client
+            .call(msg, FvmQueryHeight::default())
+            .await
+            .context("failed to execute GetOperatorInfo call")?;
+
+        if response.value.code.is_err() {
+            anyhow::bail!("GetOperatorInfo query failed: {}", response.value.info);
+        }
+
+        let return_data = fendermint_rpc::response::decode_data(&response.value.data)
+            .context("failed to decode response data")?;
+
+        let result = fvm_ipld_encoding::from_slice::<Option<OperatorInfo>>(&return_data)
+            .context("failed to decode operator info response")?;
+
+        result.ok_or_else(|| anyhow::anyhow!("Operator not found"))
+    }
+
+    /// Collect signatures from all active operators for a given blob hash
+    ///
+    /// Returns a tuple of (signatures_with_index, bitmap) where:
+    /// - signatures_with_index: Vec of (operator_index, BLS signature)
+    /// - bitmap: u128 bitmap indicating which operators signed
+    pub async fn collect_signatures(
+        &self,
+        blob_hash: B256,
+    ) -> Result<(Vec<(usize, BlsSignature)>, u128)> {
+        info!("Collecting signatures for blob {}", blob_hash);
+
+        // Get active operators
+        let operators = self.query_active_operators().await?;
+
+        if operators.is_empty() {
+            anyhow::bail!("No active operators found");
+        }
+
+        let mut signatures = Vec::new();
+        let mut bitmap: u128 = 0;
+
+        // Query each operator's RPC for the signature
+        for (index, operator_addr) in operators.iter().enumerate() {
+            match self.get_operator_info(*operator_addr).await {
+                Ok(operator_info) => {
+                    match self
+                        .fetch_signature_from_operator(&operator_info.rpc_url, blob_hash)
+                        .await
+                    {
+                        Ok(signature) => {
+                            signatures.push((index, signature));
+                            bitmap |= 1u128 << index;
+                            info!(
+                                "Got signature from operator {} (index {})",
+                                operator_addr, index
+                            );
+                        }
+                        Err(e) => {
+                            warn!(
+                                "Failed to get signature from operator {} ({}): {}",
+                                operator_addr, operator_info.rpc_url, e
+                            );
+                        }
+                    }
+                }
+                Err(e) => {
+                    warn!("Failed to get info for operator {}: {}", operator_addr, e);
+                }
+            }
+        }
+
+        if signatures.is_empty() {
+            anyhow::bail!("No signatures collected from any operator");
+        }
+
+        info!(
+            "Collected {} signatures out of {} operators",
+            signatures.len(),
+            operators.len()
+        );
+
+        Ok((signatures, bitmap))
+    }
+
+    /// Fetch a signature from an operator's RPC endpoint
+    async fn fetch_signature_from_operator(
+        &self,
+        rpc_url: &str,
+        blob_hash: B256,
+    ) -> Result<BlsSignature> {
+        Self::fetch_signature_static(rpc_url, blob_hash).await
+    }
+
+    /// Static version of fetch_signature_from_operator for parallel execution
+    async fn fetch_signature_static(rpc_url: &str, blob_hash: B256) -> Result<BlsSignature> {
+        let url = format!("{}/signature/{}", rpc_url, blob_hash);
+        debug!("Fetching signature from {}", url);
+
+        let client = reqwest::Client::builder()
+            .timeout(Duration::from_secs(10))
+            .build()
+            .context("failed to create HTTP client")?;
+
+        let response = client
+            .get(&url)
+            .send()
+            .await
+            .context("failed to send HTTP request")?;
+
+        if !response.status().is_success() {
+            anyhow::bail!("HTTP request failed with status: {}", response.status());
+        }
+
+        let json: serde_json::Value = response
+            .json()
+            .await
+            .context("failed to parse JSON response")?;
+
+        let signature_hex = json["signature"]
+            .as_str()
+            .ok_or_else(|| anyhow::anyhow!("Missing 'signature' field in response"))?;
+
+        let signature_bytes =
+            hex::decode(signature_hex).context("failed to decode signature hex")?;
+
+        let signature = BlsSignature::from_bytes(&signature_bytes)
+            .map_err(|e| anyhow::anyhow!("Failed to parse BLS signature: {:?}", e))?;
+
+        Ok(signature)
+    }
+
+    /// Aggregate BLS signatures into a single signature
+    pub fn aggregate_signatures(
+        &self,
+        signatures: Vec<(usize, BlsSignature)>,
+    ) -> Result<BlsSignature> {
+        if signatures.is_empty() {
+            anyhow::bail!("Cannot aggregate empty signature list");
+        }
+
+        info!("Aggregating {} signatures", signatures.len());
+
+        let sigs: Vec<BlsSignature> = signatures.into_iter().map(|(_, sig)| sig).collect();
+        let aggregated = aggregate(&sigs)
+            .map_err(|e| anyhow::anyhow!("Failed to aggregate signatures: {:?}", e))?;
+
+        Ok(aggregated)
+    }
+}
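+
+// Verification sketch (hypothetical, not part of this crate): every operator signs the
+// same blob hash, so a verifier could check the aggregate with something like
+//   bls_signatures::verify_messages(&aggregated, &[blob_hash.0.as_ref()], &pubkeys)
+// where `pubkeys` are the operator BLS keys selected by the signer bitmap.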
+
+/// Transaction methods for clients that can submit transactions
+impl<C> BlobGateway<C>
+where
+    C: fendermint_rpc::QueryClient + BoundClient + TxClient<TxCommit> + Send + Sync,
+{
+    /// Call finalize_blob on-chain with aggregated signature and bitmap
+    ///
+    /// This submits a real transaction to the blockchain (not just a query).
+    pub async fn finalize_blob(
+        &mut self,
+        blob_hash: B256,
+        metadata: &BlobMetadata,
+        aggregated_signature: BlsSignature,
+        signer_bitmap: u128,
+    ) -> Result<()> {
+        info!("Finalizing blob {} on-chain", blob_hash);
+
+        // Serialize aggregated signature
+        let signature_bytes = aggregated_signature.as_bytes().to_vec();
+
+        // Create finalize blob params
+        let params = FinalizeBlobParams {
+            source: metadata.source,
+            subscriber: metadata.subscriber,
+            hash: blob_hash,
+            size: metadata.size,
+            id: metadata.subscription_id.clone(),
+            status: BlobStatus::Resolved,
+            aggregated_signature: signature_bytes,
+            signer_bitmap,
+        };
+
+        let params_bytes =
+            RawBytes::serialize(params).context("failed to serialize FinalizeBlobParams")?;
+
+        // Submit actual transaction using TxClient
+        let res = TxClient::<TxCommit>::transaction(
+            &mut self.client,
+            BLOBS_ACTOR_ADDR,
+            FinalizeBlob as u64,
+            params_bytes,
+            TokenAmount::zero(),
+            default_gas_params(),
+        )
+        .await
+        .context("failed to send FinalizeBlob transaction")?;
+
+        if res.response.check_tx.code.is_err() {
+            anyhow::bail!(
+                "FinalizeBlob check_tx failed: {}",
+                res.response.check_tx.log
+            );
+        }
+
+        if res.response.deliver_tx.code.is_err() {
+            anyhow::bail!(
+                "FinalizeBlob deliver_tx failed: {}",
+                res.response.deliver_tx.log
+            );
+        }
+
+        info!(
+            "Successfully finalized blob {} on-chain (tx: {})",
+            blob_hash, res.response.hash
+        );
+        Ok(())
+    }
+}
diff --git a/ipc-decentralized-storage/src/gateway/objects_service.rs b/ipc-decentralized-storage/src/gateway/objects_service.rs
new file mode 100644
index 0000000000..ef6f4450e4
--- /dev/null
+++ b/ipc-decentralized-storage/src/gateway/objects_service.rs
@@ -0,0 +1,72 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Objects service integration for the gateway
+//!
+//! This module provides functionality to start the objects HTTP service
+//! alongside the gateway's blob polling functionality.
+
+use anyhow::Result;
+use iroh_manager::{BlobsClient, IrohNode};
+use std::net::SocketAddr;
+use tracing::info;
+
+use crate::objects::{self, ObjectsConfig};
+
+/// Configuration for the gateway with objects service
+#[derive(Clone, Debug)]
+pub struct GatewayWithObjectsConfig {
+    /// Objects service configuration
+    pub objects_config: ObjectsConfig,
+}
+
+impl Default for GatewayWithObjectsConfig {
+    fn default() -> Self {
+        Self {
+            objects_config: ObjectsConfig::default(),
+        }
+    }
+}
+
+/// Start the objects HTTP service in a background task
+///
+/// This spawns the objects service which handles:
+/// - POST /v1/objects - Upload objects
+/// - GET /v1/objects/{address}/{key} - Download objects from buckets
+/// - GET /v1/blobs/{hash} - Download blobs directly
+///
+/// Returns a handle to the spawned task.
+pub fn start_objects_service(
+    config: ObjectsConfig,
+    iroh_node: IrohNode,
+    iroh_resolver_blobs: BlobsClient,
+) -> tokio::task::JoinHandle<Result<()>> {
+    let listen_addr = config.listen_addr;
+    info!(listen_addr = %listen_addr, "starting objects service in background");
+
+    tokio::spawn(async move {
+        objects::run_objects_service(config, iroh_node, iroh_resolver_blobs).await
+    })
+}
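+
+// Illustrative use (as wired up in `bin/gateway.rs`): hold or await the returned handle.
+//   let _handle = start_objects_service(config, iroh_node.clone(), iroh_node.blobs_client().clone());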
+
+/// Start only the objects HTTP service (blocking)
+///
+/// This is a convenience function that runs the objects service directly
+/// without the gateway's blob polling functionality.
+pub async fn run_objects_service_standalone(
+    listen_addr: SocketAddr,
+    tendermint_url: tendermint_rpc::Url,
+    iroh_node: IrohNode,
+    iroh_resolver_blobs: BlobsClient,
+    max_object_size: u64,
+) -> Result<()> {
+    let config = ObjectsConfig {
+        listen_addr,
+        tendermint_url,
+        max_object_size,
+        metrics_enabled: false,
+        metrics_listen: None,
+    };
+
+    objects::run_objects_service(config, iroh_node, iroh_resolver_blobs).await
+}
diff --git a/ipc-decentralized-storage/src/lib.rs b/ipc-decentralized-storage/src/lib.rs
new file mode 100644
index 0000000000..a73f28b639
--- /dev/null
+++ b/ipc-decentralized-storage/src/lib.rs
@@ -0,0 +1,11 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! IPC Decentralized Storage
+//!
+//! This crate provides decentralized storage abstractions and implementations
+//! for the IPC (Inter-Planetary Consensus) system.
+
+pub mod gateway;
+pub mod node;
+pub mod objects;
diff --git a/ipc-decentralized-storage/src/node/mod.rs b/ipc-decentralized-storage/src/node/mod.rs
new file mode 100644
index 0000000000..ff08cec6c1
--- /dev/null
+++ b/ipc-decentralized-storage/src/node/mod.rs
@@ -0,0 +1,297 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Node module for running a decentralized storage node
+//!
+//! This module provides functionality to run a complete storage node that:
+//! - Starts an Iroh instance for P2P storage
+//! - Polls the chain for newly added blobs
+//! - Resolves blobs by downloading them from the source nodes
+
+mod resolver;
+mod rpc;
+pub mod store;
+
+use anyhow::{Context, Result};
+use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize};
+use ethers::types::Address;
+use fendermint_rpc::FendermintClient;
+use fendermint_actor_blobs_shared::bytes::B256;
+use iroh_blobs::Hash;
+use iroh_manager::IrohNode;
+use std::collections::HashMap;
+use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
+use std::str::FromStr;
+use std::sync::{Arc, RwLock};
+use std::time::Duration;
+use tendermint_rpc::Url;
+use tokio::sync::Mutex;
+use tokio::time::sleep;
+use tracing::{debug, error, info, warn};
+
+use crate::gateway::BlobGateway;
+use resolver::EventPollerConfig;
+use store::InMemoryStore;
+
+/// Configuration for the storage node
+#[derive(Clone)]
+pub struct NodeConfig {
+    /// Path to store Iroh data
+    pub iroh_path: std::path::PathBuf,
+    /// IPv4 bind address for Iroh (optional, uses default if None)
+    pub iroh_v4_addr: Option<SocketAddrV4>,
+    /// IPv6 bind address for Iroh (optional, uses default if None)
+    pub iroh_v6_addr: Option<SocketAddrV6>,
+    /// Tendermint RPC URL
+    pub rpc_url: Url,
+    /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint)
+    pub eth_rpc_url: String,
+    /// Number of blobs to fetch per query
+    pub batch_size: u32,
+    /// Polling interval for querying added blobs
+    pub poll_interval: Duration,
+    /// Maximum concurrent blob downloads
+    pub max_concurrent_downloads: usize,
+    /// BLS private key for signing blob hashes
+    pub bls_private_key: BlsPrivateKey,
+    /// Address to bind the RPC server for signature queries
+    pub rpc_bind_addr: SocketAddr,
+    /// Blobs actor address for event filtering
+    pub blobs_actor_address: Address,
+}
+
+impl std::fmt::Debug for NodeConfig {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("NodeConfig")
+            .field("iroh_path", &self.iroh_path)
+            .field("iroh_v4_addr", &self.iroh_v4_addr)
+            .field("iroh_v6_addr", &self.iroh_v6_addr)
+            .field("rpc_url", &self.rpc_url)
.field("eth_rpc_url", &self.eth_rpc_url) + .field("batch_size", &self.batch_size) + .field("poll_interval", &self.poll_interval) + .field("max_concurrent_downloads", &self.max_concurrent_downloads) + .field("bls_private_key", &"") + .field("rpc_bind_addr", &self.rpc_bind_addr) + .field("blobs_actor_address", &self.blobs_actor_address) + .finish() + } +} + +/// Storage for BLS signatures of resolved blobs +/// Maps blob hash -> BLS signature +pub type SignatureStorage = Arc>>>; + +/// Shared Fendermint client wrapped in Arc for async access +pub type SharedFendermintClient = Arc>; + +impl NodeConfig { + /// Create a new NodeConfig with a generated BLS key + pub fn new_with_generated_key() -> Self { + let bls_private_key = BlsPrivateKey::generate(&mut rand::thread_rng()); + Self { + iroh_path: std::env::current_dir().unwrap().join("iroh_data"), + iroh_v4_addr: None, + iroh_v6_addr: None, + rpc_url: Url::from_str("http://localhost:26657").unwrap(), + eth_rpc_url: "http://localhost:8545".to_string(), + batch_size: 10, + poll_interval: Duration::from_secs(5), + max_concurrent_downloads: 10, + bls_private_key, + rpc_bind_addr: "127.0.0.1:8080".parse().unwrap(), + blobs_actor_address: Address::zero(), // Should be configured + } + } +} + +/// Launch a storage node that polls for added blobs and downloads them +/// +/// This function: +/// 1. Starts an Iroh node for P2P storage +/// 2. Creates an RPC client to query the chain +/// 3. Polls for newly added blobs +/// 4. Downloads blobs from their source nodes using Iroh +/// 5. Polls for blob finalized/deleted events +pub async fn launch(config: NodeConfig) -> Result<()> { + info!("Starting decentralized storage node"); + info!("Iroh path: {}", config.iroh_path.display()); + info!("RPC URL: {}", config.rpc_url); + info!("ETH RPC URL: {}", config.eth_rpc_url); + info!("Poll interval: {:?}", config.poll_interval); + + // Start Iroh node + info!("Starting Iroh node..."); + let iroh_node = + IrohNode::persistent(config.iroh_v4_addr, config.iroh_v6_addr, &config.iroh_path) + .await + .context("failed to start Iroh node")?; + + let node_addr = iroh_node.endpoint().node_addr().await?; + info!("Iroh node started: {}", node_addr.node_id); + + // Create RPC client + info!("Connecting to Fendermint RPC..."); + let client = FendermintClient::new_http(config.rpc_url.clone(), None) + .context("failed to create Fendermint client")?; + + // Create gateway + let gateway = BlobGateway::new(client, config.batch_size, config.poll_interval); + + // Track blobs currently being downloaded (keyed by B256 hash from chain) + let mut in_progress: HashMap>> = HashMap::new(); + // Track blobs that have been downloaded but not yet finalized on-chain + let mut downloaded: HashMap = HashMap::new(); + + // Storage for BLS signatures of downloaded blobs + let signatures: SignatureStorage = Arc::new(RwLock::new(HashMap::new())); + + // Create in-memory store for tracking polled heights + let store = Arc::new(InMemoryStore::new()); + + // Create a separate client for RPC server queries + let rpc_client = FendermintClient::new_http(config.rpc_url.clone(), None) + .context("failed to create RPC server Fendermint client")?; + let rpc_client = Arc::new(Mutex::new(rpc_client)); + + // Start RPC server for signature queries and blob downloads + let signatures_for_rpc = signatures.clone(); + let rpc_bind_addr = config.rpc_bind_addr; + let rpc_client_for_server = rpc_client.clone(); + let iroh_for_rpc = iroh_node.clone(); + tokio::spawn(async move { + if let Err(e) = 
+        if let Err(e) =
+            rpc::start_rpc_server(rpc_bind_addr, signatures_for_rpc, rpc_client_for_server, iroh_for_rpc).await
+        {
+            error!("RPC server error: {}", e);
+        }
+    });
+
+    // Start event poller for blob finalization and deletion
+    let signatures_for_events = signatures.clone();
+    let store_for_events = store.clone();
+    let iroh_for_events = iroh_node.clone();
+    let event_poller_config = EventPollerConfig {
+        eth_rpc_url: config.eth_rpc_url.clone(),
+        poll_interval: config.poll_interval,
+        blobs_actor_address: config.blobs_actor_address,
+    };
+    tokio::spawn(async move {
+        if let Err(e) = resolver::poll_for_blob_events(
+            event_poller_config,
+            signatures_for_events,
+            store_for_events,
+            iroh_for_events,
+        ).await {
+            error!("Event poller error: {}", e);
+        }
+    });
+
+    info!("Starting blob resolution loop");
+    info!(
+        "BLS public key: {:?}",
+        hex::encode(config.bls_private_key.public_key().as_bytes())
+    );
+    info!("RPC server listening on: {}", config.rpc_bind_addr);
+
+    loop {
+        // Check completed downloads and move them to the downloaded set
+        // Collect finished tasks to process
+        let mut finished = Vec::new();
+        in_progress.retain(|hash, handle| {
+            if handle.is_finished() {
+                finished.push(*hash);
+                false // Remove from in_progress
+            } else {
+                true // Keep in in_progress
+            }
+        });
+
+        // Process finished downloads
+        for hash in finished {
+            // Note: The task has finished, but we mark it as downloaded
+            // The actual result checking would require more complex handling
+            // For now, we assume successful completion if the task finished
+            info!("Blob {} download completed, waiting for finalization", hash);
+            downloaded.insert(hash, std::time::Instant::now());
+        }
+
+        // TODO: Query on-chain blob status to check if downloaded blobs are finalized
+        // For now, just log the downloaded blobs waiting for finalization
+        if !downloaded.is_empty() {
+            debug!("Blobs waiting for finalization: {}", downloaded.len());
+            // Clean up old entries (older than 5 minutes) to prevent memory leaks
+            let cutoff = std::time::Instant::now() - Duration::from_secs(300);
+            downloaded.retain(|hash, timestamp| {
+                if *timestamp < cutoff {
+                    warn!("Blob {} has been waiting for finalization for >5 minutes, removing from tracking", hash);
+                    false
+                } else {
+                    true
+                }
+            });
+        }
+
+        // Query for added blobs
+        match gateway.query_added_blobs().await {
+            Ok(blobs) => {
+                if !blobs.is_empty() {
+                    info!("Found {} added blobs to resolve", blobs.len());
+
+                    for blob_item in blobs {
+                        let (hash, size, sources) = blob_item;
+
+                        // Skip if already downloading
+                        if in_progress.contains_key(&hash) {
+                            debug!("Blob {} already in progress, skipping", hash);
+                            continue;
+                        }
+
+                        // Check if we're at the concurrency limit
+                        if in_progress.len() >= config.max_concurrent_downloads {
+                            warn!(
+                                "Max concurrent downloads ({}) reached, deferring blob {}",
+                                config.max_concurrent_downloads, hash
+                            );
+                            continue;
+                        }
+
+                        // Skip if already downloaded and waiting for finalization
+                        if downloaded.contains_key(&hash) {
+                            debug!("Blob {} already downloaded, waiting for finalization", hash);
+                            continue;
+                        }
+
+                        // Spawn a task to download this blob
+                        let iroh_clone = iroh_node.clone();
+                        let bls_key = config.bls_private_key;
+                        let sigs = signatures.clone();
+
+                        // Convert B256 hash to iroh_blobs::Hash
+                        let iroh_hash = Hash::from_bytes(hash.0);
+
+                        // Convert sources from B256 to iroh::NodeId
+                        let iroh_sources: std::collections::HashSet<_> = sources
+                            .into_iter()
+                            .map(|(addr, sub_id, source_b256)| {
+                                let node_id = iroh::NodeId::from_bytes(&source_b256.0)
+                                    .expect("B256 should be valid NodeId bytes");
.expect("B256 should be valid NodeId bytes"); + (addr, sub_id, node_id) + }) + .collect(); + + let handle = tokio::spawn(async move { + resolver::resolve_blob(iroh_clone, iroh_hash, size, iroh_sources, bls_key, sigs).await + }); + + in_progress.insert(hash, handle); + } + } + } + Err(e) => { + error!("Failed to query added blobs: {}", e); + } + } + + // Wait before the next poll + sleep(config.poll_interval).await; + } +} diff --git a/ipc-decentralized-storage/src/node/resolver.rs b/ipc-decentralized-storage/src/node/resolver.rs new file mode 100644 index 0000000000..5f2cf67167 --- /dev/null +++ b/ipc-decentralized-storage/src/node/resolver.rs @@ -0,0 +1,463 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Blob resolution and event handling for the storage node +//! +//! This module provides: +//! - Blob resolution by downloading from source nodes +//! - Event polling for blob finalization and deletion using ethers-rs + +use std::sync::Arc; +use std::time::Duration; + +use anyhow::{Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use ethers::prelude::*; +use ethers::providers::{Http, Provider}; +use iroh_blobs::Hash; +use iroh_manager::IrohNode; +use tracing::{debug, error, info, warn}; + +use super::store::Store; +use super::SignatureStorage; + +// Event signatures for blob events (keccak256 of the event signature) +// BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved) +const BLOB_FINALIZED_TOPIC: &str = + "0x3f5b99de731555264580d7e2f00e46919de0d4f067a01d28aed55632a9068595"; +// BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased) +const BLOB_DELETED_TOPIC: &str = + "0x1ebbc934d9a1e5c0c9bcb94c6a7c55bfa2b66fca0a5d8ed66f0b43a5c8e3c0d8"; + +/// Configuration for the event poller +#[derive(Clone)] +pub struct EventPollerConfig { + /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint) + pub eth_rpc_url: String, + /// Polling interval + pub poll_interval: Duration, + /// Blobs actor address to filter events from + pub blobs_actor_address: Address, +} + +/// Events that the poller can detect +#[derive(Debug, Clone)] +pub enum BlobEvent { + /// A blob has been finalized + Finalized { hash: Hash }, + /// A blob has been deleted + Deleted { hash: Hash }, +} + +/// Poll for blob events (finalized and deleted) using ethers-rs get_logs +/// +/// This function polls the chain for new blocks and processes events +/// related to blob finalization and deletion. 
+pub async fn poll_for_blob_events(
+    config: EventPollerConfig,
+    signatures: SignatureStorage,
+    store: Arc<dyn Store>,
+    iroh: IrohNode,
+) -> Result<()> {
+    info!("Starting event poller for BlobFinalized and BlobDeleted events");
+    info!("ETH RPC URL: {}", config.eth_rpc_url);
+    info!("Poll interval: {:?}", config.poll_interval);
+    info!("Blobs actor address: {:?}", config.blobs_actor_address);
+
+    // Create ethers HTTP provider
+    let provider = Provider::<Http>::try_from(&config.eth_rpc_url)
+        .context("failed to create HTTP provider")?;
+
+    loop {
+        if let Err(e) = poll_once(&provider, &config, &signatures, &store, &iroh).await {
+            error!("Error during event polling: {}", e);
+        }
+
+        tokio::time::sleep(config.poll_interval).await;
+    }
+}
+
+/// Perform a single poll iteration
+async fn poll_once(
+    provider: &Provider<Http>,
+    config: &EventPollerConfig,
+    signatures: &SignatureStorage,
+    store: &Arc<dyn Store>,
+    iroh: &IrohNode,
+) -> Result<()> {
+    // Get the latest block number
+    let latest_block = provider
+        .get_block_number()
+        .await
+        .context("failed to get block number")?;
+    let latest_height = latest_block.as_u64();
+
+    // Get the last polled height from store
+    let last_polled = store.get_last_polled_height()?.unwrap_or(0);
+
+    if latest_height <= last_polled {
+        debug!(
+            "No new blocks to process (latest: {}, last polled: {})",
+            latest_height, last_polled
+        );
+        return Ok(());
+    }
+
+    let from_block = last_polled + 1;
+    debug!(
+        "Processing blocks from {} to {}",
+        from_block, latest_height
+    );
+
+    // Build filter for BlobFinalized events
+    let finalized_filter = Filter::new()
+        .address(config.blobs_actor_address)
+        .topic0(BLOB_FINALIZED_TOPIC.parse::<H256>().unwrap())
+        .from_block(from_block)
+        .to_block(latest_height);
+
+    // Build filter for BlobDeleted events
+    let deleted_filter = Filter::new()
+        .address(config.blobs_actor_address)
+        .topic0(BLOB_DELETED_TOPIC.parse::<H256>().unwrap())
+        .from_block(from_block)
+        .to_block(latest_height);
+
+    // Query for BlobFinalized events
+    let finalized_logs = provider
+        .get_logs(&finalized_filter)
+        .await
+        .context("failed to get BlobFinalized logs")?;
+
+    for log in finalized_logs {
+        if let Some(event) = parse_blob_finalized_log(&log) {
+            handle_blob_event(event, signatures, iroh).await;
+        }
+    }
+
+    // Query for BlobDeleted events
+    let deleted_logs = provider
+        .get_logs(&deleted_filter)
+        .await
+        .context("failed to get BlobDeleted logs")?;
+
+    for log in deleted_logs {
+        if let Some(event) = parse_blob_deleted_log(&log) {
+            handle_blob_event(event, signatures, iroh).await;
+        }
+    }
+
+    // Update the last polled height
+    store.set_last_polled_height(latest_height)?;
+    debug!("Updated last polled height to {}", latest_height);
+
+    Ok(())
+}
+
+/// Parse a BlobFinalized event from a log
+/// Event: BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved)
+fn parse_blob_finalized_log(log: &Log) -> Option<BlobEvent> {
+    // Per the event signature:
+    // event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved);
+    // - subscriber is indexed (topic1)
+    // - hash is not indexed (first 32 bytes of data)
+    // - resolved is not indexed (in data)
+
+    if log.data.len() < 64 {
+        debug!("BlobFinalized log data too short: {} bytes", log.data.len());
+        return None;
+    }
+
+    // First 32 bytes of data is the hash
+    let hash_bytes: [u8; 32] = log.data[0..32].try_into().ok()?;
+    let hash = Hash::from(hash_bytes);
+
+    Some(BlobEvent::Finalized { hash })
+}
+
+/// Parse a BlobDeleted event from a log
+/// Event: BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased)
+fn parse_blob_deleted_log(log: &Log) -> Option<BlobEvent> {
+    // - subscriber is indexed (topic1)
+    // - hash is not indexed (in data, first 32 bytes)
+    // - size is not indexed (in data)
+    // - bytesReleased is not indexed (in data)
+
+    if log.data.len() < 96 {
+        debug!("BlobDeleted log data too short: {} bytes", log.data.len());
+        return None;
+    }
+
+    // First 32 bytes of data is the hash
+    let hash_bytes: [u8; 32] = log.data[0..32].try_into().ok()?;
+    let hash = Hash::from(hash_bytes);
+
+    Some(BlobEvent::Deleted { hash })
+}
+
+/// Handle a blob event
+async fn handle_blob_event(event: BlobEvent, signatures: &SignatureStorage, iroh: &IrohNode) {
+    match event {
+        BlobEvent::Finalized { hash } => {
+            // Remove signature from memory for finalized blobs
+            let mut sigs = signatures.write().unwrap();
+            if sigs.remove(&hash).is_some() {
+                info!("Removed signature for finalized blob {} from memory", hash);
+            } else {
+                debug!(
+                    "Blob {} was finalized but no signature found in memory",
+                    hash
+                );
+            }
+        }
+        BlobEvent::Deleted { hash } => {
+            // Remove signature from memory
+            {
+                let mut sigs = signatures.write().unwrap();
+                if sigs.remove(&hash).is_some() {
+                    info!("Removed signature for deleted blob {} from memory", hash);
+                }
+            }
+
+            // Optionally delete the blob from Iroh storage
+            // Note: This is a best-effort cleanup, failures are logged but not fatal
+            match delete_blob_from_iroh(iroh, hash).await {
+                Ok(deleted) => {
+                    if deleted {
+                        info!("Deleted blob {} from Iroh storage", hash);
+                    } else {
+                        debug!("Blob {} was not found in Iroh storage", hash);
+                    }
+                }
+                Err(e) => {
+                    warn!("Failed to delete blob {} from Iroh storage: {}", hash, e);
+                }
+            }
+        }
+    }
+}
+
+/// Delete a blob and its associated content from Iroh storage
+async fn delete_blob_from_iroh(iroh: &IrohNode, hash: Hash) -> Result<bool> {
+    use iroh_blobs::hashseq::HashSeq;
+
+    // First, try to read the hash sequence to get all associated hashes
+    let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash).await {
+        Ok(bytes) => bytes,
+        Err(_) => {
+            // Blob not found, nothing to delete
+            return Ok(false);
+        }
+    };
+
+    // Parse the hash sequence
+    let content_hashes: Vec<Hash> = match HashSeq::try_from(hash_seq_bytes) {
+        Ok(seq) => seq.iter().collect(),
+        Err(e) => {
+            warn!("Failed to parse hash sequence for {}: {}", hash, e);
+            // Still try to delete the main hash
+            vec![]
+        }
+    };
+
+    // Delete the hash sequence blob tag
+    let seq_tag = iroh_blobs::Tag(format!("blob-seq-{}", hash).into());
+    let _ = iroh.blobs_client().tags().delete(seq_tag).await;
+
+    // Delete content blob tags
+    for content_hash in &content_hashes {
+        let content_tag = iroh_blobs::Tag(format!("blob-{}-{}", hash, content_hash).into());
+        let _ = iroh.blobs_client().tags().delete(content_tag).await;
+    }
+
+    Ok(true)
+}
+
+/// Resolve a blob by downloading it from one of its sources
+///
+/// Downloads the hash sequence and all blobs referenced within it (including original content).
+/// Returns Ok(()) if the blob was successfully downloaded, Err otherwise.
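+///
+/// A hedged usage sketch; the subscriber address, subscription id, and
+/// source node id are placeholders for values normally read from chain state:
+///
+/// ```ignore
+/// let mut sources = std::collections::HashSet::new();
+/// sources.insert((subscriber_addr, subscription_id, source_node_id));
+/// resolve_blob(iroh.clone(), hash, size, sources, bls_key, signatures.clone()).await?;
+/// ```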
+pub async fn resolve_blob( + iroh: IrohNode, + hash: Hash, + size: u64, + sources: std::collections::HashSet<( + fvm_shared::address::Address, + fendermint_actor_blobs_shared::blobs::SubscriptionId, + iroh::NodeId, + )>, + bls_private_key: BlsPrivateKey, + signatures: SignatureStorage, +) -> Result<()> { + use iroh_blobs::hashseq::HashSeq; + + info!("Resolving blob: {} (size: {})", hash, size); + debug!("Sources: {} available", sources.len()); + + // Try each source until one succeeds + for (_subscriber, _id, source_node_id) in sources { + debug!("Attempting download from source: {}", source_node_id); + + // Create a NodeAddr from the source + let source_addr = iroh::NodeAddr::new(source_node_id); + + // Step 1: Download the hash sequence blob + match iroh + .blobs_client() + .download_with_opts( + hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source_addr.clone()], + tag: iroh_blobs::util::SetTagOption::Named(iroh_blobs::Tag( + format!("blob-seq-{}", hash).into(), + )), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + { + Ok(progress) => { + match progress.finish().await { + Ok(outcome) => { + let downloaded_size = outcome.local_size + outcome.downloaded_size; + info!( + "Downloaded hash sequence {} (downloaded: {} bytes, local: {} bytes)", + hash, outcome.downloaded_size, outcome.local_size + ); + + // Step 2: Read and parse the hash sequence to get all referenced blobs + let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash).await { + Ok(bytes) => bytes, + Err(e) => { + warn!("Failed to read hash sequence {}: {}", hash, e); + continue; + } + }; + + let hash_seq = match HashSeq::try_from(hash_seq_bytes) { + Ok(seq) => seq, + Err(e) => { + warn!("Failed to parse hash sequence {}: {}", hash, e); + continue; + } + }; + + let content_hashes: Vec = hash_seq.iter().collect(); + info!( + "Hash sequence {} contains {} blobs to download", + hash, + content_hashes.len() + ); + + // Step 3: Download all blobs in the hash sequence + let mut all_downloaded = true; + for (idx, content_hash) in content_hashes.iter().enumerate() { + let blob_type = if idx == 0 { + "original content" + } else if idx == 1 { + "metadata" + } else { + "parity" + }; + + debug!( + "Downloading {} blob {} ({}/{}): {}", + blob_type, + content_hash, + idx + 1, + content_hashes.len(), + content_hash + ); + + match iroh + .blobs_client() + .download_with_opts( + *content_hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source_addr.clone()], + tag: iroh_blobs::util::SetTagOption::Named(iroh_blobs::Tag( + format!("blob-{}-{}", hash, content_hash).into(), + )), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + { + Ok(content_progress) => { + match content_progress.finish().await { + Ok(content_outcome) => { + debug!( + "Downloaded {} blob {} (downloaded: {} bytes, local: {} bytes)", + blob_type, + content_hash, + content_outcome.downloaded_size, + content_outcome.local_size + ); + } + Err(e) => { + warn!( + "Failed to complete {} blob {} download: {}", + blob_type, content_hash, e + ); + all_downloaded = false; + } + } + } + Err(e) => { + warn!( + "Failed to start {} blob {} download: {}", + blob_type, content_hash, e + ); + all_downloaded = false; + } + } + } + + if !all_downloaded { + warn!( + "Not all content blobs downloaded for {}, trying next source", + hash + ); + continue; + } + + info!( + "Successfully resolved blob {} with 
all {} content blobs (expected original size: {} bytes)", + hash, content_hashes.len(), size + ); + + // Generate BLS signature for the blob hash + let hash_bytes = hash.as_bytes(); + let signature = bls_private_key.sign(hash_bytes); + let signature_bytes = signature.as_bytes(); + + // Store signature in memory + { + let mut sigs = signatures.write().unwrap(); + sigs.insert(hash, signature_bytes.clone()); + } + + info!("Generated BLS signature for blob {}", hash); + debug!("Signature: {}", hex::encode(&signature_bytes)); + debug!("Hash sequence blob size: {} bytes", downloaded_size); + + // Blob downloaded successfully + // It will now wait for validator signatures before finalization + return Ok(()); + } + Err(e) => { + warn!("Failed to complete download from {}: {}", source_node_id, e); + } + } + } + Err(e) => { + warn!("Failed to start download from {}: {}", source_node_id, e); + } + } + } + + anyhow::bail!("Failed to resolve blob {} from any source", hash) +} diff --git a/ipc-decentralized-storage/src/node/rpc.rs b/ipc-decentralized-storage/src/node/rpc.rs new file mode 100644 index 0000000000..97b872aba9 --- /dev/null +++ b/ipc-decentralized-storage/src/node/rpc.rs @@ -0,0 +1,426 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! RPC server for the decentralized storage node +//! +//! Provides HTTP endpoints for: +//! - Signature queries +//! - Blob metadata queries +//! - Blob content retrieval + +use std::convert::Infallible; +use std::net::SocketAddr; + +use anyhow::Result; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_rpc::message::GasParams; +use fendermint_rpc::QueryClient; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_shared::econ::TokenAmount; +use iroh_blobs::Hash; +use iroh_manager::IrohNode; +use tracing::info; +use warp::Filter; + +use super::{SharedFendermintClient, SignatureStorage}; + +/// Start the RPC server for signature queries and blob queries +pub async fn start_rpc_server( + bind_addr: SocketAddr, + signatures: SignatureStorage, + client: SharedFendermintClient, + iroh: IrohNode, +) -> Result<()> { + // GET /signature/{hash} + let get_signature = warp::path!("signature" / String) + .and(warp::get()) + .and(with_signatures(signatures)) + .and_then(handle_get_signature); + + // GET /health + let health = warp::path("health") + .and(warp::get()) + .map(|| warp::reply::json(&serde_json::json!({"status": "ok"}))); + + // GET /v1/blobs/{hash} - returns blob metadata as JSON + let client_for_meta = client.clone(); + let get_blob = warp::path!("v1" / "blobs" / String) + .and(warp::get()) + .and(warp::query::()) + .and(with_client(client_for_meta)) + .and_then(handle_get_blob); + + // GET /v1/blobs/{hash}/content - returns blob content as binary stream + let get_blob_content = warp::path!("v1" / "blobs" / String / "content") + .and(warp::get()) + .and(warp::query::()) + .and(with_client(client)) + .and(with_iroh(iroh)) + .and_then(handle_get_blob_content); + + // CORS configuration - allow all origins for development + let cors = warp::cors() + .allow_any_origin() + .allow_methods(vec!["GET", "POST", "OPTIONS"]) + .allow_headers(vec!["Content-Type", "Authorization"]); + + let routes = get_signature + .or(health) + .or(get_blob_content) + .or(get_blob) + .with(cors); + + info!("RPC server starting on {}", bind_addr); + warp::serve(routes).run(bind_addr).await; + Ok(()) +} + +/// Warp filter to inject signature storage +fn with_signatures( + signatures: SignatureStorage, +) -> impl Filter + Clone { + 
warp::any().map(move || signatures.clone())
+}
+
+/// Response for signature query
+#[derive(serde::Serialize)]
+struct SignatureResponse {
+    hash: String,
+    signature: String,
+}
+
+/// Handle GET /signature/{hash}
+async fn handle_get_signature(
+    hash_str: String,
+    signatures: SignatureStorage,
+) -> Result<impl warp::Reply, warp::Rejection> {
+    use std::str::FromStr;
+
+    // Parse hash from hex string
+    let hash = Hash::from_str(&hash_str).map_err(|_| warp::reject::not_found())?;
+
+    // Look up signature
+    let signature = {
+        let sigs = signatures.read().unwrap();
+        sigs.get(&hash).cloned()
+    };
+
+    match signature {
+        Some(sig) => {
+            let response = SignatureResponse {
+                hash: hash_str,
+                signature: hex::encode(&sig),
+            };
+            Ok(warp::reply::json(&response))
+        }
+        None => Err(warp::reject::not_found()),
+    }
+}
+
+/// Query parameter for optional block height
+#[derive(serde::Deserialize)]
+struct HeightQuery {
+    pub height: Option<u64>,
+}
+
+/// Warp filter to inject Fendermint client
+fn with_client(
+    client: SharedFendermintClient,
+) -> impl Filter<Extract = (SharedFendermintClient,), Error = Infallible> + Clone {
+    warp::any().map(move || client.clone())
+}
+
+/// Response for blob query
+#[derive(serde::Serialize)]
+struct BlobResponse {
+    hash: String,
+    size: u64,
+    metadata_hash: String,
+    status: String,
+    subscribers: Vec<BlobSubscriberInfo>,
+}
+
+/// Subscriber info for blob response
+#[derive(serde::Serialize)]
+struct BlobSubscriberInfo {
+    subscription_id: String,
+    expiry: i64,
+}
+
+/// Error response
+#[derive(serde::Serialize)]
+struct ErrorResponse {
+    error: String,
+}
+
+/// Handle GET /v1/blobs/{hash}
+async fn handle_get_blob(
+    hash_str: String,
+    height_query: HeightQuery,
+    client: SharedFendermintClient,
+) -> Result<impl warp::Reply, warp::Rejection> {
+    // Parse blob hash - strip 0x prefix if present
+    let blob_hash_hex = hash_str.strip_prefix("0x").unwrap_or(&hash_str);
+
+    let blob_hash_bytes = match hex::decode(blob_hash_hex) {
+        Ok(bytes) => bytes,
+        Err(_) => {
+            return Ok(warp::reply::with_status(
+                warp::reply::json(&ErrorResponse {
+                    error: "invalid hex string".to_string(),
+                }),
+                warp::http::StatusCode::BAD_REQUEST,
+            ));
+        }
+    };
+
+    if blob_hash_bytes.len() != 32 {
+        return Ok(warp::reply::with_status(
+            warp::reply::json(&ErrorResponse {
+                error: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()),
+            }),
+            warp::http::StatusCode::BAD_REQUEST,
+        ));
+    }
+
+    let mut hash_array = [0u8; 32];
+    hash_array.copy_from_slice(&blob_hash_bytes);
+    let blob_hash = B256(hash_array);
+
+    // Set query height
+    let height = height_query
+        .height
+        .map(FvmQueryHeight::from)
+        .unwrap_or(FvmQueryHeight::Committed);
+
+    // Gas params for the query call
+    let gas_params = GasParams {
+        gas_limit: Default::default(),
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+
+    // Query the blob
+    let maybe_blob = {
+        let mut client_guard = client.lock().await;
+        client_guard
+            .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height)
+            .await
+    };
+
+    match maybe_blob {
+        Ok(Some(blob)) => {
+            let subscribers: Vec<BlobSubscriberInfo> = blob
+                .subscribers
+                .iter()
+                .map(|(sub_id, expiry)| BlobSubscriberInfo {
+                    subscription_id: sub_id.to_string(),
+                    expiry: *expiry,
+                })
+                .collect();
+
+            let response = BlobResponse {
+                hash: format!("0x{}", hex::encode(blob_hash.0)),
+                size: blob.size,
+                metadata_hash: format!("0x{}", hex::encode(blob.metadata_hash.0)),
+                status: format!("{:?}", blob.status),
+                subscribers,
+            };
+            Ok(warp::reply::with_status(
+                warp::reply::json(&response),
+                warp::http::StatusCode::OK,
+            ))
+        }
+        Ok(None) => Ok(warp::reply::with_status(
+
warp::reply::json(&ErrorResponse { + error: "blob not found".to_string(), + }), + warp::http::StatusCode::NOT_FOUND, + )), + Err(e) => Ok(warp::reply::with_status( + warp::reply::json(&ErrorResponse { + error: format!("query failed: {}", e), + }), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )), + } +} + +/// Warp filter to inject Iroh node +fn with_iroh( + iroh: IrohNode, +) -> impl Filter + Clone { + warp::any().map(move || iroh.clone()) +} + +/// Handle GET /v1/blobs/{hash}/content - returns the actual blob content +async fn handle_get_blob_content( + hash_str: String, + height_query: HeightQuery, + client: SharedFendermintClient, + iroh: IrohNode, +) -> Result { + use futures::TryStreamExt; + use iroh_blobs::hashseq::HashSeq; + use warp::hyper::Body; + + // Parse blob hash - strip 0x prefix if present + let blob_hash_hex = hash_str.strip_prefix("0x").unwrap_or(&hash_str); + + let blob_hash_bytes = match hex::decode(blob_hash_hex) { + Ok(bytes) => bytes, + Err(_) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "invalid hex string".to_string(), + }) + .unwrap(), + )), + warp::http::StatusCode::BAD_REQUEST, + )); + } + }; + + if blob_hash_bytes.len() != 32 { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()), + }) + .unwrap(), + )), + warp::http::StatusCode::BAD_REQUEST, + )); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = B256(hash_array); + + // Set query height + let height = height_query + .height + .map(FvmQueryHeight::from) + .unwrap_or(FvmQueryHeight::Committed); + + // Gas params for the query call + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + // First query the blobs actor to verify the blob exists + let maybe_blob = { + let mut client_guard = client.lock().await; + client_guard + .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height) + .await + }; + + match maybe_blob { + Ok(Some(blob)) => { + // The blob hash is actually a hash sequence hash + let hash_seq_hash = Hash::from_bytes(blob_hash.0); + let size = blob.size; + + // Read the hash sequence from Iroh to get the original content hash + let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash_seq_hash).await { + Ok(bytes) => bytes, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to read hash sequence: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + let hash_seq = match HashSeq::try_from(hash_seq_bytes) { + Ok(seq) => seq, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to parse hash sequence: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // First hash in the sequence is the original content + let orig_hash = match hash_seq.iter().next() { + Some(hash) => hash, + None => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "hash sequence is empty".to_string(), + }) + .unwrap(), + )), + 
warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // Read the actual content from Iroh + let reader = match iroh.blobs_client().read(orig_hash).await { + Ok(reader) => reader, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to read blob content: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // Stream the content as the response body + let bytes_stream = reader.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)); + let body = Body::wrap_stream(bytes_stream); + + let mut response = warp::reply::Response::new(body); + response.headers_mut().insert( + "Content-Type", + warp::http::HeaderValue::from_static("application/octet-stream"), + ); + response.headers_mut().insert( + "Content-Length", + warp::http::HeaderValue::from(size), + ); + + Ok(warp::reply::with_status(response, warp::http::StatusCode::OK)) + } + Ok(None) => Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "blob not found".to_string(), + }) + .unwrap(), + )), + warp::http::StatusCode::NOT_FOUND, + )), + Err(e) => Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("query failed: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )), + } +} diff --git a/ipc-decentralized-storage/src/node/store.rs b/ipc-decentralized-storage/src/node/store.rs new file mode 100644 index 0000000000..cbe9d813c8 --- /dev/null +++ b/ipc-decentralized-storage/src/node/store.rs @@ -0,0 +1,94 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Storage trait and implementations for the storage node +//! +//! This module provides: +//! - A trait for storing node state (e.g., last polled height) +//! - An in-memory implementation for development/testing + +use anyhow::Result; +use std::sync::RwLock; + +/// Storage trait for persisting node state +pub trait Store: Send + Sync { + /// Get the last polled block height + fn get_last_polled_height(&self) -> Result>; + + /// Store the last polled block height + fn set_last_polled_height(&self, height: u64) -> Result<()>; +} + +/// In-memory implementation of the Store trait +/// +/// This implementation stores state in memory and is suitable for +/// development and testing. State is lost when the node restarts. 
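+///
+/// Mirrors the behaviour exercised by the unit tests below:
+///
+/// ```ignore
+/// let store = InMemoryStore::new();
+/// assert_eq!(store.get_last_polled_height()?, None);
+/// store.set_last_polled_height(42)?;
+/// assert_eq!(store.get_last_polled_height()?, Some(42));
+/// ```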
+pub struct InMemoryStore { + last_polled_height: RwLock>, +} + +impl InMemoryStore { + /// Create a new in-memory store + pub fn new() -> Self { + Self { + last_polled_height: RwLock::new(None), + } + } + + /// Create a new in-memory store with an initial height + pub fn with_initial_height(height: u64) -> Self { + Self { + last_polled_height: RwLock::new(Some(height)), + } + } +} + +impl Default for InMemoryStore { + fn default() -> Self { + Self::new() + } +} + +impl Store for InMemoryStore { + fn get_last_polled_height(&self) -> Result> { + let guard = self.last_polled_height.read().map_err(|e| { + anyhow::anyhow!("failed to acquire read lock: {}", e) + })?; + Ok(*guard) + } + + fn set_last_polled_height(&self, height: u64) -> Result<()> { + let mut guard = self.last_polled_height.write().map_err(|e| { + anyhow::anyhow!("failed to acquire write lock: {}", e) + })?; + *guard = Some(height); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_in_memory_store() { + let store = InMemoryStore::new(); + + // Initially None + assert_eq!(store.get_last_polled_height().unwrap(), None); + + // Set and get + store.set_last_polled_height(100).unwrap(); + assert_eq!(store.get_last_polled_height().unwrap(), Some(100)); + + // Update + store.set_last_polled_height(200).unwrap(); + assert_eq!(store.get_last_polled_height().unwrap(), Some(200)); + } + + #[test] + fn test_in_memory_store_with_initial_height() { + let store = InMemoryStore::with_initial_height(50); + assert_eq!(store.get_last_polled_height().unwrap(), Some(50)); + } +} diff --git a/ipc-decentralized-storage/src/objects.rs b/ipc-decentralized-storage/src/objects.rs new file mode 100644 index 0000000000..177ae7fe32 --- /dev/null +++ b/ipc-decentralized-storage/src/objects.rs @@ -0,0 +1,1200 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Objects API service for handling object upload and download +//! +//! This module provides HTTP endpoints for: +//! - Uploading objects to Iroh storage with entanglement +//! - Downloading objects from buckets +//! 
- Downloading blobs directly + +use std::{ + convert::Infallible, net::SocketAddr, num::ParseIntError, path::Path, str::FromStr, + time::Instant, +}; + +use anyhow::{anyhow, Context, Result}; +use bytes::Buf; +use entangler::{ChunkRange, Config, EntanglementResult, Entangler}; +use entangler_storage::iroh::IrohStorage as EntanglerIrohStorage; +use fendermint_actor_bucket::{GetParams, Object}; +use fendermint_rpc::{client::FendermintClient, message::GasParams, QueryClient}; +use fendermint_vm_message::query::FvmQueryHeight; +use futures_util::{StreamExt, TryStreamExt}; +use fvm_shared::address::{Address, Error as NetworkError, Network}; +use fvm_shared::econ::TokenAmount; +use ipc_api::ethers_address_to_fil_address; +use iroh::NodeAddr; +use iroh_blobs::{hashseq::HashSeq, rpc::client::blobs::BlobStatus, util::SetTagOption, Hash}; +use iroh_manager::{get_blob_hash_and_size, BlobsClient, IrohNode}; +use lazy_static::lazy_static; +use mime_guess::get_mime_extensions_str; +use prometheus::{register_histogram, register_int_counter, Histogram, IntCounter}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::{debug, info}; +use uuid::Uuid; +use warp::path::Tail; +use warp::{ + filters::multipart::Part, + http::{HeaderMap, HeaderValue, StatusCode}, + hyper::body::Body, + Filter, Rejection, Reply, +}; + +/// The alpha parameter for alpha entanglement determines the number of parity blobs to generate +/// for the original blob. +const ENTANGLER_ALPHA: u8 = 3; +/// The s parameter for alpha entanglement determines the number of horizontal strands in the grid. +const ENTANGLER_S: u8 = 5; +/// Chunk size used by the entangler. +const CHUNK_SIZE: u64 = 1024; + +/// Configuration for the objects service +#[derive(Clone, Debug)] +pub struct ObjectsConfig { + /// Listen address for the HTTP server + pub listen_addr: SocketAddr, + /// Tendermint RPC URL for FendermintClient + pub tendermint_url: tendermint_rpc::Url, + /// Maximum object size in bytes + pub max_object_size: u64, + /// Enable metrics + pub metrics_enabled: bool, + /// Metrics listen address + pub metrics_listen: Option, +} + +impl Default for ObjectsConfig { + fn default() -> Self { + Self { + listen_addr: "127.0.0.1:8080".parse().unwrap(), + tendermint_url: "http://localhost:26657".parse().unwrap(), + max_object_size: 100 * 1024 * 1024, // 100MB + metrics_enabled: false, + metrics_listen: None, + } + } +} + +/// Run the objects service +/// +/// This starts an HTTP server with endpoints for object upload/download. 
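+///
+/// Illustrative startup under default settings; the Iroh node and blobs
+/// client handles are assumed to come from the caller:
+///
+/// ```ignore
+/// let config = ObjectsConfig::default();
+/// run_objects_service(config, iroh_node, iroh_blobs_client).await?;
+/// ```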
+pub async fn run_objects_service( + config: ObjectsConfig, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, +) -> Result<()> { + if config.metrics_enabled { + if let Some(metrics_listen) = config.metrics_listen { + info!(listen_addr = %metrics_listen, "serving metrics"); + let builder = prometheus_exporter::Builder::new(metrics_listen); + let _ = builder.start().context("failed to start metrics server")?; + } + } else { + info!("metrics disabled"); + } + + let client = FendermintClient::new_http(config.tendermint_url, None)?; + + // Admin routes + let health = warp::path!("health").and(warp::get()).and_then(handle_health); + let node_addr = warp::path!("v1" / "node") + .and(warp::get()) + .and(with_iroh(iroh_node.clone())) + .and_then(handle_node_addr); + + // Objects routes + let objects_upload = warp::path!("v1" / "objects") + .and(warp::post()) + .and(with_iroh(iroh_node.clone())) + .and(warp::multipart::form().max_length(config.max_object_size + 1024 * 1024)) + .and(with_max_size(config.max_object_size)) + .and_then(handle_object_upload); + + let objects_download = warp::path!("v1" / "objects" / String / ..) + .and(warp::path::tail()) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_object_download); + + let blobs_download = warp::path!("v1" / "blobs" / String) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_blob_download); + + let router = health + .or(node_addr) + .or(objects_upload) + .or(blobs_download) + .or(objects_download) + .with( + warp::cors() + .allow_any_origin() + .allow_headers(vec!["Content-Type"]) + .allow_methods(vec!["POST", "DEL", "GET", "HEAD"]), + ) + .recover(handle_rejection); + + info!(listen_addr = %config.listen_addr, "starting objects service"); + warp::serve(router).run(config.listen_addr).await; + + Ok(()) +} + +/// Create the objects service routes (for integration into existing servers) +pub fn objects_routes( + client: FendermintClient, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, + max_object_size: u64, +) -> impl Filter + Clone { + let health = warp::path!("health").and(warp::get()).and_then(handle_health); + let node_addr = warp::path!("v1" / "node") + .and(warp::get()) + .and(with_iroh(iroh_node.clone())) + .and_then(handle_node_addr); + + let objects_upload = warp::path!("v1" / "objects") + .and(warp::post()) + .and(with_iroh(iroh_node.clone())) + .and(warp::multipart::form().max_length(max_object_size + 1024 * 1024)) + .and(with_max_size(max_object_size)) + .and_then(handle_object_upload); + + let objects_download = warp::path!("v1" / "objects" / String / ..) 
+ .and(warp::path::tail()) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_object_download); + + let blobs_download = warp::path!("v1" / "blobs" / String) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_blob_download); + + health + .or(node_addr) + .or(objects_upload) + .or(blobs_download) + .or(objects_download) +} + +fn with_client( + client: FendermintClient, +) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_iroh(client: IrohNode) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_iroh_blobs( + client: BlobsClient, +) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_max_size(max_size: u64) -> impl Filter + Clone { + warp::any().map(move || max_size) +} + +#[derive(Serialize, Deserialize)] +struct HeightQuery { + pub height: Option, +} + +#[derive(Debug, Error)] +enum ObjectsError { + #[error("error parsing range header: `{0}`")] + RangeHeaderParseError(ParseIntError), + #[error("invalid range header")] + RangeHeaderInvalid, +} + +impl From for ObjectsError { + fn from(err: ParseIntError) -> Self { + ObjectsError::RangeHeaderParseError(err) + } +} + +#[derive(Default)] +struct ObjectParser { + hash: Option, + size: Option, + source: Option, + data_part: Option, +} + +impl ObjectParser { + async fn read_part(&mut self, part: Part) -> anyhow::Result> { + let value = part + .stream() + .fold(Vec::new(), |mut vec, data| async move { + if let Ok(data) = data { + vec.extend_from_slice(data.chunk()); + } + vec + }) + .await; + Ok(value) + } + + async fn read_hash(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse hash"))?; + let hash: Hash = text.parse().map_err(|_| anyhow!("cannot parse hash"))?; + self.hash = Some(hash); + Ok(()) + } + + async fn read_size(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse size"))?; + let size: u64 = text.parse().map_err(|_| anyhow!("cannot parse size"))?; + self.size = Some(size); + Ok(()) + } + + async fn read_source(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse source"))?; + let source: NodeAddr = + serde_json::from_str(&text).map_err(|_| anyhow!("cannot parse source"))?; + self.source = Some(source); + Ok(()) + } + + async fn read_form(mut form_data: warp::multipart::FormData) -> anyhow::Result { + let mut object_parser = ObjectParser::default(); + while let Some(part) = form_data.next().await { + let part = part.map_err(|e| anyhow!("cannot read form data: {}", e))?; + match part.name() { + "hash" => { + object_parser.read_hash(part).await?; + } + "size" => { + object_parser.read_size(part).await?; + } + "source" => { + object_parser.read_source(part).await?; + } + "data" => { + object_parser.data_part = Some(part); + // This 
early return was added to avoid the "failed to lock multipart state" error. + // It implies that the data field must be the last one sent in the multipart form. + return Ok(object_parser); + } + // Ignore but accept signature-related fields for backward compatibility + "chain_id" | "msg" => { + // Read and discard the data + let _ = object_parser.read_part(part).await?; + } + _ => { + return Err(anyhow!("unknown form field")); + } + } + } + Ok(object_parser) + } +} + +lazy_static! { + static ref COUNTER_BLOBS_UPLOADED: IntCounter = register_int_counter!( + "objects_blobs_uploaded_total", + "Number of successfully uploaded blobs" + ) + .unwrap(); + static ref COUNTER_BYTES_UPLOADED: IntCounter = register_int_counter!( + "objects_bytes_uploaded_total", + "Number of successfully uploaded bytes" + ) + .unwrap(); + static ref HISTOGRAM_UPLOAD_TIME: Histogram = register_histogram!( + "objects_upload_time_seconds", + "Time spent uploading an object in seconds" + ) + .unwrap(); + static ref COUNTER_BLOBS_DOWNLOADED: IntCounter = register_int_counter!( + "objects_blobs_downloaded_total", + "Number of successfully downloaded blobs" + ) + .unwrap(); + static ref COUNTER_BYTES_DOWNLOADED: IntCounter = register_int_counter!( + "objects_bytes_downloaded_total", + "Number of successfully downloaded bytes" + ) + .unwrap(); + static ref HISTOGRAM_DOWNLOAD_TIME: Histogram = register_histogram!( + "objects_download_time_seconds", + "Time spent downloading an object in seconds" + ) + .unwrap(); +} + +async fn handle_health() -> Result { + Ok(warp::reply::reply()) +} + +async fn handle_node_addr(iroh: IrohNode) -> Result { + let node_addr = iroh.endpoint().node_addr().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to get iroh node address info: {}", e), + }) + })?; + Ok(warp::reply::json(&node_addr)) +} + +#[derive(Serialize)] +struct UploadResponse { + hash: String, // Hash sequence hash (for bucket storage) + orig_hash: String, // Original blob content hash (for addBlob) + metadata_hash: String, +} + +async fn handle_object_upload( + iroh: IrohNode, + form_data: warp::multipart::FormData, + max_size: u64, +) -> Result { + let start_time = Instant::now(); + let parser = ObjectParser::read_form(form_data).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read form: {}", e), + }) + })?; + + let size = match parser.size { + Some(size) => size, + None => { + return Err(Rejection::from(BadRequest { + message: "missing size in form".to_string(), + })) + } + }; + if size > max_size { + return Err(Rejection::from(BadRequest { + message: format!("blob size exceeds maximum of {}", max_size), + })); + } + + let upload_id = Uuid::new_v4(); + + // Handle the two upload cases + let hash = match (parser.source, parser.data_part) { + // Case 1: Source node provided - download from the source + (Some(source), None) => { + let hash = match parser.hash { + Some(hash) => hash, + None => { + return Err(Rejection::from(BadRequest { + message: "missing hash in form".to_string(), + })) + } + }; + + let tag = iroh_blobs::Tag(format!("temp-{hash}-{upload_id}").into()); + let progress = iroh + .blobs_client() + .download_with_opts( + hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source], + tag: SetTagOption::Named(tag), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to fetch blob {}: {}", hash, e), + }) + 
})?; + let outcome = progress.finish().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to fetch blob {}: {}", hash, e), + }) + })?; + let outcome_size = outcome.local_size + outcome.downloaded_size; + if outcome_size != size { + return Err(Rejection::from(BadRequest { + message: format!( + "blob size and given size do not match (expected {}, got {})", + size, outcome_size + ), + })); + } + + debug!( + "downloaded blob {} in {:?} (size: {}; local_size: {}; downloaded_size: {})", + hash, outcome.stats.elapsed, size, outcome.local_size, outcome.downloaded_size, + ); + COUNTER_BYTES_UPLOADED.inc_by(outcome.downloaded_size); + hash + } + + // Case 2: Direct upload - store the provided data + (None, Some(data_part)) => { + let stream = data_part.stream().map(|result| { + result + .map(|mut buf| buf.copy_to_bytes(buf.remaining())) + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::Other, format!("Warp error: {}", e)) + }) + }); + + let batch = iroh.blobs_client().batch().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to store blob: {}", e), + }) + })?; + let temp_tag = batch.add_stream(stream).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to store blob: {}", e), + }) + })?; + + let hash = *temp_tag.hash(); + let new_tag = iroh_blobs::Tag(format!("temp-{hash}-{upload_id}").into()); + batch.persist_to(temp_tag, new_tag).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to persist blob: {}", e), + }) + })?; + + drop(batch); + + let status = iroh.blobs_client().status(hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to check blob status: {}", e), + }) + })?; + let BlobStatus::Complete { size } = status else { + return Err(Rejection::from(BadRequest { + message: "failed to store data".to_string(), + })); + }; + COUNTER_BYTES_UPLOADED.inc_by(size); + debug!("stored uploaded blob {} (size: {})", hash, size); + + hash + } + + (Some(_), Some(_)) => { + return Err(Rejection::from(BadRequest { + message: "cannot provide both source and data".to_string(), + })); + } + + (None, None) => { + return Err(Rejection::from(BadRequest { + message: "must provide either source or data".to_string(), + })); + } + }; + + debug!("raw uploaded hash: {}", hash); + + let ent = new_entangler(iroh.blobs_client()).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to create entangler: {}", e), + }) + })?; + let ent_result = ent.entangle_uploaded(hash.to_string()).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to entangle uploaded data: {}", e), + }) + })?; + + debug!( + "entanglement result: orig_hash={}, metadata_hash={}, upload_results_count={}", + ent_result.orig_hash, + ent_result.metadata_hash, + ent_result.upload_results.len() + ); + + let hash_seq_hash = tag_entangled_data(&iroh, &ent_result, upload_id) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to tag entangled data: {}", e), + }) + })?; + + debug!("hash_seq_hash: {}", hash_seq_hash); + + COUNTER_BLOBS_UPLOADED.inc(); + HISTOGRAM_UPLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + let response = UploadResponse { + hash: hash_seq_hash.to_string(), + orig_hash: ent_result.orig_hash.clone(), + metadata_hash: ent_result.metadata_hash, + }; + Ok(warp::reply::json(&response)) +} + +async fn tag_entangled_data( + iroh: &IrohNode, + ent_result: &EntanglementResult, + upload_id: Uuid, +) -> Result { + let orig_hash = 
Hash::from_str(ent_result.orig_hash.as_str())?;
+    let metadata_hash = Hash::from_str(ent_result.metadata_hash.as_str())?;
+
+    // collect all hashes related to the blob, but ignore the metadata hash, as we want to make
+    // sure that the metadata hash is the second hash in the sequence after the original hash
+    let upload_hashes = ent_result
+        .upload_results
+        .iter()
+        .map(|r| Hash::from_str(&r.hash))
+        .collect::<Result<Vec<Hash>, _>>()?
+        .into_iter()
+        .filter(|h| h != &metadata_hash)
+        .collect::<Vec<_>>();
+
+    let mut hashes = vec![orig_hash, metadata_hash];
+    hashes.extend(upload_hashes);
+
+    let hashes_str = hashes
+        .iter()
+        .map(|h| h.to_string())
+        .collect::<Vec<_>>()
+        .join(", ");
+
+    let batch = iroh.blobs_client().batch().await?;
+
+    // make a hash sequence object from the hashes and upload it to iroh
+    let hash_seq = hashes.into_iter().collect::<HashSeq>();
+
+    let temp_tag = batch
+        .add_bytes_with_opts(hash_seq, iroh_blobs::BlobFormat::HashSeq)
+        .await?;
+    let hash_seq_hash = *temp_tag.hash();
+
+    debug!(
+        "storing hash sequence: {} ({})",
+        hash_seq_hash.to_string(),
+        hashes_str
+    );
+
+    // this tag will be replaced later by the validator to "stored-seq-{hash_seq_hash}"
+    let hash_seq_tag = iroh_blobs::Tag(format!("temp-seq-{hash_seq_hash}").into());
+    batch.persist_to(temp_tag, hash_seq_tag).await?;
+
+    drop(batch);
+
+    // delete all tags returned by the entangler
+    for ent_upload_result in &ent_result.upload_results {
+        let tag_value = ent_upload_result
+            .info
+            .get("tag")
+            .ok_or_else(|| anyhow!("Missing tag in entanglement upload result"))?;
+        let tag = iroh_blobs::Tag::from(tag_value.clone());
+        iroh.blobs_client().tags().delete(tag).await?;
+    }
+
+    // remove upload tags
+    let orig_tag = iroh_blobs::Tag(format!("temp-{orig_hash}-{upload_id}").into());
+    iroh.blobs_client().tags().delete(orig_tag).await?;
+
+    Ok(hash_seq_hash)
+}
+
+fn new_entangler(iroh: &BlobsClient) -> Result<Entangler<EntanglerIrohStorage>, entangler::Error> {
+    Entangler::new(
+        EntanglerIrohStorage::from_client(iroh.clone()),
+        Config::new(ENTANGLER_ALPHA, ENTANGLER_S),
+    )
+}
+
+fn get_range_params(range: String, size: u64) -> Result<(u64, u64), ObjectsError> {
+    let range: Vec<String> = range
+        .replace("bytes=", "")
+        .split('-')
+        .map(|n| n.to_string())
+        .collect();
+    if range.len() != 2 {
+        return Err(ObjectsError::RangeHeaderInvalid);
+    }
+    let (first, mut last): (u64, u64) = match (!range[0].is_empty(), !range[1].is_empty()) {
+        (true, true) => (range[0].parse::<u64>()?, range[1].parse::<u64>()?),
+        (true, false) => (range[0].parse::<u64>()?, size - 1),
+        (false, true) => {
+            let last = range[1].parse::<u64>()?;
+            if last > size {
+                (0, size - 1)
+            } else {
+                (size - last, size - 1)
+            }
+        }
+        (false, false) => (0, size - 1),
+    };
+    if first > last || first >= size {
+        return Err(ObjectsError::RangeHeaderInvalid);
+    }
+    if last >= size {
+        last = size - 1;
+    }
+    Ok((first, last))
+}
+
+struct ObjectRange {
+    start: u64,
+    end: u64,
+    len: u64,
+    size: u64,
+    body: Body,
+}
+
+async fn handle_object_download<F: QueryClient + Send + Sync>(
+    address: String,
+    tail: Tail,
+    method: String,
+    range: Option<String>,
+    height_query: HeightQuery,
+    client: F,
+    iroh: BlobsClient,
+) -> Result<impl Reply, Rejection> {
+    let address = parse_address(&address).map_err(|e| {
+        Rejection::from(BadRequest {
+            message: format!("invalid address {}: {}", address, e),
+        })
+    })?;
+    let height = height_query
+        .height
+        .unwrap_or(FvmQueryHeight::Committed.into());
+
+    let path = urlencoding::decode(tail.as_str())
+        .map_err(|e| {
+            Rejection::from(BadRequest {
+                message: format!("invalid path: {}", e),
+            })
+        })?
+ .to_string(); + + let key: Vec = path.into(); + let start_time = Instant::now(); + let maybe_object = os_get(client, address, GetParams(key.clone()), height) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("bucket get error: {}", e), + }) + })?; + + match maybe_object { + Some(object) => { + let seq_hash = Hash::from_bytes(object.hash.0); + let (hash, size) = get_blob_hash_and_size(&iroh, seq_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + + let ent = new_entangler(&iroh).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to create entangler: {}", e), + }) + })?; + let recovery_hash = Hash::from_bytes(object.recovery_hash.0); + + let object_range = match range { + Some(range) => { + let (first_byte, last_byte) = get_range_params(range, size).map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + let len = (last_byte - first_byte) + 1; + + let first_chunk = first_byte / CHUNK_SIZE; + let last_chunk = last_byte / CHUNK_SIZE; + + let bytes_stream = ent + .download_range( + &hash.to_string(), + ChunkRange::Between(first_chunk, last_chunk), + Some(recovery_hash.to_string()), + ) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to download object: {} {}", hash, e), + }) + })?; + + let offset = (first_byte % CHUNK_SIZE) as usize; + let end_offset = (last_byte % CHUNK_SIZE + 1) as usize; + + let bytes_stream = bytes_stream.enumerate().map(move |(i, chunk)| { + let chunk = chunk?; + let result = if first_chunk == last_chunk { + // Single chunk case - slice with both offsets + chunk.slice(offset..end_offset) + } else if i == 0 { + // First of multiple chunks + chunk.slice(offset..) + } else if i == (last_chunk - first_chunk) as usize { + // Last of multiple chunks + chunk.slice(..end_offset) + } else { + // Middle chunks + chunk + }; + Ok::<_, anyhow::Error>(result) + }); + + let body = Body::wrap_stream(bytes_stream); + ObjectRange { + start: first_byte, + end: last_byte, + len, + size, + body, + } + } + None => { + let bytes_stream = ent + .download(&hash.to_string(), Some(&recovery_hash.to_string())) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to download object: {} {}", hash, e), + }) + })?; + let body = Body::wrap_stream(bytes_stream.map_err(|e| anyhow::anyhow!(e))); + ObjectRange { + start: 0, + end: size - 1, + len: size, + size, + body, + } + } + }; + + // If it is a HEAD request, we don't need to send the body, + // but we still need to send the Content-Length header + if method == "HEAD" { + let mut response = warp::reply::Response::new(Body::empty()); + let mut header_map = HeaderMap::new(); + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + let headers = response.headers_mut(); + headers.extend(header_map); + return Ok(response); + } + + let mut response = warp::reply::Response::new(object_range.body); + let mut header_map = HeaderMap::new(); + if object_range.len < object_range.size { + *response.status_mut() = StatusCode::PARTIAL_CONTENT; + header_map.insert( + "Content-Range", + HeaderValue::from_str(&format!( + "bytes {}-{}/{}", + object_range.start, object_range.end, object_range.size + )) + .unwrap(), + ); + } else { + header_map.insert("Accept-Ranges", HeaderValue::from_str("bytes").unwrap()); + } + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + + let content_type = object + .metadata + .get("content-type") + .cloned() + 
.unwrap_or_else(|| "application/octet-stream".to_string()); + header_map.insert( + "Content-Type", + HeaderValue::from_str(&content_type).unwrap(), + ); + + let key_str = String::from_utf8_lossy(&key); + if let Some(val) = get_filename_with_extension(&key_str, &content_type) { + let disposition = format!("attachment; filename=\"{}\"", val); + header_map.insert( + "Content-Disposition", + HeaderValue::from_str(&disposition).unwrap(), + ); + } + + let headers = response.headers_mut(); + headers.extend(header_map); + + COUNTER_BLOBS_DOWNLOADED.inc(); + COUNTER_BYTES_DOWNLOADED.inc_by(object_range.len); + HISTOGRAM_DOWNLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + Ok(response) + } + None => Err(Rejection::from(NotFound)), + } +} + +/// Handle direct blob download by querying the blobs actor. +async fn handle_blob_download( + blob_hash_str: String, + method: String, + range: Option, + height_query: HeightQuery, + client: F, + iroh: BlobsClient, +) -> Result { + // Strip 0x prefix if present + let blob_hash_hex = blob_hash_str.strip_prefix("0x").unwrap_or(&blob_hash_str); + + let blob_hash_bytes = hex::decode(blob_hash_hex).map_err(|e| { + Rejection::from(BadRequest { + message: format!("invalid blob hash {}: {}", blob_hash_str, e), + }) + })?; + + if blob_hash_bytes.len() != 32 { + return Err(Rejection::from(BadRequest { + message: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()), + })); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = fendermint_actor_blobs_shared::bytes::B256(hash_array); + + let height = height_query + .height + .unwrap_or(FvmQueryHeight::Committed.into()); + + let start_time = Instant::now(); + + // Query the blobs actor to get blob info + let maybe_blob = blob_get(client, blob_hash, height).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("blobs actor query error: {}", e), + }) + })?; + + match maybe_blob { + Some(blob) => { + // The blob hash from blobs actor is the hash sequence hash + // We need to parse it to get the original content hash + let hash_seq_hash = Hash::from_bytes(blob_hash.0); + let size = blob.size; + + debug!( + "blob download: hash_seq_hash={}, size={}", + hash_seq_hash, size + ); + + // Read the hash sequence to get the original content hash + let hash_seq_bytes = iroh.read_to_bytes(hash_seq_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read hash sequence: {} {}", hash_seq_hash, e), + }) + })?; + + let hash_seq = HashSeq::try_from(hash_seq_bytes).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to parse hash sequence: {}", e), + }) + })?; + + // First hash in the sequence is the original content + let orig_hash = hash_seq.iter().next().ok_or_else(|| { + Rejection::from(BadRequest { + message: "hash sequence is empty".to_string(), + }) + })?; + + debug!("parsed orig_hash from hash sequence: {}", orig_hash); + + let object_range = match range { + Some(range) => { + let (first_byte, last_byte) = get_range_params(range, size).map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + let len = (last_byte - first_byte) + 1; + + // Use read_at for range requests on the original content + use iroh_blobs::rpc::client::blobs::ReadAtLen; + let read_len = ReadAtLen::AtMost(len); + let bytes = iroh + .read_at_to_bytes(orig_hash, first_byte, read_len) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!( + "failed to read blob at range: {} {}", 
+ orig_hash, e + ), + }) + })?; + + let body = Body::from(bytes); + ObjectRange { + start: first_byte, + end: last_byte, + len, + size, + body, + } + } + None => { + // Read the entire original content blob directly from Iroh + debug!("reading original content with hash: {}", orig_hash); + + let reader = iroh.read(orig_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read blob: {} {}", orig_hash, e), + }) + })?; + + let bytes_stream = reader.map(move |chunk_result: Result| { + chunk_result.map_err(|e: std::io::Error| anyhow::anyhow!(e)) + }); + + let body = Body::wrap_stream(bytes_stream); + ObjectRange { + start: 0, + end: size - 1, + len: size, + size, + body, + } + } + }; + + // If it is a HEAD request, we don't need to send the body + if method == "HEAD" { + let mut response = warp::reply::Response::new(Body::empty()); + let mut header_map = HeaderMap::new(); + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + let headers = response.headers_mut(); + headers.extend(header_map); + return Ok(response); + } + + let mut response = warp::reply::Response::new(object_range.body); + let mut header_map = HeaderMap::new(); + if object_range.len < object_range.size { + *response.status_mut() = StatusCode::PARTIAL_CONTENT; + header_map.insert( + "Content-Range", + HeaderValue::from_str(&format!( + "bytes {}-{}/{}", + object_range.start, object_range.end, object_range.size + )) + .unwrap(), + ); + } else { + header_map.insert("Accept-Ranges", HeaderValue::from_str("bytes").unwrap()); + } + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + header_map.insert( + "Content-Type", + HeaderValue::from_str("application/octet-stream").unwrap(), + ); + + let headers = response.headers_mut(); + headers.extend(header_map); + + COUNTER_BLOBS_DOWNLOADED.inc(); + COUNTER_BYTES_DOWNLOADED.inc_by(object_range.len); + HISTOGRAM_DOWNLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + Ok(response) + } + None => Err(Rejection::from(NotFound)), + } +} + +/// Parse an f/eth-address from string. +pub fn parse_address(s: &str) -> anyhow::Result
+    let addr = Network::Mainnet
+        .parse_address(s)
+        .or_else(|e| match e {
+            NetworkError::UnknownNetwork => Network::Testnet.parse_address(s),
+            _ => Err(e),
+        })
+        .or_else(|_| {
+            let addr = ethers::types::Address::from_str(s)?;
+            ethers_address_to_fil_address(&addr)
+        })?;
+    Ok(addr)
+}
+
+// Rejection handlers
+
+#[derive(Clone, Debug)]
+struct BadRequest {
+    message: String,
+}
+
+impl warp::reject::Reject for BadRequest {}
+
+#[derive(Debug)]
+struct NotFound;
+
+impl warp::reject::Reject for NotFound {}
+
+#[derive(Clone, Debug, Serialize)]
+struct ErrorMessage {
+    code: u16,
+    message: String,
+}
+
+async fn handle_rejection(err: Rejection) -> Result<impl Reply, std::convert::Infallible> {
+    let (code, message) = if err.is_not_found() || err.find::<NotFound>().is_some() {
+        (StatusCode::NOT_FOUND, "Not Found".to_string())
+    } else if let Some(e) = err.find::<BadRequest>() {
+        let err = e.to_owned();
+        (StatusCode::BAD_REQUEST, err.message)
+    } else if err.find::<warp::reject::PayloadTooLarge>().is_some() {
+        (
+            StatusCode::PAYLOAD_TOO_LARGE,
+            "Payload too large".to_string(),
+        )
+    } else {
+        (StatusCode::INTERNAL_SERVER_ERROR, format!("{:?}", err))
+    };
+
+    let reply = warp::reply::json(&ErrorMessage {
+        code: code.as_u16(),
+        message,
+    });
+    let reply = warp::reply::with_header(reply, "Access-Control-Allow-Origin", "*");
+    Ok(warp::reply::with_status(reply, code))
+}
+
+// RPC methods
+
+async fn os_get<F: QueryClient>(
+    mut client: F,
+    address: Address,
+    params: GetParams,
+    height: u64,
+) -> anyhow::Result<Option<Object>> {
+    let gas_params = GasParams {
+        gas_limit: Default::default(),
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+    let h = FvmQueryHeight::from(height);
+
+    let return_data = client
+        .os_get_call(address, params, TokenAmount::default(), gas_params, h)
+        .await?;
+
+    Ok(return_data)
+}
+
+async fn blob_get<F: QueryClient>(
+    mut client: F,
+    blob_hash: fendermint_actor_blobs_shared::bytes::B256,
+    height: u64,
+) -> anyhow::Result<Option<Blob>> {
+    let gas_params = GasParams {
+        gas_limit: Default::default(),
+        gas_fee_cap: Default::default(),
+        gas_premium: Default::default(),
+    };
+    let h = FvmQueryHeight::from(height);
+
+    let return_data = client
+        .blob_get_call(blob_hash, TokenAmount::default(), gas_params, h)
+        .await?;
+
+    Ok(return_data)
+}
+
+fn get_filename_with_extension(filename: &str, content_type: &str) -> Option<String> {
+    let path = Path::new(filename);
+
+    // Checks if filename already has an extension
+    if path.extension().and_then(|ext| ext.to_str()).is_some() {
+        return Some(filename.to_string());
+    }
+
+    get_mime_extensions_str(content_type)?
+        .first()
+        .map(|ext| format!("{}.{}", filename, ext))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_get_range_params() {
+        // bad formats
+        assert!(get_range_params("bytes=0,50".into(), 100).is_err());
+        assert!(get_range_params("bytes=-0-50".into(), 100).is_err());
+        assert!(get_range_params("bytes=-50-".into(), 100).is_err());
+        // first > last
+        assert!(get_range_params("bytes=50-0".into(), 100).is_err());
+        // first >= size
+        assert!(get_range_params("bytes=100-".into(), 100).is_err());
+        // first == last
+        let (first, last) = get_range_params("bytes=0-0".into(), 100).unwrap();
+        assert_eq!(first, 0);
+        assert_eq!(last, 0);
+        // exact range given
+        let (first, last) = get_range_params("bytes=0-50".into(), 100).unwrap();
+        assert_eq!(first, 0);
+        assert_eq!(last, 50);
+        // only end given, this means "give me the last 50 bytes"
+        let (first, last) = get_range_params("bytes=-50".into(), 100).unwrap();
+        assert_eq!(first, 50);
+        assert_eq!(last, 99);
+        // only start given, this means "give me everything but the first 50 bytes"
+        let (first, last) = get_range_params("bytes=50-".into(), 100).unwrap();
+        assert_eq!(first, 50);
+        assert_eq!(last, 99);
+        // neither given, this means "give me everything"
+        let (first, last) = get_range_params("bytes=-".into(), 100).unwrap();
+        assert_eq!(first, 0);
+        assert_eq!(last, 99);
+        // last >= size
+        let (first, last) = get_range_params("bytes=50-100".into(), 100).unwrap();
+        assert_eq!(first, 50);
+        assert_eq!(last, 99);
+    }
+}
diff --git a/ipc-dropbox/.env.example b/ipc-dropbox/.env.example
new file mode 100644
index 0000000000..9c9059842d
--- /dev/null
+++ b/ipc-dropbox/.env.example
@@ -0,0 +1,8 @@
+# IPC Network Configuration
+VITE_TENDERMINT_RPC=http://localhost:26657
+VITE_OBJECTS_LISTEN_ADDR=http://localhost:8080
+VITE_NODE_OPERATION_OBJECT_API=http://localhost:8081
+VITE_ETH_RPC=http://localhost:8545
+VITE_BLOBS_ACTOR=0x6d342defae60f6402aee1f804653bbae4e66ae46
+VITE_ADM_ACTOR=0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28
+VITE_CHAIN_ID=1023102
diff --git a/ipc-dropbox/README.md b/ipc-dropbox/README.md
new file mode 100644
index 0000000000..1cb15f41f8
--- /dev/null
+++ b/ipc-dropbox/README.md
@@ -0,0 +1,89 @@
+# IPC Decentralized Dropbox
+
+A Dropbox-like web application for storing and managing files on the IPC network.
+
+## Prerequisites
+
+- Node.js 18+
+- MetaMask browser extension
+- Running IPC network services:
+  - Gateway (port 8080)
+  - Node (port 8081)
+  - Tendermint RPC (port 26657)
+  - Ethereum RPC (port 8545)
+
+## Setup
+
+1. Install dependencies:
+
+```bash
+npm install
+```
+
+2. Copy the environment file and configure:
+
+```bash
+cp .env.example .env
+```
+
+Edit `.env` with your service URLs if they differ from the defaults.
+
+3. Start the development server:
+
+```bash
+npm run dev
+```
+
+4. Open http://localhost:3000 in your browser
+
+## Configuration
+
+The following environment variables can be configured:
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `VITE_TENDERMINT_RPC` | `http://localhost:26657` | Tendermint RPC endpoint |
+| `VITE_OBJECTS_LISTEN_ADDR` | `http://localhost:8080` | Gateway objects API |
+| `VITE_NODE_OPERATION_OBJECT_API` | `http://localhost:8081` | Node operation API |
+| `VITE_ETH_RPC` | `http://localhost:8545` | Ethereum RPC endpoint |
+| `VITE_BLOBS_ACTOR` | `0x6d342...` | Blobs actor contract address |
+| `VITE_ADM_ACTOR` | `0x7caec...` | ADM actor contract address |
+
+## Usage Flow
+
+1. **Connect Wallet**: Click "Connect MetaMask" to connect your wallet. The app will attempt to switch to the IPC network automatically.
+
+2. **Buy Credit**: If you don't have credit, purchase some using FIL. Credit is required for storage.
+
+3. **Create Bucket**: Create a storage bucket to hold your files. Each bucket is an on-chain smart contract.
+
+4. **Upload Files**: Once you have credit and a bucket, you can (see the upload sketch after this list):
+   - Upload files using the "Upload File" button
+   - Create folders for organization
+   - Navigate through folders using breadcrumbs
+
+5. **Download Files**: Click the "Download" button next to any file to retrieve it (a range-request sketch follows the README).
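The upload in step 4 is a two-step operation: the file bytes go to the gateway over HTTP, and the returned blob hash is then registered on-chain against the bucket contract. The sketch below condenses what `useUpload.ts` later in this patch does; `uploadToBucket` is a hypothetical wrapper name, the endpoints are the config defaults, and `base32ToHex`/`BUCKET_ABI` are the helpers defined in `src/utils/`.

```ts
import { ethers } from 'ethers';
import { base32ToHex } from './utils/base32';   // helper defined later in this patch
import { BUCKET_ABI } from './utils/contracts'; // human-readable ABI defined later in this patch

// Sketch of the two-step upload flow (hypothetical wrapper; error handling omitted).
async function uploadToBucket(signer: ethers.Signer, bucketAddress: string, file: File) {
  // Step 1: push the bytes to the gateway; it replies with a base32 blob hash.
  const form = new FormData();
  form.append('size', file.size.toString());
  form.append('data', file);
  const res = await fetch('http://localhost:8080/v1/objects', { method: 'POST', body: form });
  const { hash, metadata_hash } = await res.json();

  // The source node id comes from the gateway's node info endpoint.
  const node = await (await fetch('http://localhost:8080/v1/node')).json();

  // Step 2: register the blob under a key in the bucket contract.
  const bucket = new ethers.Contract(bucketAddress, BUCKET_ABI, signer);
  const tx = await bucket.addObject(
    '0x' + node.node_id,        // source node
    file.name,                  // object key
    base32ToHex(hash),          // blob hash as bytes32
    base32ToHex(metadata_hash), // metadata/recovery hash as bytes32
    BigInt(file.size),
  );
  await tx.wait();
}
```

After the transaction confirms, the blobs actor still has to fetch the blob from the source node; the app polls `getBlob` until the status flips from `Pending` to `Resolved` before treating the upload as complete.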
+## Features
+
+- MetaMask wallet integration
+- Credit balance display and purchase
+- Bucket creation and management
+- File upload to gateway + on-chain registration
+- Folder-based navigation (S3-style)
+- File download from node
+
+## Tech Stack
+
+- React 18
+- TypeScript
+- Vite
+- ethers.js v6
+
+## Building for Production
+
+```bash
+npm run build
+```
+
+The built files will be in the `dist` directory.
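Downloads go straight to the node's blob API. The Rust handler added earlier in this patch honours HTTP `Range` headers and answers `206 Partial Content` with a `Content-Range` header, so a client can pull a slice of a large blob without downloading all of it. A minimal sketch, assuming the same endpoint shape as `useDownload.ts` (the byte range and `fetchBlobSlice` name are illustrative):

```ts
// Fetch only the first KiB of a blob from the node's object API.
async function fetchBlobSlice(blobHash: string, nodeApi = 'http://localhost:8081'): Promise<Uint8Array> {
  const hash = blobHash.startsWith('0x') ? blobHash.slice(2) : blobHash;
  const res = await fetch(`${nodeApi}/v1/blobs/${hash}/content`, {
    headers: { Range: 'bytes=0-1023' },
  });
  if (!res.ok) {
    // res.ok covers 206 as well; anything else is a failure.
    throw new Error(`download failed: ${res.statusText}`);
  }
  // Partial responses carry "Content-Range: bytes <start>-<end>/<total>".
  console.log(res.status, res.headers.get('Content-Range'));
  return new Uint8Array(await res.arrayBuffer());
}
```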
diff --git a/ipc-dropbox/index.html b/ipc-dropbox/index.html
new file mode 100644
index 0000000000..0fce51b4a2
--- /dev/null
+++ b/ipc-dropbox/index.html
@@ -0,0 +1,13 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>IPC Decentralized Dropbox</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
diff --git a/ipc-dropbox/package.json b/ipc-dropbox/package.json
new file mode 100644
index 0000000000..e69fc0743d
--- /dev/null
+++ b/ipc-dropbox/package.json
@@ -0,0 +1,23 @@
+{
+  "name": "recall-dropbox",
+  "version": "1.0.0",
+  "private": true,
+  "type": "module",
+  "scripts": {
+    "dev": "vite",
+    "build": "tsc && vite build",
+    "preview": "vite preview"
+  },
+  "dependencies": {
+    "ethers": "^6.9.0",
+    "react": "^18.2.0",
+    "react-dom": "^18.2.0"
+  },
+  "devDependencies": {
+    "@types/react": "^18.2.43",
+    "@types/react-dom": "^18.2.17",
+    "@vitejs/plugin-react": "^4.2.1",
+    "typescript": "^5.3.3",
+    "vite": "^5.0.10"
+  }
+}
diff --git a/ipc-dropbox/src/App.tsx b/ipc-dropbox/src/App.tsx
new file mode 100644
index 0000000000..e708aeb517
--- /dev/null
+++ b/ipc-dropbox/src/App.tsx
@@ -0,0 +1,132 @@
+import React from 'react';
+import { useWallet } from './hooks/useWallet';
+import { useCredit } from './hooks/useCredit';
+import { useBucket, useFileExplorer } from './hooks/useBucket';
+import { useUpload } from './hooks/useUpload';
+import { useDownload } from './hooks/useDownload';
+import { WalletConnect } from './components/WalletConnect';
+import { CreditManager } from './components/CreditManager';
+import { BucketManager } from './components/BucketManager';
+import { FileExplorer } from './components/FileExplorer';
+
+function App() {
+  const wallet = useWallet();
+  const credit = useCredit(wallet.signer, wallet.address);
+  const bucket = useBucket(wallet.signer, wallet.address);
+  const fileExplorer = useFileExplorer(wallet.signer, bucket.bucketAddress);
+  const upload = useUpload(wallet.signer, bucket.bucketAddress);
+  const download = useDownload();
+
+  return (
+    <div className="app">
+      <header className="header">
+        <h1>IPC Decentralized Dropbox</h1>
+        <WalletConnect
+          address={wallet.address}
+          isConnecting={wallet.isConnecting}
+          error={wallet.error}
+          onConnect={wallet.connect}
+          onDisconnect={wallet.disconnect}
+        />
+      </header>
+
+      <main className="main">
+        {!wallet.isConnected ? (
+          <div className="welcome">
+            <h2>Welcome to IPC Decentralized Dropbox</h2>
+            <p>Connect your wallet to start storing files on the IPC network.</p>
+          </div>
+        ) : !credit.hasCredit ? (
+          <div className="setup-step">
+            <h2>Step 1: Get Storage Credit</h2>
+            <CreditManager
+              credit={credit.credit}
+              hasCredit={!!credit.hasCredit}
+              isLoading={credit.isLoading}
+              isPurchasing={credit.isPurchasing}
+              error={credit.error}
+              onFetchCredit={credit.fetchCredit}
+              onBuyCredit={credit.buyCredit}
+            />
+          </div>
+        ) : !bucket.hasBucket ? (
+          <div className="setup-step">
+            <h2>Step 2: Create a Storage Bucket</h2>
+            <div className="credit-summary">
+              <CreditManager
+                credit={credit.credit}
+                hasCredit={!!credit.hasCredit}
+                isLoading={credit.isLoading}
+                isPurchasing={credit.isPurchasing}
+                error={credit.error}
+                onFetchCredit={credit.fetchCredit}
+                onBuyCredit={credit.buyCredit}
+              />
+            </div>
+            <BucketManager
+              bucketAddress={bucket.bucketAddress}
+              hasBucket={bucket.hasBucket}
+              isLoading={bucket.isLoading}
+              isCreating={bucket.isCreating}
+              error={bucket.error}
+              onFetchBuckets={bucket.fetchBuckets}
+              onCreateBucket={bucket.createBucket}
+            />
+          </div>
+        ) : (
+          <div className="dashboard">
+            <div className="sidebar">
+              <CreditManager
+                credit={credit.credit}
+                hasCredit={!!credit.hasCredit}
+                isLoading={credit.isLoading}
+                isPurchasing={credit.isPurchasing}
+                error={credit.error}
+                onFetchCredit={credit.fetchCredit}
+                onBuyCredit={credit.buyCredit}
+              />
+              <BucketManager
+                bucketAddress={bucket.bucketAddress}
+                hasBucket={bucket.hasBucket}
+                isLoading={bucket.isLoading}
+                isCreating={bucket.isCreating}
+                error={bucket.error}
+                onFetchBuckets={bucket.fetchBuckets}
+                onCreateBucket={bucket.createBucket}
+              />
+            </div>
+            <div className="content">
+              <FileExplorer
+                files={fileExplorer.files}
+                currentPath={fileExplorer.currentPath}
+                isLoading={fileExplorer.isLoading}
+                isUploading={upload.isUploading}
+                isDeleting={fileExplorer.isDeleting}
+                uploadProgress={upload.uploadProgress}
+                error={fileExplorer.error}
+                uploadError={upload.error}
+                deleteError={fileExplorer.deleteError}
+                onNavigateToFolder={fileExplorer.navigateToFolder}
+                onNavigateUp={fileExplorer.navigateUp}
+                onRefresh={fileExplorer.refresh}
+                onUpload={upload.uploadFile}
+                onDownload={download.downloadFile}
+                onDelete={fileExplorer.deleteObject}
+                onFetchFiles={fileExplorer.fetchFiles}
+              />
+            </div>
+          </div>
+        )}
+      </main>
+
+      <footer className="footer">
+        <p>Powered by IPC Network</p>
+      </footer>
+    </div>
+  );
+}
+
+export default App;
diff --git a/ipc-dropbox/src/components/BucketManager.tsx b/ipc-dropbox/src/components/BucketManager.tsx
new file mode 100644
index 0000000000..4cab36ef0d
--- /dev/null
+++ b/ipc-dropbox/src/components/BucketManager.tsx
@@ -0,0 +1,59 @@
+import React, { useEffect } from 'react';
+
+interface BucketManagerProps {
+  bucketAddress: string | null;
+  hasBucket: boolean;
+  isLoading: boolean;
+  isCreating: boolean;
+  error: string | null;
+  onFetchBuckets: () => Promise<void>;
+  onCreateBucket: () => Promise<void>;
+}
+
+export function BucketManager({
+  bucketAddress,
+  hasBucket,
+  isLoading,
+  isCreating,
+  error,
+  onFetchBuckets,
+  onCreateBucket,
+}: BucketManagerProps) {
+  useEffect(() => {
+    onFetchBuckets();
+  }, [onFetchBuckets]);
+
+  const shortenAddress = (addr: string) =>
+    `${addr.slice(0, 10)}...${addr.slice(-8)}`;
+
+  if (isLoading) {
+    return <div className="loading">Checking for buckets...</div>;
+  }
+
+  return (
+    <div className="bucket-manager">
+      <h3>Storage Bucket</h3>
+      {hasBucket ? (
+        <div className="bucket-info">
+          <p>
+            Bucket Address:{' '}
+            <code>{shortenAddress(bucketAddress!)}</code>
+          </p>
+        </div>
+      ) : (
+        <div>
+          <p>You need a bucket to store files.</p>
+          <button
+            className="btn btn-primary"
+            onClick={onCreateBucket}
+            disabled={isCreating}
+          >
+            {isCreating ? 'Creating...' : 'Create Bucket'}
+          </button>
+        </div>
+      )}
+
+      {error && <p className="error">{error}</p>}
+    </div>
+  );
+}
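Bucket creation is a single call on the ADM actor; the new bucket's address is recovered from the `MachineInitialized` event in the transaction receipt, exactly as `useBucket.ts` below does. A trimmed sketch (`createBucket` here is a standalone helper, not the hook itself):

```ts
import { ethers } from 'ethers';
import { getAdmContract, MACHINE_INITIALIZED_TOPIC } from '../utils/contracts';

// Create a bucket and pull its address out of the MachineInitialized event.
async function createBucket(signer: ethers.Signer, admActor: string): Promise<string | null> {
  const adm = getAdmContract(admActor, signer);
  const receipt = await (await adm.createBucket()).wait();
  if (!receipt) return null;
  for (const log of receipt.logs) {
    if (log.topics[0] === MACHINE_INITIALIZED_TOPIC) {
      // The machine address is the last 20 bytes of the 32-byte data word.
      return ethers.getAddress('0x' + log.data.slice(26, 66));
    }
  }
  return null;
}
```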
diff --git a/ipc-dropbox/src/components/CreditManager.tsx b/ipc-dropbox/src/components/CreditManager.tsx
new file mode 100644
index 0000000000..ee071bebc0
--- /dev/null
+++ b/ipc-dropbox/src/components/CreditManager.tsx
@@ -0,0 +1,83 @@
+import React, { useEffect, useState } from 'react';
+import { ethers } from 'ethers';
+import { CreditInfo } from '../types';
+
+interface CreditManagerProps {
+  credit: CreditInfo | null;
+  hasCredit: boolean;
+  isLoading: boolean;
+  isPurchasing: boolean;
+  error: string | null;
+  onFetchCredit: () => void;
+  onBuyCredit: (amount: string) => Promise<void>;
+}
+
+export function CreditManager({
+  credit,
+  hasCredit,
+  isLoading,
+  isPurchasing,
+  error,
+  onFetchCredit,
+  onBuyCredit,
+}: CreditManagerProps) {
+  const [amount, setAmount] = useState('0.1');
+
+  useEffect(() => {
+    onFetchCredit();
+  }, [onFetchCredit]);
+
+  const formatCredit = (value: bigint) => {
+    return ethers.formatEther(value);
+  };
+
+  const handleBuyCredit = async () => {
+    await onBuyCredit(amount);
+  };
+
+  if (isLoading) {
+    return <div className="loading">Loading credit info...</div>;
+  }
+
+  return (
+    <div className="credit-manager">
+      <h3>Credit Balance</h3>
+      {credit && (
+        <div className="credit-info">
+          <p>
+            Current Credit: <strong>{formatCredit(credit.balance)} FIL</strong>
+          </p>
+          <p>
+            Free Credit: <strong>{formatCredit(credit.freeCredit)} FIL</strong>
+          </p>
+        </div>
+      )}
+
+      {!hasCredit && (
+        <div className="buy-credit">
+          <p>You need credit to use IPC storage.</p>
+          <div className="buy-form">
+            <input
+              type="number"
+              value={amount}
+              onChange={(e) => setAmount(e.target.value)}
+              step="0.1"
+              min="0.01"
+              className="input"
+            />
+            <span className="unit">FIL</span>
+            <button
+              className="btn btn-primary"
+              onClick={handleBuyCredit}
+              disabled={isPurchasing}
+            >
+              {isPurchasing ? 'Purchasing...' : 'Buy Credit'}
+            </button>
+          </div>
+        </div>
+      )}
+
+      {error && <p className="error">{error}</p>}
+    </div>
+  );
+}
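Credit is read and purchased through the blobs actor. `getAccount` returns a struct whose `creditFree` and `creditCommitted` fields the component above displays, and `buyCredit` is a payable call funded with FIL. A sketch using the ABI helpers from `src/utils/contracts.ts` (the 0.1 FIL amount and `topUpCredit` name are illustrative):

```ts
import { ethers } from 'ethers';
import { getBlobsContract } from '../utils/contracts';

// Read the current credit balance, then top it up with 0.1 FIL.
async function topUpCredit(signer: ethers.Signer, blobsActor: string, address: string) {
  const blobs = getBlobsContract(blobsActor, signer);

  // ethers v6 returns tuples that can be read by name or index.
  const account = await blobs.getAccount(address);
  const free: bigint = account.creditFree ?? account[1];
  const committed: bigint = account.creditCommitted ?? account[2];
  console.log(`credit: free=${ethers.formatEther(free)} committed=${ethers.formatEther(committed)}`);

  // buyCredit is payable; the attached value is converted into storage credit.
  const tx = await blobs.buyCredit({ value: ethers.parseEther('0.1') });
  await tx.wait();
}
```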
diff --git a/ipc-dropbox/src/components/FileExplorer.tsx b/ipc-dropbox/src/components/FileExplorer.tsx
new file mode 100644
index 0000000000..51301ed70f
--- /dev/null
+++ b/ipc-dropbox/src/components/FileExplorer.tsx
@@ -0,0 +1,237 @@
+import React, { useEffect, useRef, useState } from 'react';
+import { FileItem } from '../types';
+
+interface FileExplorerProps {
+  files: FileItem[];
+  currentPath: string;
+  isLoading: boolean;
+  isUploading: boolean;
+  isDeleting: boolean;
+  uploadProgress: string;
+  error: string | null;
+  uploadError: string | null;
+  deleteError: string | null;
+  onNavigateToFolder: (path: string) => void;
+  onNavigateUp: () => void;
+  onRefresh: () => void;
+  onUpload: (file: File, targetPath: string) => Promise<boolean>;
+  onDownload: (blobHash: string, fileName: string) => Promise<void>;
+  onDelete: (key: string) => Promise<void>;
+  onFetchFiles: (prefix: string) => void;
+}
+
+export function FileExplorer({
+  files,
+  currentPath,
+  isLoading,
+  isUploading,
+  isDeleting,
+  uploadProgress,
+  error,
+  uploadError,
+  deleteError,
+  onNavigateToFolder,
+  onNavigateUp,
+  onRefresh,
+  onUpload,
+  onDownload,
+  onDelete,
+  onFetchFiles,
+}: FileExplorerProps) {
+  const fileInputRef = useRef<HTMLInputElement>(null);
+  const [newFolderName, setNewFolderName] = useState('');
+  const [showNewFolderInput, setShowNewFolderInput] = useState(false);
+
+  useEffect(() => {
+    onFetchFiles(currentPath);
+  }, [onFetchFiles, currentPath]);
+
+  const handleFileSelect = async (e: React.ChangeEvent<HTMLInputElement>) => {
+    const file = e.target.files?.[0];
+    if (file) {
+      const success = await onUpload(file, currentPath);
+      if (success) {
+        onRefresh();
+      }
+    }
+    // Reset input
+    if (fileInputRef.current) {
+      fileInputRef.current.value = '';
+    }
+  };
+
+  const handleCreateFolder = () => {
+    if (newFolderName.trim()) {
+      const folderPath = currentPath + newFolderName.trim() + '/';
+      onNavigateToFolder(folderPath);
+      setNewFolderName('');
+      setShowNewFolderInput(false);
+    }
+  };
+
+  const formatSize = (size?: bigint) => {
+    if (!size) return '-';
+    const bytes = Number(size);
+    if (bytes < 1024) return `${bytes} B`;
+    if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
+    if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
+    return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)} GB`;
+  };
+
+  const getBreadcrumbs = () => {
+    const parts = currentPath.split('/').filter(Boolean);
+    const crumbs = [{ name: 'Home', path: '' }];
+    let path = '';
+    for (const part of parts) {
+      path += part + '/';
+      crumbs.push({ name: part, path });
+    }
+    return crumbs;
+  };
+
+  return (
+    <div className="file-explorer">
+      <div className="explorer-toolbar">
+        <div className="breadcrumbs">
+          {getBreadcrumbs().map((crumb, index, arr) => (
+            <React.Fragment key={crumb.path}>
+              <button
+                className="breadcrumb"
+                onClick={() => onNavigateToFolder(crumb.path)}
+                disabled={index === arr.length - 1}
+              >
+                {crumb.name}
+              </button>
+              {index < arr.length - 1 && <span className="separator">/</span>}
+            </React.Fragment>
+          ))}
+        </div>
+
+        <div className="toolbar-actions">
+          <button className="btn btn-icon" onClick={onNavigateUp} disabled={!currentPath}>
+            Up
+          </button>
+          <button className="btn btn-icon" onClick={onRefresh}>
+            Refresh
+          </button>
+          <button className="btn btn-icon" onClick={() => setShowNewFolderInput(true)}>
+            New Folder
+          </button>
+          <button
+            className="btn btn-primary"
+            onClick={() => fileInputRef.current?.click()}
+            disabled={isUploading}
+          >
+            {isUploading ? uploadProgress || 'Uploading...' : 'Upload File'}
+          </button>
+          <input
+            ref={fileInputRef}
+            type="file"
+            style={{ display: 'none' }}
+            onChange={handleFileSelect}
+          />
+        </div>
+      </div>
+
+      {showNewFolderInput && (
+        <div className="new-folder-input">
+          <input
+            type="text"
+            value={newFolderName}
+            onChange={(e) => setNewFolderName(e.target.value)}
+            placeholder="Folder name"
+            className="input"
+            onKeyDown={(e) => e.key === 'Enter' && handleCreateFolder()}
+          />
+          <button className="btn btn-primary btn-small" onClick={handleCreateFolder}>
+            Create
+          </button>
+          <button className="btn btn-secondary btn-small" onClick={() => setShowNewFolderInput(false)}>
+            Cancel
+          </button>
+        </div>
+      )}
+
+      {(error || uploadError || deleteError) && (
+        <p className="error">{error || uploadError || deleteError}</p>
+      )}
+
+      {isLoading ? (
+        <div className="loading">Loading files...</div>
+      ) : files.length === 0 ? (
+        <div className="empty-state">
+          <p>This folder is empty</p>
+          <p className="hint">Upload a file or create a folder to get started</p>
+        </div>
+      ) : (
+        <div className="file-list">
+          <div className="file-header">
+            <span>Name</span>
+            <span className="col-size">Size</span>
+            <span className="col-actions">Actions</span>
+          </div>
+          {files.map((file) => (
+            <div className="file-row" key={file.fullPath}>
+              <span className="col-name">
+                {file.isFolder ? (
+                  <button className="folder-link" onClick={() => onNavigateToFolder(file.fullPath)}>
+                    <span className="icon folder-icon">Folder</span>
+                    {file.name}
+                  </button>
+                ) : (
+                  <span className="file-name">
+                    <span className="icon file-icon">File</span>
+                    <span>{file.name}</span>
+                  </span>
+                )}
+              </span>
+              <span className="col-size">{formatSize(file.size)}</span>
+              <span className="col-actions">
+                {!file.isFolder && file.blobHash && (
+                  <>
+                    <button
+                      className="btn btn-secondary btn-small"
+                      onClick={() => onDownload(file.blobHash!, file.name)}
+                    >
+                      Download
+                    </button>
+                    <button
+                      className="btn btn-danger btn-small"
+                      onClick={() => onDelete(file.fullPath)}
+                      disabled={isDeleting}
+                    >
+                      Delete
+                    </button>
+                  </>
+                )}
+              </span>
+            </div>
+          ))}
+        </div>
+      )}
+    </div>
+  );
+}
diff --git a/ipc-dropbox/src/components/WalletConnect.tsx b/ipc-dropbox/src/components/WalletConnect.tsx
new file mode 100644
index 0000000000..8be4cc4e8a
--- /dev/null
+++ b/ipc-dropbox/src/components/WalletConnect.tsx
@@ -0,0 +1,42 @@
+import React from 'react';
+
+interface WalletConnectProps {
+  address: string | null;
+  isConnecting: boolean;
+  error: string | null;
+  onConnect: () => void;
+  onDisconnect: () => void;
+}
+
+export function WalletConnect({
+  address,
+  isConnecting,
+  error,
+  onConnect,
+  onDisconnect,
+}: WalletConnectProps) {
+  const shortenAddress = (addr: string) =>
+    `${addr.slice(0, 6)}...${addr.slice(-4)}`;
+
+  return (
+    <div className="wallet-connect">
+      {address ? (
+        <div className="wallet-info">
+          <span className="wallet-address">{shortenAddress(address)}</span>
+          <button className="btn btn-secondary" onClick={onDisconnect}>
+            Disconnect
+          </button>
+        </div>
+      ) : (
+        <button className="btn btn-primary" onClick={onConnect} disabled={isConnecting}>
+          {isConnecting ? 'Connecting...' : 'Connect MetaMask'}
+        </button>
+      )}
+      {error && <p className="error">{error}</p>}
+    </div>
+ ); +} diff --git a/ipc-dropbox/src/hooks/useBucket.ts b/ipc-dropbox/src/hooks/useBucket.ts new file mode 100644 index 0000000000..2eaee998dd --- /dev/null +++ b/ipc-dropbox/src/hooks/useBucket.ts @@ -0,0 +1,252 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getAdmContract, getBucketContract, MACHINE_INITIALIZED_TOPIC } from '../utils/contracts'; +import { QueryResult, ObjectEntry, FileItem } from '../types'; + +export function useBucket(signer: ethers.Signer | null, address: string | null) { + const [bucketAddress, setBucketAddress] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const [isCreating, setIsCreating] = useState(false); + const [error, setError] = useState(null); + + const fetchBuckets = useCallback(async () => { + if (!signer || !address) return []; + + setIsLoading(true); + setError(null); + + try { + const config = getConfig(); + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getAdmContract(config.admActor, provider); + // listBuckets returns array of (kind, addr, metadata[]) + const machines = await contract.listBuckets(address); + + console.log('listBuckets raw result:', machines); + + // ethers.js v6 returns tuples as arrays, access by index + // Machine = [kind, addr, metadata[]] + const buckets: string[] = []; + for (const m of machines) { + // Access as array: m[0] = kind, m[1] = addr, m[2] = metadata + const kind = typeof m.kind !== 'undefined' ? m.kind : m[0]; + const addr = typeof m.addr !== 'undefined' ? m.addr : m[1]; + console.log('Machine:', { kind, addr }); + if (Number(kind) === 0) { + buckets.push(addr); + } + } + + console.log('Filtered buckets:', buckets); + + if (buckets.length > 0) { + setBucketAddress(buckets[0]); // Use the first bucket + } + + return buckets; + } catch (err: unknown) { + const error = err as Error; + console.error('fetchBuckets error:', err); + setError(error.message || 'Failed to fetch buckets'); + return []; + } finally { + setIsLoading(false); + } + }, [signer, address]); + + const createBucket = useCallback(async () => { + if (!signer) { + setError('Wallet not connected'); + return null; + } + + setIsCreating(true); + setError(null); + + try { + const config = getConfig(); + const contract = getAdmContract(config.admActor, signer); + const tx = await contract.createBucket(); + const receipt = await tx.wait(); + + // Extract bucket address from MachineInitialized event + let newBucketAddress: string | null = null; + for (const log of receipt.logs) { + if (log.topics[0] === MACHINE_INITIALIZED_TOPIC) { + // The address is in the data field (last 20 bytes of 32-byte word) + const data = log.data; + newBucketAddress = '0x' + data.slice(26, 66); + break; + } + } + + if (newBucketAddress) { + setBucketAddress(newBucketAddress); + } + + return newBucketAddress; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Failed to create bucket'); + return null; + } finally { + setIsCreating(false); + } + }, [signer]); + + const selectBucket = useCallback((address: string) => { + setBucketAddress(address); + }, []); + + return { + bucketAddress, + isLoading, + isCreating, + error, + fetchBuckets, + createBucket, + selectBucket, + hasBucket: !!bucketAddress, + }; +} + +export function useFileExplorer(signer: ethers.Signer | null, bucketAddress: string | null) { + const 
[files, setFiles] = useState([]); + const [currentPath, setCurrentPath] = useState(''); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + + const fetchFiles = useCallback(async (prefix: string = '') => { + if (!signer || !bucketAddress) return; + + setIsLoading(true); + setError(null); + + try { + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getBucketContract(bucketAddress, provider); + + let result: QueryResult; + if (prefix) { + result = await contract['queryObjects(string,string)'](prefix, '/'); + } else { + result = await contract['queryObjects(string,string)']('', '/'); + } + + const fileItems: FileItem[] = []; + + // Add folders from commonPrefixes + for (const folderPath of result.commonPrefixes) { + const name = folderPath.slice(prefix.length).replace(/\/$/, ''); + if (name) { + fileItems.push({ + name, + fullPath: folderPath, + isFolder: true, + }); + } + } + + // Add files from objects + console.log('queryObjects result:', result); + console.log('objects:', result.objects); + for (const obj of result.objects) { + console.log('Raw object:', obj); + const objEntry = obj as unknown as ObjectEntry; + const key = objEntry.key || (obj as unknown as { 0: string })[0]; + const state = objEntry.state || (obj as unknown as { 1: { 0: string; 1: bigint; 2: bigint } })[1]; + + console.log('Parsed object:', { key, state }); + + const name = key.slice(prefix.length); + if (name && !name.includes('/')) { + const fileItem = { + name, + fullPath: key, + isFolder: false, + size: state.size ?? (state as unknown as { 1: bigint })[1], + expiry: state.expiry ?? (state as unknown as { 2: bigint })[2], + blobHash: state.blobHash ?? (state as unknown as { 0: string })[0], + }; + console.log('FileItem:', fileItem); + fileItems.push(fileItem); + } + } + + console.log('Final fileItems:', fileItems); + setFiles(fileItems); + setCurrentPath(prefix); + } catch (err: unknown) { + const error = err as Error; + console.error('fetchFiles error:', err); + setError(error.message || 'Failed to fetch files'); + } finally { + setIsLoading(false); + } + }, [signer, bucketAddress]); + + const navigateToFolder = useCallback((folderPath: string) => { + fetchFiles(folderPath); + }, [fetchFiles]); + + const navigateUp = useCallback(() => { + if (!currentPath) return; + const parts = currentPath.split('/').filter(Boolean); + parts.pop(); + const newPath = parts.length > 0 ? 
parts.join('/') + '/' : ''; + fetchFiles(newPath); + }, [currentPath, fetchFiles]); + + const refresh = useCallback(() => { + fetchFiles(currentPath); + }, [fetchFiles, currentPath]); + + const [isDeleting, setIsDeleting] = useState(false); + const [deleteError, setDeleteError] = useState(null); + + const deleteObject = useCallback(async (key: string) => { + if (!signer || !bucketAddress) { + setDeleteError('Wallet or bucket not connected'); + return false; + } + + setIsDeleting(true); + setDeleteError(null); + + try { + const contract = getBucketContract(bucketAddress, signer); + const tx = await contract.deleteObject(key); + await tx.wait(); + + // Refresh the file list after deletion + await fetchFiles(currentPath); + return true; + } catch (err: unknown) { + const error = err as Error; + console.error('deleteObject error:', err); + setDeleteError(error.message || 'Failed to delete object'); + return false; + } finally { + setIsDeleting(false); + } + }, [signer, bucketAddress, fetchFiles, currentPath]); + + return { + files, + currentPath, + isLoading, + error, + fetchFiles, + navigateToFolder, + navigateUp, + refresh, + deleteObject, + isDeleting, + deleteError, + }; +} diff --git a/ipc-dropbox/src/hooks/useCredit.ts b/ipc-dropbox/src/hooks/useCredit.ts new file mode 100644 index 0000000000..1ef9352dbc --- /dev/null +++ b/ipc-dropbox/src/hooks/useCredit.ts @@ -0,0 +1,88 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getBlobsContract } from '../utils/contracts'; +import { CreditInfo } from '../types'; + +export function useCredit(signer: ethers.Signer | null, address: string | null) { + const [credit, setCredit] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const [isPurchasing, setIsPurchasing] = useState(false); + const [error, setError] = useState(null); + + const fetchCredit = useCallback(async () => { + if (!signer || !address) return; + + setIsLoading(true); + setError(null); + + try { + const config = getConfig(); + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getBlobsContract(config.blobsActor, provider); + const account = await contract.getAccount(address); + + console.log('getAccount raw result:', account); + + // Access by property name or index (ethers v6 returns both) + const creditFree = account.creditFree ?? account[1]; + const creditCommitted = account.creditCommitted ?? account[2]; + const lastDebitEpoch = account.lastDebitEpoch ?? 
account[4]; + + console.log('Parsed credit:', { creditFree, creditCommitted, lastDebitEpoch }); + + setCredit({ + balance: creditFree + creditCommitted, + freeCredit: creditFree, + lastDebitEpoch: BigInt(lastDebitEpoch), + }); + } catch (err: unknown) { + const error = err as Error; + console.error('fetchCredit error:', err); + setError(error.message || 'Failed to fetch credit'); + } finally { + setIsLoading(false); + } + }, [signer, address]); + + const buyCredit = useCallback(async (amountEther: string) => { + if (!signer) { + setError('Wallet not connected'); + return false; + } + + setIsPurchasing(true); + setError(null); + + try { + const config = getConfig(); + const contract = getBlobsContract(config.blobsActor, signer); + const tx = await contract.buyCredit({ + value: ethers.parseEther(amountEther), + }); + await tx.wait(); + await fetchCredit(); + return true; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Failed to buy credit'); + return false; + } finally { + setIsPurchasing(false); + } + }, [signer, fetchCredit]); + + const hasCredit = credit && (credit.balance > 0n || credit.freeCredit > 0n); + + return { + credit, + isLoading, + isPurchasing, + error, + fetchCredit, + buyCredit, + hasCredit, + }; +} diff --git a/ipc-dropbox/src/hooks/useDownload.ts b/ipc-dropbox/src/hooks/useDownload.ts new file mode 100644 index 0000000000..8326f34acd --- /dev/null +++ b/ipc-dropbox/src/hooks/useDownload.ts @@ -0,0 +1,58 @@ +import { useState, useCallback } from 'react'; +import { getConfig } from '../utils/config'; + +export function useDownload() { + const [isDownloading, setIsDownloading] = useState(false); + const [error, setError] = useState(null); + + const downloadFile = useCallback(async (blobHash: string, fileName: string) => { + console.log('downloadFile called:', { blobHash, fileName }); + setIsDownloading(true); + setError(null); + + try { + const config = getConfig(); + + // Remove 0x prefix if present + const hash = blobHash.startsWith('0x') ? 
blobHash.slice(2) : blobHash; + console.log('Fetching from:', `${config.nodeOperationObjectApi}/v1/blobs/${hash}/content`); + + const response = await fetch(`${config.nodeOperationObjectApi}/v1/blobs/${hash}/content`); + + if (!response.ok) { + throw new Error(`Download failed: ${response.statusText}`); + } + + const blob = await response.blob(); + + // Create download link + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = fileName; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + + return true; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Download failed'); + return false; + } finally { + setIsDownloading(false); + } + }, []); + + const clearError = useCallback(() => { + setError(null); + }, []); + + return { + isDownloading, + error, + downloadFile, + clearError, + }; +} diff --git a/ipc-dropbox/src/hooks/useUpload.ts b/ipc-dropbox/src/hooks/useUpload.ts new file mode 100644 index 0000000000..4b389e173f --- /dev/null +++ b/ipc-dropbox/src/hooks/useUpload.ts @@ -0,0 +1,145 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getBucketContract, getBlobsContract, BlobStatus } from '../utils/contracts'; +import { base32ToHex } from '../utils/base32'; +import { UploadResponse, NodeInfo } from '../types'; + +export function useUpload(signer: ethers.Signer | null, bucketAddress: string | null) { + const [isUploading, setIsUploading] = useState(false); + const [uploadProgress, setUploadProgress] = useState(''); + const [blobStatus, setBlobStatus] = useState(null); + const [error, setError] = useState(null); + + const pollBlobStatus = useCallback(async (blobHash: string, maxAttempts: number = 60) => { + const config = getConfig(); + const provider = signer?.provider; + if (!provider) return; + + const blobsContract = getBlobsContract(config.blobsActor, provider); + + for (let i = 0; i < maxAttempts; i++) { + try { + const blob = await blobsContract.getBlob(blobHash); + const status = Number(blob.status ?? blob[3]); + + if (status === BlobStatus.Resolved) { + setBlobStatus('Resolved'); + setUploadProgress('Upload complete! Blob resolved.'); + return true; + } else if (status === BlobStatus.Failed) { + setBlobStatus('Failed'); + setUploadProgress('Blob resolution failed.'); + return false; + } else { + setBlobStatus('Pending'); + setUploadProgress(`Waiting for resolution... (${i + 1}/${maxAttempts})`); + } + } catch (err) { + console.log('Blob not yet registered, waiting...', err); + setUploadProgress(`Waiting for blob registration... 
(${i + 1}/${maxAttempts})`); + } + + // Wait 2 seconds before next poll + await new Promise(resolve => setTimeout(resolve, 2000)); + } + + setUploadProgress('Timeout waiting for blob resolution'); + return false; + }, [signer]); + + const uploadFile = useCallback(async (file: File, targetPath: string) => { + if (!signer || !bucketAddress) { + setError('Wallet or bucket not connected'); + return false; + } + + setIsUploading(true); + setUploadProgress('Preparing upload...'); + setBlobStatus(null); + setError(null); + + try { + const config = getConfig(); + + // Step 1: Upload to gateway + setUploadProgress('Uploading to gateway...'); + const formData = new FormData(); + formData.append('size', file.size.toString()); + formData.append('data', file); + + const uploadResponse = await fetch(`${config.objectsListenAddr}/v1/objects`, { + method: 'POST', + body: formData, + }); + + if (!uploadResponse.ok) { + throw new Error(`Upload failed: ${uploadResponse.statusText}`); + } + + const uploadResult: UploadResponse = await uploadResponse.json(); + console.log('Upload result:', uploadResult); + + // Get node info + const nodeResponse = await fetch(`${config.objectsListenAddr}/v1/node`); + const nodeInfo: NodeInfo = await nodeResponse.json(); + + // Convert base32 hashes to hex + const blobHash = base32ToHex(uploadResult.hash); + const metadataHash = base32ToHex(uploadResult.metadata_hash || uploadResult.metadataHash || ''); + const sourceNode = '0x' + nodeInfo.node_id; + + console.log('Blob hash (hex):', blobHash); + console.log('Metadata hash (hex):', metadataHash); + console.log('Source node:', sourceNode); + + // Step 2: Register in bucket + setUploadProgress('Registering in bucket...'); + const contract = getBucketContract(bucketAddress, signer); + + // Build the full path + let fullPath = targetPath; + if (!fullPath.endsWith('/') && fullPath !== '') { + fullPath += '/'; + } + fullPath += file.name; + + const tx = await contract.addObject( + sourceNode, + fullPath, + blobHash, + metadataHash, + BigInt(file.size) + ); + + setUploadProgress('Waiting for transaction confirmation...'); + await tx.wait(); + + // Step 3: Poll for blob status + setUploadProgress('Checking blob status...'); + await pollBlobStatus(blobHash); + + return true; + } catch (err: unknown) { + const error = err as Error; + console.error('Upload error:', err); + setError(error.message || 'Upload failed'); + return false; + } finally { + setIsUploading(false); + } + }, [signer, bucketAddress, pollBlobStatus]); + + const clearError = useCallback(() => { + setError(null); + }, []); + + return { + isUploading, + uploadProgress, + blobStatus, + error, + uploadFile, + clearError, + }; +} diff --git a/ipc-dropbox/src/hooks/useWallet.ts b/ipc-dropbox/src/hooks/useWallet.ts new file mode 100644 index 0000000000..59b9fd4190 --- /dev/null +++ b/ipc-dropbox/src/hooks/useWallet.ts @@ -0,0 +1,130 @@ +import { useState, useCallback, useEffect } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; + +declare global { + interface Window { + ethereum?: ethers.Eip1193Provider & { + on: (event: string, callback: (...args: unknown[]) => void) => void; + removeListener: (event: string, callback: (...args: unknown[]) => void) => void; + }; + } +} + +export interface WalletState { + address: string | null; + signer: ethers.Signer | null; + provider: ethers.BrowserProvider | null; + isConnecting: boolean; + error: string | null; +} + +export function useWallet() { + const [state, setState] = useState({ + address: 
null, + signer: null, + provider: null, + isConnecting: false, + error: null, + }); + + const connect = useCallback(async () => { + if (!window.ethereum) { + setState(s => ({ ...s, error: 'MetaMask not found. Please install MetaMask.' })); + return; + } + + setState(s => ({ ...s, isConnecting: true, error: null })); + + try { + const config = getConfig(); + const provider = new ethers.BrowserProvider(window.ethereum); + + // Request accounts + await provider.send('eth_requestAccounts', []); + + // Try to switch to the correct network + try { + const chainId = await provider.send('eth_chainId', []); + const targetChainId = '0x' + BigInt(config.chainId).toString(16); + + if (chainId !== targetChainId) { + try { + await provider.send('wallet_switchEthereumChain', [{ chainId: targetChainId }]); + } catch (switchError: unknown) { + const err = switchError as { code?: number }; + // Chain not added, try to add it + if (err.code === 4902) { + await provider.send('wallet_addEthereumChain', [{ + chainId: targetChainId, + chainName: 'IPC Local', + rpcUrls: [config.ethRpc], + nativeCurrency: { + name: 'FIL', + symbol: 'FIL', + decimals: 18, + }, + }]); + } + } + } + } catch { + // Ignore network switch errors + } + + const signer = await provider.getSigner(); + const address = await signer.getAddress(); + + setState({ + address, + signer, + provider, + isConnecting: false, + error: null, + }); + } catch (err: unknown) { + const error = err as Error; + setState(s => ({ + ...s, + isConnecting: false, + error: error.message || 'Failed to connect wallet', + })); + } + }, []); + + const disconnect = useCallback(() => { + setState({ + address: null, + signer: null, + provider: null, + isConnecting: false, + error: null, + }); + }, []); + + // Listen for account changes + useEffect(() => { + if (!window.ethereum) return; + + const handleAccountsChanged = (accounts: unknown) => { + const accs = accounts as string[]; + if (accs.length === 0) { + disconnect(); + } else if (state.address && accs[0].toLowerCase() !== state.address.toLowerCase()) { + connect(); + } + }; + + window.ethereum.on('accountsChanged', handleAccountsChanged); + return () => { + window.ethereum?.removeListener('accountsChanged', handleAccountsChanged); + }; + }, [state.address, connect, disconnect]); + + return { + ...state, + connect, + disconnect, + isConnected: !!state.address, + }; +} diff --git a/ipc-dropbox/src/index.css b/ipc-dropbox/src/index.css new file mode 100644 index 0000000000..3aedc0fa09 --- /dev/null +++ b/ipc-dropbox/src/index.css @@ -0,0 +1,509 @@ +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +:root { + --primary: #4f46e5; + --primary-hover: #4338ca; + --secondary: #6b7280; + --secondary-hover: #4b5563; + --success: #10b981; + --warning: #f59e0b; + --error: #ef4444; + --background: #f9fafb; + --surface: #ffffff; + --border: #e5e7eb; + --text: #111827; + --text-secondary: #6b7280; + --radius: 8px; + --shadow: 0 1px 3px rgba(0, 0, 0, 0.1); + --shadow-lg: 0 4px 6px rgba(0, 0, 0, 0.1); +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + background-color: var(--background); + color: var(--text); + line-height: 1.5; +} + +.app { + min-height: 100vh; + display: flex; + flex-direction: column; +} + +/* Header */ +.header { + background: var(--surface); + border-bottom: 1px solid var(--border); + padding: 1rem 2rem; + display: flex; + justify-content: space-between; + align-items: center; + box-shadow: var(--shadow); +} + +.header h1 { + font-size: 1.5rem; + 
font-weight: 700; + color: var(--primary); +} + +/* Wallet Connect */ +.wallet-connect { + display: flex; + align-items: center; + gap: 1rem; +} + +.wallet-info { + display: flex; + align-items: center; + gap: 0.75rem; +} + +.wallet-address { + font-family: monospace; + background: var(--background); + padding: 0.5rem 0.75rem; + border-radius: var(--radius); + font-size: 0.875rem; +} + +/* Buttons */ +.btn { + padding: 0.5rem 1rem; + border: none; + border-radius: var(--radius); + font-size: 0.875rem; + font-weight: 500; + cursor: pointer; + transition: all 0.2s; +} + +.btn:disabled { + opacity: 0.5; + cursor: not-allowed; +} + +.btn-primary { + background: var(--primary); + color: white; +} + +.btn-primary:hover:not(:disabled) { + background: var(--primary-hover); +} + +.btn-secondary { + background: var(--secondary); + color: white; +} + +.btn-secondary:hover:not(:disabled) { + background: var(--secondary-hover); +} + +.btn-icon { + background: var(--background); + color: var(--text); + border: 1px solid var(--border); +} + +.btn-icon:hover:not(:disabled) { + background: var(--border); +} + +.btn-small { + padding: 0.25rem 0.5rem; + font-size: 0.75rem; +} + +.btn-danger { + background: var(--error); + color: white; +} + +.btn-danger:hover:not(:disabled) { + background: #dc2626; +} + +.btn-large { + padding: 0.75rem 1.5rem; + font-size: 1rem; +} + +/* Main Content */ +.main { + flex: 1; + padding: 2rem; + max-width: 1400px; + margin: 0 auto; + width: 100%; +} + +/* Welcome Screen */ +.welcome { + text-align: center; + padding: 4rem 2rem; +} + +.welcome h2 { + font-size: 2rem; + margin-bottom: 1rem; +} + +.welcome p { + color: var(--text-secondary); + margin-bottom: 2rem; +} + +/* Setup Steps */ +.setup-step { + max-width: 600px; + margin: 0 auto; + background: var(--surface); + padding: 2rem; + border-radius: var(--radius); + box-shadow: var(--shadow); +} + +.setup-step h2 { + font-size: 1.5rem; + margin-bottom: 1.5rem; + text-align: center; +} + +.credit-summary { + margin-bottom: 2rem; + padding-bottom: 2rem; + border-bottom: 1px solid var(--border); +} + +/* Credit Manager */ +.credit-manager h3, +.bucket-manager h3 { + font-size: 1rem; + margin-bottom: 1rem; + color: var(--text-secondary); +} + +.credit-info p, +.bucket-info p { + margin-bottom: 0.5rem; +} + +.credit-info strong, +.bucket-info strong { + color: var(--text); +} + +.buy-credit { + margin-top: 1rem; +} + +.buy-form { + display: flex; + align-items: center; + gap: 0.5rem; + margin-top: 0.75rem; +} + +.input { + padding: 0.5rem 0.75rem; + border: 1px solid var(--border); + border-radius: var(--radius); + font-size: 0.875rem; + width: 100px; +} + +.unit { + color: var(--text-secondary); + font-size: 0.875rem; +} + +/* Dashboard Layout */ +.dashboard { + display: grid; + grid-template-columns: 280px 1fr; + gap: 2rem; +} + +.sidebar { + display: flex; + flex-direction: column; + gap: 1.5rem; +} + +.sidebar > div { + background: var(--surface); + padding: 1.25rem; + border-radius: var(--radius); + box-shadow: var(--shadow); +} + +.content { + background: var(--surface); + border-radius: var(--radius); + box-shadow: var(--shadow); + overflow: hidden; +} + +/* File Explorer */ +.file-explorer { + min-height: 500px; +} + +.explorer-toolbar { + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; + gap: 1rem; +} + +.breadcrumbs { + display: flex; + align-items: center; + gap: 0.25rem; + flex-wrap: wrap; +} + +.breadcrumb { + 
background: none; + border: none; + color: var(--primary); + cursor: pointer; + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.875rem; +} + +.breadcrumb:hover:not(:disabled) { + background: var(--background); +} + +.breadcrumb:disabled { + color: var(--text); + cursor: default; + font-weight: 500; +} + +.separator { + color: var(--text-secondary); +} + +.toolbar-actions { + display: flex; + align-items: center; + gap: 0.5rem; +} + +/* New Folder Input */ +.new-folder-input { + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + display: flex; + align-items: center; + gap: 0.5rem; + background: var(--background); +} + +.new-folder-input .input { + flex: 1; + max-width: 300px; +} + +/* File List */ +.file-list { + overflow-x: auto; +} + +.file-header, +.file-row { + display: grid; + grid-template-columns: 1fr 100px 180px; + padding: 0.75rem 1.25rem; + gap: 1rem; + align-items: center; +} + +.file-header { + background: var(--background); + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + color: var(--text-secondary); + border-bottom: 1px solid var(--border); +} + +.file-row { + border-bottom: 1px solid var(--border); +} + +.file-row:hover { + background: var(--background); +} + +.file-row:last-child { + border-bottom: none; +} + +.col-name { + min-width: 0; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.col-size { + text-align: right; + font-size: 0.875rem; + color: var(--text-secondary); +} + +.col-actions { + text-align: right; + display: flex; + justify-content: flex-end; + gap: 0.5rem; +} + +.folder-link { + background: none; + border: none; + color: var(--primary); + cursor: pointer; + font-size: inherit; + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0; + text-align: left; +} + +.folder-link:hover { + text-decoration: underline; +} + +.file-name { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.icon { + font-size: 0.75rem; + padding: 0.25rem 0.5rem; + background: var(--background); + border-radius: 4px; + color: var(--text-secondary); +} + +.folder-icon { + background: #fef3c7; + color: #d97706; +} + +.file-icon { + background: #dbeafe; + color: #2563eb; +} + +/* Empty State */ +.empty-state { + padding: 4rem 2rem; + text-align: center; + color: var(--text-secondary); +} + +.empty-state .hint { + font-size: 0.875rem; + margin-top: 0.5rem; +} + +/* Loading */ +.loading { + padding: 2rem; + text-align: center; + color: var(--text-secondary); +} + +/* Messages */ +.error { + color: var(--error); + font-size: 0.875rem; + margin-top: 0.75rem; +} + +.warning { + color: var(--warning); + font-size: 0.875rem; + margin-bottom: 0.75rem; +} + +/* Footer */ +.footer { + text-align: center; + padding: 1rem; + color: var(--text-secondary); + font-size: 0.875rem; + border-top: 1px solid var(--border); +} + +/* Code */ +code { + font-family: monospace; + background: var(--background); + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.875rem; +} + +/* Responsive */ +@media (max-width: 900px) { + .dashboard { + grid-template-columns: 1fr; + } + + .sidebar { + flex-direction: row; + flex-wrap: wrap; + } + + .sidebar > div { + flex: 1; + min-width: 250px; + } +} + +@media (max-width: 600px) { + .header { + flex-direction: column; + gap: 1rem; + } + + .explorer-toolbar { + flex-direction: column; + align-items: stretch; + } + + .toolbar-actions { + flex-wrap: wrap; + justify-content: flex-start; + } + + .file-header, + .file-row { + grid-template-columns: 1fr 80px; + } + + 
.col-actions {
+    display: none;
+  }
+}
diff --git a/ipc-dropbox/src/main.tsx b/ipc-dropbox/src/main.tsx
new file mode 100644
index 0000000000..964aeb4c7e
--- /dev/null
+++ b/ipc-dropbox/src/main.tsx
@@ -0,0 +1,10 @@
+import React from 'react'
+import ReactDOM from 'react-dom/client'
+import App from './App'
+import './index.css'
+
+ReactDOM.createRoot(document.getElementById('root')!).render(
+  <React.StrictMode>
+    <App />
+  </React.StrictMode>,
+)
diff --git a/ipc-dropbox/src/types.ts b/ipc-dropbox/src/types.ts
new file mode 100644
index 0000000000..a645946e96
--- /dev/null
+++ b/ipc-dropbox/src/types.ts
@@ -0,0 +1,57 @@
+export interface Config {
+  tendermintRpc: string;
+  objectsListenAddr: string;
+  nodeOperationObjectApi: string;
+  ethRpc: string;
+  blobsActor: string;
+  admActor: string;
+  chainId: number;
+}
+
+export interface ObjectMetadata {
+  key: string;
+  value: string;
+}
+
+export interface ObjectState {
+  blobHash: string;
+  size: bigint;
+  expiry: bigint;
+  metadata: ObjectMetadata[];
+}
+
+export interface ObjectEntry {
+  key: string;
+  state: ObjectState;
+}
+
+export interface QueryResult {
+  objects: ObjectEntry[];
+  commonPrefixes: string[];
+  nextKey: string;
+}
+
+export interface UploadResponse {
+  hash: string;
+  metadata_hash?: string;
+  metadataHash?: string;
+}
+
+export interface NodeInfo {
+  node_id: string;
+}
+
+export interface CreditInfo {
+  balance: bigint;
+  freeCredit: bigint;
+  lastDebitEpoch: bigint;
+}
+
+export interface FileItem {
+  name: string;
+  fullPath: string;
+  isFolder: boolean;
+  size?: bigint;
+  expiry?: bigint;
+  blobHash?: string;
+}
diff --git a/ipc-dropbox/src/utils/base32.ts b/ipc-dropbox/src/utils/base32.ts
new file mode 100644
index 0000000000..559d6dbb40
--- /dev/null
+++ b/ipc-dropbox/src/utils/base32.ts
@@ -0,0 +1,34 @@
+const BASE32_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567';
+
+export function base32ToHex(base32: string): string {
+  // Normalize: uppercase and add padding
+  let input = base32.toUpperCase();
+  const padding = (8 - (input.length % 8)) % 8;
+  input = input + '='.repeat(padding);
+
+  // Decode base32
+  let bits = '';
+  for (const char of input) {
+    if (char === '=') break;
+    const index = BASE32_ALPHABET.indexOf(char);
+    if (index === -1) continue;
+    bits += index.toString(2).padStart(5, '0');
+  }
+
+  // Convert bits to bytes
+  const bytes: number[] = [];
+  for (let i = 0; i + 8 <= bits.length; i += 8) {
+    bytes.push(parseInt(bits.slice(i, i + 8), 2));
+  }
+
+  // Ensure exactly 32 bytes for hash
+  while (bytes.length < 32) {
+    bytes.push(0);
+  }
+  if (bytes.length > 32) {
+    bytes.length = 32;
+  }
+
+  // Convert to hex
+  return '0x' + bytes.map(b => b.toString(16).padStart(2, '0')).join('');
+}
diff --git a/ipc-dropbox/src/utils/config.ts b/ipc-dropbox/src/utils/config.ts
new file mode 100644
index 0000000000..cbfbaa02e6
--- /dev/null
+++ b/ipc-dropbox/src/utils/config.ts
@@ -0,0 +1,13 @@
+import { Config } from '../types';
+
+export function getConfig(): Config {
+  return {
+    tendermintRpc: import.meta.env.VITE_TENDERMINT_RPC || 'http://localhost:26657',
+    objectsListenAddr: import.meta.env.VITE_OBJECTS_LISTEN_ADDR || 'http://localhost:8080',
+    nodeOperationObjectApi: import.meta.env.VITE_NODE_OPERATION_OBJECT_API || 'http://localhost:8081',
+    ethRpc: import.meta.env.VITE_ETH_RPC || 'http://localhost:8545',
+    blobsActor: import.meta.env.VITE_BLOBS_ACTOR || '0x6d342defae60f6402aee1f804653bbae4e66ae46',
+    admActor: import.meta.env.VITE_ADM_ACTOR || '0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28',
+    chainId: parseInt(import.meta.env.VITE_CHAIN_ID || '1023102'),
+ }; +} diff --git a/ipc-dropbox/src/utils/contracts.ts b/ipc-dropbox/src/utils/contracts.ts new file mode 100644 index 0000000000..dba564594b --- /dev/null +++ b/ipc-dropbox/src/utils/contracts.ts @@ -0,0 +1,50 @@ +import { ethers } from 'ethers'; + +// ABI for Blobs Actor +export const BLOBS_ABI = [ + 'function buyCredit() payable', + 'function getAccount(address addr) view returns (tuple(uint64 capacityUsed, uint256 creditFree, uint256 creditCommitted, address creditSponsor, uint64 lastDebitEpoch, tuple(address addr, tuple(uint256 creditLimit, uint256 gasFeeLimit, uint64 expiry, uint256 creditUsed, uint256 gasFeeUsed) approval)[] approvalsTo, tuple(address addr, tuple(uint256 creditLimit, uint256 gasFeeLimit, uint64 expiry, uint256 creditUsed, uint256 gasFeeUsed) approval)[] approvalsFrom, uint64 maxTtl, uint256 gasAllowance))', + 'function getBlob(bytes32 blobHash) view returns (tuple(uint64 size, bytes32 metadataHash, tuple(string id, int64 expiry)[] subscriptions, uint8 status))', +]; + +// Blob status enum values +export enum BlobStatus { + Pending = 0, + Resolved = 1, + Failed = 2, +} + +// ABI for ADM Actor +export const ADM_ABI = [ + 'function createBucket() returns (address)', + 'function listBuckets(address owner) view returns (tuple(uint8 kind, address addr, tuple(string key, string value)[] metadata)[])', + 'event MachineInitialized(uint8 indexed kind, address machineAddress)', +]; + +// ABI for Bucket Actor +export const BUCKET_ABI = [ + 'function addObject(bytes32 source, string key, bytes32 hash, bytes32 recoveryHash, uint64 size)', + 'function getObject(string key) view returns (tuple(bytes32 blobHash, bytes32 recoveryHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata))', + 'function deleteObject(string key)', + 'function updateObjectMetadata(string key, tuple(string key, string value)[] metadata)', + 'function queryObjects() view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix, string delimiter) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix, string delimiter, string startKey, uint64 limit) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function owner() view returns (address)', +]; + +export function getBlobsContract(address: string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, BLOBS_ABI, signer); +} + +export function getAdmContract(address: string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, ADM_ABI, signer); +} + +export function getBucketContract(address: string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, BUCKET_ABI, signer); +} + +// Event topic for MachineInitialized +export const MACHINE_INITIALIZED_TOPIC = '0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e'; diff --git 
a/ipc-dropbox/src/vite-env.d.ts b/ipc-dropbox/src/vite-env.d.ts
new file mode 100644
index 0000000000..bc52dafec7
--- /dev/null
+++ b/ipc-dropbox/src/vite-env.d.ts
@@ -0,0 +1,15 @@
+/// <reference types="vite/client" />
+
+interface ImportMetaEnv {
+  readonly VITE_TENDERMINT_RPC: string;
+  readonly VITE_OBJECTS_LISTEN_ADDR: string;
+  readonly VITE_NODE_OPERATION_OBJECT_API: string;
+  readonly VITE_ETH_RPC: string;
+  readonly VITE_BLOBS_ACTOR: string;
+  readonly VITE_ADM_ACTOR: string;
+  readonly VITE_CHAIN_ID: string;
+}
+
+interface ImportMeta {
+  readonly env: ImportMetaEnv;
+}
diff --git a/ipc-dropbox/tsconfig.json b/ipc-dropbox/tsconfig.json
new file mode 100644
index 0000000000..3934b8f6d6
--- /dev/null
+++ b/ipc-dropbox/tsconfig.json
@@ -0,0 +1,21 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "useDefineForClassFields": true,
+    "lib": ["ES2020", "DOM", "DOM.Iterable"],
+    "module": "ESNext",
+    "skipLibCheck": true,
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "noEmit": true,
+    "jsx": "react-jsx",
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noFallthroughCasesInSwitch": true
+  },
+  "include": ["src"],
+  "references": [{ "path": "./tsconfig.node.json" }]
+}
diff --git a/ipc-dropbox/tsconfig.node.json b/ipc-dropbox/tsconfig.node.json
new file mode 100644
index 0000000000..42872c59f5
--- /dev/null
+++ b/ipc-dropbox/tsconfig.node.json
@@ -0,0 +1,10 @@
+{
+  "compilerOptions": {
+    "composite": true,
+    "skipLibCheck": true,
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "allowSyntheticDefaultImports": true
+  },
+  "include": ["vite.config.ts"]
+}
diff --git a/ipc-dropbox/vite.config.ts b/ipc-dropbox/vite.config.ts
new file mode 100644
index 0000000000..184cd3c58d
--- /dev/null
+++ b/ipc-dropbox/vite.config.ts
@@ -0,0 +1,24 @@
+import { defineConfig } from 'vite'
+import react from '@vitejs/plugin-react'
+
+export default defineConfig({
+  plugins: [react()],
+  server: {
+    port: 3000,
+    proxy: {
+      '/api/gateway': {
+        target: 'http://localhost:8080',
+        changeOrigin: true,
+        rewrite: (path) => path.replace(/^\/api\/gateway/, ''),
+      },
+      '/api/node': {
+        target: 'http://localhost:8081',
+        changeOrigin: true,
+        rewrite: (path) => path.replace(/^\/api\/node/, ''),
+      },
+    },
+  },
+  define: {
+    'process.env': {}
+  }
+})
diff --git a/ipc/provider/src/config/mod.rs b/ipc/provider/src/config/mod.rs
index cbb9810995..baa4a9ea3b 100644
--- a/ipc/provider/src/config/mod.rs
+++ b/ipc/provider/src/config/mod.rs
@@ -67,8 +67,7 @@ impl Config {
             )
         })?;
 
-        let config: Config =
-            Config::from_toml_str(contents.as_str()).context("failed to parse config TOML")?;
+        let config: Config = Config::from_toml_str(contents.as_str())?;
 
         Ok(config)
     }
diff --git a/ipld/resolver/Cargo.toml b/ipld/resolver/Cargo.toml
index b9fb682306..91c40b15eb 100644
--- a/ipld/resolver/Cargo.toml
+++ b/ipld/resolver/Cargo.toml
@@ -12,6 +12,7 @@ async-trait = { workspace = true }
 base64 = { workspace = true }
 blake2b_simd = { workspace = true }
 bloom = { workspace = true }
+bytes = { workspace = true }
 lazy_static = { workspace = true }
 libipld = { workspace = true }
 libp2p = { workspace = true }
@@ -27,6 +28,10 @@ serde = { workspace = true }
 serde_json = { workspace = true, features = ["raw_value"] }
 thiserror = { workspace = true }
 tokio = { workspace = true }
+# Iroh/Recall dependencies
+iroh = { workspace = true }
+iroh-blobs = { workspace = true }
+iroh_manager = { path = "../../recall/iroh_manager" }
 
 fvm_ipld_encoding = { workspace = true }
 fvm_shared = { workspace = true }
diff --git a/ipld/resolver/src/behaviour/mod.rs b/ipld/resolver/src/behaviour/mod.rs
index fdefaaacf3..e158af4dd6 100644
--- a/ipld/resolver/src/behaviour/mod.rs
+++ b/ipld/resolver/src/behaviour/mod.rs
@@ -43,6 +43,10 @@ pub enum ConfigError {
     Discovery(#[from] discovery::ConfigError),
     #[error("Error in the membership configuration")]
     Membership(#[from] membership::ConfigError),
+    #[error("Invalid iroh address")]
+    IrohAddr(#[from] std::net::AddrParseError),
+    #[error("Unable to create iroh client")]
+    IrohClient(#[from] anyhow::Error),
 }
 
 /// Libp2p behaviour bundle to manage content resolution from other subnets, using:
diff --git a/ipld/resolver/src/client.rs b/ipld/resolver/src/client.rs
index 29e9eac550..9bf4b39084 100644
--- a/ipld/resolver/src/client.rs
+++ b/ipld/resolver/src/client.rs
@@ -3,12 +3,14 @@
 use anyhow::anyhow;
 use async_trait::async_trait;
 use ipc_api::subnet_id::SubnetID;
+use iroh::NodeAddr;
+use iroh_blobs::Hash;
 use libipld::Cid;
 use tokio::sync::mpsc::UnboundedSender;
 use tokio::sync::oneshot;
 
 use crate::{
-    service::{Request, ResolveResult},
+    service::{Request, ResolveReadRequestResult, ResolveResult},
     vote_record::SignedVoteRecord,
 };
@@ -112,3 +114,67 @@ where
         Ok(res)
     }
 }
+
+/// Trait to limit the capabilities to resolving CIDs from Iroh.
+#[async_trait]
+pub trait ResolverIroh {
+    /// Send a hash for resolution from an Iroh node, await its completion,
+    /// then return the result, to be inspected by the caller.
+    async fn resolve_iroh(
+        &self,
+        hash: Hash,
+        size: u64,
+        node_addr: NodeAddr,
+    ) -> anyhow::Result<ResolveResult>;
+}
+
+#[async_trait]
+impl<V> ResolverIroh for Client<V>
+where
+    V: Sync + Send + 'static,
+{
+    async fn resolve_iroh(
+        &self,
+        hash: Hash,
+        size: u64,
+        node_addr: NodeAddr,
+    ) -> anyhow::Result<ResolveResult> {
+        let (tx, rx) = oneshot::channel();
+        let req = Request::ResolveIroh(hash, size, node_addr, tx);
+        self.send_request(req)?;
+        let res = rx.await?;
+        Ok(res)
+    }
+}
+
+/// Trait to limit the capabilities to reading data from Iroh.
+#[async_trait]
+pub trait ResolverIrohReadRequest {
+    /// Send a hash for getting the data from iroh, await its completion,
+    /// then return the result, to be inspected by the caller.
+    async fn close_read_request(
+        &self,
+        hash: Hash,
+        offset: u32,
+        len: u32,
+    ) -> anyhow::Result<ResolveReadRequestResult>;
+}
+
+#[async_trait]
+impl<V> ResolverIrohReadRequest for Client<V>
+where
+    V: Sync + Send + 'static,
+{
+    async fn close_read_request(
+        &self,
+        hash: Hash,
+        offset: u32,
+        len: u32,
+    ) -> anyhow::Result<ResolveReadRequestResult> {
+        let (tx, rx) = oneshot::channel();
+        let req = Request::ResolveIrohRead(hash, offset, len, tx);
+        self.send_request(req)?;
+        let res = rx.await?;
+        Ok(res)
+    }
+}
diff --git a/ipld/resolver/src/lib.rs b/ipld/resolver/src/lib.rs
index 3d54127b37..0f46e348ff 100644
--- a/ipld/resolver/src/lib.rs
+++ b/ipld/resolver/src/lib.rs
@@ -20,7 +20,7 @@ mod arb;
 pub mod missing_blocks;
 pub use behaviour::{ContentConfig, DiscoveryConfig, MembershipConfig, NetworkConfig};
-pub use client::{Client, Resolver};
-pub use service::{Config, ConnectionConfig, Event, NoKnownPeers, Service};
+pub use client::{Client, Resolver, ResolverIroh, ResolverIrohReadRequest};
+pub use service::{Config, ConnectionConfig, Event, IrohConfig, NoKnownPeers, Service};
 pub use timestamp::Timestamp;
 pub use vote_record::{ValidatorKey, VoteRecord};
diff --git a/ipld/resolver/src/service.rs b/ipld/resolver/src/service.rs
index d1141c0cc6..708285a521 100644
--- a/ipld/resolver/src/service.rs
+++ b/ipld/resolver/src/service.rs
@@ -1,10 +1,13 @@
 // Copyright 2022-2024 Protocol Labs
 // SPDX-License-Identifier: MIT
+
 use std::collections::HashMap;
+use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
+use std::path::PathBuf;
 use std::time::Duration;
 
 use crate::behaviour::{
-    self, content, discovery, membership, Behaviour, BehaviourEvent, ConfigError, ContentConfig,
+    content, discovery, membership, Behaviour, BehaviourEvent, ConfigError, ContentConfig,
     DiscoveryConfig, MembershipConfig, NetworkConfig,
 };
 use crate::client::Client;
@@ -14,6 +17,12 @@
 use anyhow::anyhow;
 use bloom::{BloomFilter, ASMS};
 use ipc_api::subnet_id::SubnetID;
 use ipc_observability::emit;
+use iroh::NodeAddr;
+use iroh_blobs::net_protocol::DownloadMode;
+use iroh_blobs::rpc::client::blobs::{DownloadOptions, ReadAtLen};
+use iroh_blobs::util::SetTagOption;
+use iroh_blobs::{BlobFormat, Hash, Tag};
+use iroh_manager::{get_blob_hash_and_size, BlobsClient, IrohManager};
 use libipld::store::StoreParams;
 use libipld::Cid;
 use libp2p::connection_limits::ConnectionLimits;
@@ -22,7 +31,7 @@
 use libp2p::swarm::SwarmEvent;
 use libp2p::{
     core::{muxing::StreamMuxerBox, transport::Boxed},
     identity::Keypair,
-    noise, Multiaddr, PeerId, Swarm, Transport,
+    noise, yamux, Multiaddr, PeerId, Swarm, Transport,
 };
 use libp2p::{identify, ping};
 use libp2p_bitswap::{BitswapResponse, BitswapStore};
@@ -35,13 +44,19 @@
 use serde::Serialize;
 use tokio::select;
 use tokio::sync::broadcast;
 use tokio::sync::mpsc;
-use tokio::sync::oneshot::{self, Sender};
+use tokio::sync::oneshot::Sender;
 
 /// Result of attempting to resolve a CID.
 pub type ResolveResult = anyhow::Result<()>;
 
+/// Result of attempting to resolve a read request.
+pub type ResolveReadRequestResult = anyhow::Result<Bytes>;
+
 /// Channel to complete the results with.
-type ResponseChannel = oneshot::Sender<ResolveResult>;
+type ResponseChannel = Sender<ResolveResult>;
+
+/// Channel to complete the read request with.
+type ReadRequestResponseChannel = Sender<ResolveReadRequestResult>;
 
 /// State of a query. The fallback peers can be used
 /// if the current attempt fails.
@@ -85,6 +100,15 @@ pub struct Config {
     pub membership: MembershipConfig,
     pub connection: ConnectionConfig,
     pub content: ContentConfig,
+    pub iroh: IrohConfig,
+}
+
+#[derive(Debug, Clone)]
+pub struct IrohConfig {
+    pub v4_addr: Option<SocketAddrV4>,
+    pub v6_addr: Option<SocketAddrV6>,
+    pub path: PathBuf,
+    pub rpc_addr: SocketAddr,
 }

 /// Internal requests to enqueue to the [`Service`]
@@ -97,6 +121,8 @@ pub(crate) enum Request {
     PinSubnet(SubnetID),
     UnpinSubnet(SubnetID),
     Resolve(Cid, SubnetID, ResponseChannel),
+    ResolveIroh(Hash, u64, NodeAddr, ResponseChannel),
+    ResolveIrohRead(Hash, u32, u32, ReadRequestResponseChannel),
     RateLimitUsed(PeerId, usize),
     UpdateRateLimit(u32),
 }
@@ -132,6 +158,8 @@ where
     background_lookup_filter: BloomFilter,
     /// To limit the number of peers contacted in a Bitswap resolution attempt.
     max_peers_per_query: usize,
+    /// Iroh node
+    iroh: IrohManager,
 }

 impl<P, V> Service<P, V>
 where
     P: StoreParams,
     V: Serialize + DeserializeOwned + Clone + Send + 'static,
 {
     /// Build a [`Service`] and a [`Client`] with the default `tokio` transport.
-    pub fn new<S>(config: Config, store: S) -> Result<Self, ConfigError>
+    pub async fn new<S>(config: Config, store: S) -> Result<Self, ConfigError>
     where
         S: BitswapStore<Params = P>,
     {
-        Self::new_with_transport(config, store, build_transport)
+        Self::new_with_transport(config, store, build_transport).await
     }

     /// Build a [`Service`] and a [`Client`] by passing in a transport factory function.
     ///
     /// The main goal is to facilitate testing with a [`MemoryTransport`].
-    pub fn new_with_transport<S, F>(
+    pub async fn new_with_transport<S, F>(
         config: Config,
         store: S,
         transport: F,
@@ -192,6 +220,8 @@ where
         let (request_tx, request_rx) = mpsc::unbounded_channel();
         let (event_tx, _) = broadcast::channel(config.connection.event_buffer_capacity as usize);

+        let iroh = config.iroh;
+
         let service = Self {
             peer_id,
             listen_addr: config.connection.listen_addr,
@@ -205,6 +235,8 @@ where
                 config.connection.expected_peer_count,
             ),
             max_peers_per_query: config.connection.max_peers_per_query as usize,
+            iroh: IrohManager::new(iroh.v4_addr, iroh.v6_addr, iroh.path, Some(iroh.rpc_addr))
+                .await?,
         };

         Ok(service)
@@ -219,6 +251,11 @@ where
         Client::new(self.request_tx.clone())
     }

+    /// Returns a reference to the iroh node.
+    pub fn iroh(&self) -> &IrohManager {
+        &self.iroh
+    }
+
     /// Create a new [`broadcast::Receiver`] instance bound to this `Service`,
     /// which will be notified upon each event coming from any of the subnets
     /// the `Service` is subscribed to.
@@ -274,18 +311,21 @@ where
                     // Connection events are handled by the behaviours, passed directly from the Swarm.
                     Some(_) => { },
                     // The connection is closed.
-                    None => { break; },
+                    None => {
+                        return Err(anyhow!("connection closed"));
+                    },
                 },
                 request = self.request_rx.recv() => match request {
                     // A Client sent us a request.
                     Some(req) => self.handle_request(req),
                     // This shouldn't happen because the service has a copy of the sender.
                     // All Client instances have been dropped.
-                    None => { break; }
+                    None => {
+                        return Err(anyhow!("all client instances have been dropped"));
+                    }
                 }
-            };
+            }
         }
-        Ok(())
     }

     /// Handle events that the [`NetworkBehaviour`] macro generated for our [`Behaviour`], one for each field.
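To make the new knobs concrete, here is an illustrative construction of the extended `Config`. Every value below is a placeholder; the fields above `membership` are assumed from the crate's exported config types, and the error plumbing (`?`) presumes a fallible caller:

let config = Config {
    network: network_config,
    discovery: discovery_config,
    membership: membership_config,
    connection: connection_config,
    content: content_config,
    iroh: IrohConfig {
        // Bind addresses for the embedded iroh endpoint; either may be None.
        v4_addr: Some("0.0.0.0:11204".parse::<SocketAddrV4>()?),
        v6_addr: None,
        // Directory backing the persistent blob store.
        path: PathBuf::from("/var/lib/ipc/iroh"),
        rpc_addr: "127.0.0.1:4919".parse()?,
    },
};
// The constructor is async now because it must start the IrohManager.
let service = Service::new(config, store).await?;
let client = service.client();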
@@ -450,6 +490,12 @@ where
             Request::Resolve(cid, subnet_id, response_channel) => {
                 self.start_query(cid, subnet_id, response_channel)
             }
+            Request::ResolveIroh(hash, size, node_addr, response_channel) => {
+                self.start_iroh_query(hash, size, node_addr, response_channel)
+            }
+            Request::ResolveIrohRead(hash, offset, len, response_channel) => {
+                self.start_iroh_read_query(hash, offset, len, response_channel)
+            }
             Request::RateLimitUsed(peer_id, bytes) => {
                 self.content_mut().rate_limit_used(peer_id, bytes)
             }
@@ -493,6 +539,42 @@ where
         }
     }

+    /// Start a CID resolution using iroh.
+    fn start_iroh_query(
+        &mut self,
+        hash: Hash,
+        size: u64,
+        node_addr: NodeAddr,
+        response_channel: ResponseChannel,
+    ) {
+        let client = self.iroh.blobs_client().clone();
+        tokio::spawn(async move {
+            let res = download_blob(&client, hash, size, node_addr).await;
+            match res {
+                Ok(_) => send_resolve_result(response_channel, Ok(())),
+                Err(e) => send_resolve_result(response_channel, Err(anyhow!(e))),
+            }
+        });
+    }
+
+    /// Start a read request resolution using iroh.
+    fn start_iroh_read_query(
+        &mut self,
+        hash: Hash,
+        offset: u32,
+        len: u32,
+        response_channel: ReadRequestResponseChannel,
+    ) {
+        let client = self.iroh.blobs_client().clone();
+        tokio::spawn(async move {
+            let res = read_blob(&client, hash, offset, len).await;
+            match res {
+                Ok(bytes) => send_read_request_result(response_channel, Ok(bytes)),
+                Err(e) => send_read_request_result(response_channel, Err(anyhow!(e))),
+            }
+        });
+    }
+
     /// Handle the results from a resolve attempt. If it succeeded, notify the
     /// listener. Otherwise if we have fallback peers to try, start another
     /// query and send the result to them. By default these are the peers
@@ -540,13 +622,13 @@ where

     // The following are helper functions because Rust Analyzer has trouble with recognising that `swarm.behaviour_mut()` is a legal call.

-    fn discovery_mut(&mut self) -> &mut behaviour::discovery::Behaviour {
+    fn discovery_mut(&mut self) -> &mut discovery::Behaviour {
         self.swarm.behaviour_mut().discovery_mut()
     }
-    fn membership_mut(&mut self) -> &mut behaviour::membership::Behaviour<V> {
+    fn membership_mut(&mut self) -> &mut membership::Behaviour<V> {
         self.swarm.behaviour_mut().membership_mut()
     }
-    fn content_mut(&mut self) -> &mut behaviour::content::Behaviour<P> {
+    fn content_mut(&mut self) -> &mut content::Behaviour<P> {
         self.swarm.behaviour_mut().content_mut()
     }
 }
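The two `start_iroh_*` helpers above share one design decision: the service's event loop never awaits a transfer itself. It clones the blobs client, moves the work onto a spawned task, and lets that task answer through the oneshot channel. Distilled (a restatement of the code above, not new behavior; `hash`, `size`, `node_addr` and `response_channel` come from the matched request):

// Inside the service's event loop, resolution is fire-and-forget:
let client = self.iroh.blobs_client().clone();
tokio::spawn(async move {
    // Any await here blocks only this task, never the libp2p swarm.
    let res = download_blob(&client, hash, size, node_addr).await;
    // Sending can only fail if the requester dropped its receiver.
    send_resolve_result(response_channel, res.map_err(|e| anyhow!(e)));
});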
@@ -558,6 +640,15 @@ fn send_resolve_result(tx: Sender<ResolveResult>, res: ResolveResult) {
     }
 }

+fn send_read_request_result(
+    tx: Sender<anyhow::Result<Bytes>>,
+    res: anyhow::Result<Bytes>,
+) {
+    if tx.send(res).is_err() {
+        error!("error sending read request result; listener closed")
+    }
+}
+
 /// Builds the transport stack that libp2p will communicate over.
 ///
 /// Based on the equivalent in Forest.
@@ -570,7 +661,11 @@ pub fn build_transport(local_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)> {
     let mplex_config = {
         let mut mplex_config = MplexConfig::new();
         mplex_config.set_max_buffer_size(usize::MAX);
-        mplex_config
+
+        // FIXME: Yamux will end up being deprecated.
+        let yamux_config = yamux::Config::default();
+        // yamux_config.set_window_update_mode(WindowUpdateMode::OnRead);
+        libp2p::core::upgrade::SelectUpgrade::new(yamux_config, mplex_config)
     };

     transport
@@ -580,3 +675,58 @@ pub fn build_transport(local_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)> {
         .timeout(Duration::from_secs(20))
         .boxed()
 }
+
+async fn download_blob(
+    iroh: &BlobsClient,
+    seq_hash: Hash,
+    size: u64,
+    node_addr: NodeAddr,
+) -> anyhow::Result<()> {
+    // Download top-level blob
+    // Use an explicit tag so we can keep track of it
+
+    let tag = Tag(format!("stored-seq-{seq_hash}").into());
+    info!("downloading {} from {:?}", tag, node_addr);
+    iroh.download_with_opts(
+        seq_hash,
+        DownloadOptions {
+            format: BlobFormat::HashSeq,
+            nodes: vec![node_addr],
+            tag: SetTagOption::Named(tag),
+            mode: DownloadMode::Queued,
+        },
+    )
+    .await?
+    .await?;
+
+    // Verify downloaded size of user blob matches the expected size
+    let (_, size_actual) = get_blob_hash_and_size(iroh, seq_hash).await?;
+    if size != size_actual {
+        return Err(anyhow!(
+            "downloaded blob size {} does not match expected size {}",
+            size_actual,
+            size
+        ));
+    }
+
+    // Delete the temporary tag (this might fail as not all nodes will have one).
+    let tag = Tag(format!("temp-seq-{seq_hash}").into());
+    iroh.tags().delete(tag).await.ok();
+
+    debug!("downloaded blob {}", seq_hash);
+
+    Ok(())
+}
+
+async fn read_blob(
+    iroh: &BlobsClient,
+    hash: Hash,
+    offset: u32,
+    len: u32,
+) -> anyhow::Result<Bytes> {
+    let (hash, _) = get_blob_hash_and_size(iroh, hash).await?;
+    let len = ReadAtLen::AtMost(len as u64);
+    let res = iroh.read_at_to_bytes(hash, offset as u64, len).await?;
+    debug!("read blob {}: {:?}", hash, res);
+    Ok(res)
+}
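One detail of `read_blob` worth calling out: `ReadAtLen::AtMost` clamps the request at the end of the blob instead of failing, so short reads near the tail are well-defined. A caller-side sketch (`blobs_client`, `content_hash`, `offset`, and `requested_len` are illustrative names):

// Reading past the end yields the available suffix rather than an error.
let len = ReadAtLen::AtMost(requested_len as u64);
let bytes = blobs_client
    .read_at_to_bytes(content_hash, offset as u64, len)
    .await?;
assert!(bytes.len() <= requested_len as usize);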
diff --git a/ipld/resolver/tests/smoke.rs b/ipld/resolver/tests/smoke.rs
index db28e7c71a..8bb00c0732 100644
--- a/ipld/resolver/tests/smoke.rs
+++ b/ipld/resolver/tests/smoke.rs
@@ -29,8 +29,8 @@ use fvm_ipld_hamt::Hamt;
 use fvm_shared::{address::Address, ActorID};
 use ipc_api::subnet_id::SubnetID;
 use ipc_ipld_resolver::{
-    Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig,
-    NetworkConfig, Resolver, Service, VoteRecord,
+    Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, IrohConfig,
+    MembershipConfig, NetworkConfig, Resolver, Service, VoteRecord,
 };
 use libipld::Cid as LibipldCid;
 use libp2p::{
@@ -106,7 +106,7 @@ impl ClusterBuilder {
     }

     /// Add a node with randomized address, optionally bootstrapping from an existing node.
-    fn add_node(&mut self, bootstrap: Option<usize>) {
+    async fn add_node(&mut self, bootstrap: Option<usize>) {
         let bootstrap_addr = bootstrap.map(|i| {
             let config = &self.agents[i].config;
             let peer_id = config.network.local_peer_id();
             addr
         });
         let config = make_config(&mut self.rng, self.size, bootstrap_addr);
-        let (service, store) = make_service(config.clone());
+        let (service, store) = make_service(config.clone()).await;
         let client = service.client();
         let events = service.subscribe();
         self.services.push(service);
@@ -294,7 +294,7 @@ async fn single_bootstrap_publish_receive_preemptive() {
 async fn can_register_metrics() {
     let mut rng = rand::rngs::StdRng::seed_from_u64(0);
     let config = make_config(&mut rng, 1, None);
-    let (mut service, _) = make_service(config);
+    let (mut service, _) = make_service(config).await;
     let registry = prometheus::Registry::new();
     service.register_metrics(&registry).unwrap();
 }
@@ -305,7 +305,9 @@ async fn make_cluster_with_bootstrap(cluster_size: u32, bootstrap_idx: usize) ->
     // Build a cluster of nodes.
     for i in 0..builder.size {
-        builder.add_node(if i == 0 { None } else { Some(bootstrap_idx) });
+        builder
+            .add_node(if i == 0 { None } else { Some(bootstrap_idx) })
+            .await;
     }

     // Start the swarms.
@@ -314,13 +316,22 @@ async fn make_cluster_with_bootstrap(cluster_size: u32, bootstrap_idx: usize) ->
     cluster
 }

-fn make_service(config: Config) -> (Service, TestBlockstore) {
+async fn make_service(config: Config) -> (Service, TestBlockstore) {
     let store = TestBlockstore::default();
-    let svc = Service::new_with_transport(config, store.clone(), build_transport).unwrap();
+    let svc = Service::new_with_transport(config, store.clone(), build_transport)
+        .await
+        .unwrap();
     (svc, store)
 }

 fn make_config(rng: &mut StdRng, cluster_size: u32, bootstrap_addr: Option<Multiaddr>) -> Config {
+    use std::time::{SystemTime, UNIX_EPOCH};
+    let timestamp = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap()
+        .as_nanos();
+    let random_id = rng.gen::<u64>();
+
     let config = Config {
         connection: ConnectionConfig {
             listen_addr: Multiaddr::from(Protocol::Memory(rng.gen::<u64>())),
@@ -350,6 +361,12 @@ fn make_config(rng: &mut StdRng, cluster_size: u32, bootstrap_addr: Option) -> fmt::Result {
+        write!(
+            f,
+            "{}. {} {:?} ipv4={:?} ipv6={:?}",
+            self.iface.index, self.iface.name, self.iface.if_type, self.iface.ipv4, self.iface.ipv6
+        )
+    }
+}
+
+impl PartialEq for Interface {
+    fn eq(&self, other: &Self) -> bool {
+        self.iface.index == other.iface.index
+            && self.iface.name == other.iface.name
+            && self.iface.flags == other.iface.flags
+            && self.iface.mac_addr.as_ref().map(|a| a.octets())
+                == other.iface.mac_addr.as_ref().map(|a| a.octets())
+    }
+}
+
+impl Eq for Interface {}
+
+impl Interface {
+    /// Is this interface up?
+    pub(crate) fn is_up(&self) -> bool {
+        is_up(&self.iface)
+    }
+
+    /// The name of the interface.
+    pub(crate) fn name(&self) -> &str {
+        &self.iface.name
+    }
+
+    /// A list of all ip addresses of this interface.
+    pub fn addrs(&self) -> impl Iterator<Item = IpNet> + '_ {
+        self.iface
+            .ipv4
+            .iter()
+            .cloned()
+            .map(IpNet::V4)
+            .chain(self.iface.ipv6.iter().cloned().map(IpNet::V6))
+    }
+
+    /// Creates a fake interface for usage in tests.
+    ///
+    /// This allows tests to be independent of the host interfaces.
+    pub(crate) fn fake() -> Self {
+        use std::net::Ipv4Addr;
+
+        use netdev::{interface::InterfaceType, mac::MacAddr, NetworkDevice};
+
+        Self {
+            iface: netdev::Interface {
+                index: 2,
+                name: String::from("wifi0"),
+                friendly_name: None,
+                description: None,
+                if_type: InterfaceType::Ethernet,
+                mac_addr: Some(MacAddr::new(2, 3, 4, 5, 6, 7)),
+                ipv4: vec![Ipv4Net::new(Ipv4Addr::new(192, 168, 0, 189), 24).unwrap()],
+                ipv6: vec![],
+                flags: 69699,
+                transmit_speed: None,
+                receive_speed: None,
+                gateway: Some(NetworkDevice {
+                    mac_addr: MacAddr::new(2, 3, 4, 5, 6, 8),
+                    ipv4: vec![Ipv4Addr::from([192, 168, 0, 1])],
+                    ipv6: vec![],
+                }),
+                dns_servers: vec![],
+                default: false,
+            },
+        }
+    }
+}
+
+/// Structure of an IP network, either IPv4 or IPv6.
+#[derive(Clone, Debug)]
+pub enum IpNet {
+    /// Structure of IPv4 Network.
+    V4(Ipv4Net),
+    /// Structure of IPv6 Network.
+    V6(Ipv6Net),
+}
+
+impl PartialEq for IpNet {
+    fn eq(&self, other: &Self) -> bool {
+        match (self, other) {
+            (IpNet::V4(a), IpNet::V4(b)) => {
+                a.addr() == b.addr()
+                    && a.prefix_len() == b.prefix_len()
+                    && a.netmask() == b.netmask()
+            }
+            (IpNet::V6(a), IpNet::V6(b)) => {
+                a.addr() == b.addr()
+                    && a.prefix_len() == b.prefix_len()
+                    && a.netmask() == b.netmask()
+            }
+            _ => false,
+        }
+    }
+}
+impl Eq for IpNet {}
+
+impl IpNet {
+    /// The IP address of this structure.
+    pub fn addr(&self) -> IpAddr {
+        match self {
+            IpNet::V4(a) => IpAddr::V4(a.addr()),
+            IpNet::V6(a) => IpAddr::V6(a.addr()),
+        }
+    }
+}
+
+/// Intended to store the state of the machine's network interfaces, routing table, and
+/// other network configuration. For now it's pretty basic.
+#[derive(Debug, PartialEq, Eq)]
+pub struct State {
+    /// Maps from an interface name to the interface.
+    pub interfaces: HashMap<String, Interface>,
+
+    /// Whether this machine has an IPv6 Global or Unique Local Address
+    /// which might provide connectivity.
+    pub have_v6: bool,
+
+    /// Whether the machine has some non-localhost, non-link-local IPv4 address.
+    pub have_v4: bool,
+
+    /// Whether the current network interface is considered "expensive", which currently means LTE/etc
+    /// instead of Wifi. This field is not populated by `get_state`.
+    pub(crate) is_expensive: bool,
+
+    /// The interface name for the machine's default route.
+    ///
+    /// It is not yet populated on all OSes.
+    ///
+    /// When set, its value is the map key into `interface` and `interface_ips`.
+    pub(crate) default_route_interface: Option<String>,
+
+    /// The HTTP proxy to use, if any.
+    pub(crate) http_proxy: Option<String>,
+
+    /// The URL to the Proxy Autoconfig URL, if applicable.
+    pub(crate) pac: Option<String>,
+}
+
+impl fmt::Display for State {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut ifaces: Vec<_> = self.interfaces.values().collect();
+        ifaces.sort_by_key(|iface| iface.iface.index);
+        for iface in ifaces {
+            write!(f, "{iface}")?;
+            if let Some(ref default_if) = self.default_route_interface {
+                if iface.name() == default_if {
+                    write!(f, " (default)")?;
+                }
+            }
+            if f.alternate() {
+                writeln!(f)?;
+            } else {
+                write!(f, "; ")?;
+            }
+        }
+        Ok(())
+    }
+}
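`State::fake` (defined just below) pairs with `Interface::fake` so tests can run against a fixed topology instead of the host's interfaces; a sketch of the intended usage, with a hypothetical test module name:

#[cfg(test)]
mod fake_state_tests {
    use super::State;

    #[test]
    fn fake_state_is_deterministic() {
        let state = State::fake();
        // One known interface, marked as the default route.
        assert!(state.have_v4 && state.have_v6);
        assert_eq!(state.default_route_interface.as_deref(), Some("wifi0"));
        assert!(state.interfaces.contains_key("wifi0"));
    }
}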
+
+impl State {
+    /// Returns the state of all the current machine's network interfaces.
+    ///
+    /// It does not set the returned `State.is_expensive`. The caller can populate that.
+    pub async fn new() -> Self {
+        let mut interfaces = HashMap::new();
+        let mut have_v6 = false;
+        let mut have_v4 = false;
+
+        let ifaces = netdev::interface::get_interfaces();
+        for iface in ifaces {
+            let ni = Interface { iface };
+            let if_up = ni.is_up();
+            let name = ni.iface.name.clone();
+            let pfxs: Vec<_> = ni.addrs().collect();
+
+            if if_up {
+                for pfx in &pfxs {
+                    if pfx.addr().is_loopback() {
+                        continue;
+                    }
+                    have_v6 |= is_usable_v6(&pfx.addr());
+                    have_v4 |= is_usable_v4(&pfx.addr());
+                }
+            }
+
+            interfaces.insert(name, ni);
+        }
+
+        let default_route_interface = default_route_interface().await;
+
+        State {
+            interfaces,
+            have_v4,
+            have_v6,
+            is_expensive: false,
+            default_route_interface,
+            http_proxy: None,
+            pac: None,
+        }
+    }
+
+    /// Creates a fake interface state for usage in tests.
+    ///
+    /// This allows tests to be independent of the host interfaces.
+    pub fn fake() -> Self {
+        let fake = Interface::fake();
+        let ifname = fake.iface.name.clone();
+        Self {
+            interfaces: [(ifname.clone(), fake)].into_iter().collect(),
+            have_v6: true,
+            have_v4: true,
+            is_expensive: false,
+            default_route_interface: Some(ifname),
+            http_proxy: None,
+            pac: None,
+        }
+    }
+}
+
+/// Reports whether ip is a usable IPv4 address which should have Internet connectivity.
+///
+/// Globally routable and private IPv4 addresses are always Usable, and link local
+/// 169.254.x.x addresses are in some environments.
+fn is_usable_v4(ip: &IpAddr) -> bool {
+    if !ip.is_ipv4() || ip.is_loopback() {
+        return false;
+    }
+
+    true
+}
+
+/// Reports whether ip is a usable IPv6 address which should have Internet connectivity.
+///
+/// Globally routable IPv6 addresses are always Usable, and Unique Local Addresses
+/// (fc00::/7) are in some environments used with address translation.
+///
+/// We consider all 2000::/3 addresses to be routable, which is the interpretation of
+///
+/// as well. However this probably includes some addresses which should not be routed,
+/// e.g. documentation addresses. See also
+/// for an
+/// alternative implementation which is both stricter and laxer in some regards.
+fn is_usable_v6(ip: &IpAddr) -> bool {
+    match ip {
+        IpAddr::V6(ip) => {
+            // V6 Global1 2000::/3
+            let mask: u16 = 0b1110_0000_0000_0000;
+            let base: u16 = 0x2000;
+            let segment1 = ip.segments()[0];
+            if (base & mask) == (segment1 & mask) {
+                return true;
+            }
+
+            is_private_v6(ip)
+        }
+        IpAddr::V4(_) => false,
+    }
+}
+
+/// The details about a default route.
+#[derive(Debug, Clone)]
+pub struct DefaultRouteDetails {
+    /// The interface name.
+    /// It's like "eth0" (Linux), "Ethernet 2" (Windows), "en0" (macOS).
+    pub interface_name: String,
+}
+
+impl DefaultRouteDetails {
+    /// Reads the default route from the current system and returns the details.
+    pub async fn new() -> Option<Self> {
+        default_route().await
+    }
+}
+
+/// Like `DefaultRouteDetails::new` but only returns the interface name.
+pub async fn default_route_interface() -> Option<String> {
+    DefaultRouteDetails::new().await.map(|v| v.interface_name)
+}
+
+/// Likely IPs of the residential router, and the IP address of the current
+/// machine using it.
+#[derive(Debug, Clone)]
+pub struct HomeRouter {
+    /// IP of the router.
+    pub gateway: IpAddr,
+    /// Our local IP, if known.
+    pub my_ip: Option<IpAddr>,
+}
+
+impl HomeRouter {
+    /// Returns the likely IP of the residential router, which will always
+    /// be a private address, if found.
+    /// In addition, it returns the IP address of the current machine on
+    /// the LAN using that gateway.
+ /// This is used as the destination for UPnP, NAT-PMP, PCP, etc queries. + pub fn new() -> Option { + let gateway = Self::get_default_gateway()?; + let my_ip = netdev::interface::get_local_ipaddr(); + + Some(HomeRouter { gateway, my_ip }) + } + + #[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" + ))] + fn get_default_gateway() -> Option { + // netdev doesn't work yet + // See: https://github.com/shellrow/default-net/issues/34 + bsd::likely_home_router() + } + + #[cfg(any(target_os = "linux", target_os = "android", target_os = "windows"))] + fn get_default_gateway() -> Option { + let gateway = netdev::get_default_gateway().ok()?; + gateway + .ipv4 + .iter() + .cloned() + .map(IpAddr::V4) + .chain(gateway.ipv6.iter().cloned().map(IpAddr::V6)) + .next() + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv6Addr; + + use super::*; + + #[tokio::test] + async fn test_default_route() { + let default_route = DefaultRouteDetails::new() + .await + .expect("missing default route"); + println!("default_route: {:#?}", default_route); + } + + #[tokio::test] + async fn test_likely_home_router() { + let home_router = HomeRouter::new().expect("missing home router"); + println!("home router: {:#?}", home_router); + } + + #[test] + fn test_is_usable_v6() { + let loopback = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1); + assert!(!is_usable_v6(&loopback.into())); + + let link_local = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xcbc9, 0x6aff, 0x5b07, 0x4a9e); + assert!(!is_usable_v6(&link_local.into())); + + let relay_use1 = Ipv6Addr::new(0x2a01, 0x4ff, 0xf0, 0xc4a1, 0, 0, 0, 0x1); + assert!(is_usable_v6(&relay_use1.into())); + + let random_2603 = Ipv6Addr::new(0x2603, 0x3ff, 0xf1, 0xc3aa, 0x1, 0x2, 0x3, 0x1); + assert!(is_usable_v6(&random_2603.into())); + } +} diff --git a/patches/netwatch/src/interfaces/bsd.rs b/patches/netwatch/src/interfaces/bsd.rs new file mode 100644 index 0000000000..5097b86b6f --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd.rs @@ -0,0 +1,1118 @@ +//! 
Based on + +#![allow(unused)] + +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + sync::LazyLock, +}; + +use libc::{c_int, uintptr_t, AF_INET, AF_INET6, AF_LINK, AF_ROUTE, AF_UNSPEC, CTL_NET}; +#[cfg(any(target_os = "macos", target_os = "ios"))] +use libc::{ + NET_RT_DUMP, RTAX_BRD, RTAX_DST, RTAX_GATEWAY, RTAX_MAX, RTAX_NETMASK, RTA_IFP, RTF_GATEWAY, +}; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, IntoError, OptionExt, Snafu}; +use tracing::warn; + +use super::DefaultRouteDetails; + +#[cfg(target_os = "freebsd")] +mod freebsd; +#[cfg(target_os = "freebsd")] +pub(crate) use self::freebsd::*; +#[cfg(target_os = "netbsd")] +mod netbsd; +#[cfg(target_os = "netbsd")] +pub(crate) use self::netbsd::*; +#[cfg(target_os = "openbsd")] +mod openbsd; +#[cfg(target_os = "openbsd")] +pub(crate) use self::openbsd::*; + +#[cfg(any(target_os = "macos", target_os = "ios"))] +mod macos; +#[cfg(any(target_os = "macos", target_os = "ios"))] +use self::macos::*; + +pub async fn default_route() -> Option { + let idx = default_route_interface_index()?; + let interfaces = netdev::get_interfaces(); + let iface = interfaces.into_iter().find(|i| i.index == idx)?; + + Some(DefaultRouteDetails { + interface_name: iface.name, + }) +} + +pub fn likely_home_router() -> Option { + let rib = fetch_routing_table()?; + let msgs = parse_routing_table(&rib)?; + for rm in msgs { + if !is_default_gateway(&rm) { + continue; + } + + if let Some(gw) = rm.addrs.get(RTAX_GATEWAY as usize) { + if let Addr::Inet4 { ip } = gw { + return Some(IpAddr::V4(*ip)); + } + + if let Addr::Inet6 { ip, .. } = gw { + return Some(IpAddr::V6(*ip)); + } + } + } + None +} + +/// Returns the index of the network interface that +/// owns the default route. It returns the first IPv4 or IPv6 default route it +/// finds (it does not prefer one or the other). +fn default_route_interface_index() -> Option { + // $ netstat -nr + // Routing tables + // Internet: + // Destination Gateway Flags Netif Expire + // default 10.0.0.1 UGSc en0 <-- want this one + // default 10.0.0.1 UGScI en1 + + // From man netstat: + // U RTF_UP Route usable + // G RTF_GATEWAY Destination requires forwarding by intermediary + // S RTF_STATIC Manually added + // c RTF_PRCLONING Protocol-specified generate new routes on use + // I RTF_IFSCOPE Route is associated with an interface scope + + let rib = fetch_routing_table()?; + let msgs = parse_routing_table(&rib)?; + for rm in msgs { + if is_default_gateway(&rm) { + return Some(rm.index as u32); + } + } + None +} + +const V4_DEFAULT: [u8; 4] = [0u8; 4]; +const V6_DEFAULT: [u8; 16] = [0u8; 16]; + +fn is_default_gateway(rm: &RouteMessage) -> bool { + if rm.flags & RTF_GATEWAY as u32 == 0 { + return false; + } + + #[cfg(any(target_os = "macos", target_os = "ios"))] + if rm.flags & libc::RTF_IFSCOPE as u32 != 0 { + return false; + } + + // Addrs is [RTAX_DST, RTAX_GATEWAY, RTAX_NETMASK, ...] + if rm.addrs.len() <= RTAX_NETMASK as usize { + return false; + } + + let Some(dst) = rm.addrs.get(RTAX_DST as usize) else { + return false; + }; + let Some(netmask) = rm.addrs.get(RTAX_NETMASK as usize) else { + return false; + }; + + match (dst, netmask) { + (Addr::Inet4 { ip: dst }, Addr::Inet4 { ip: netmask }) => { + if dst.octets() == V4_DEFAULT && netmask.octets() == V4_DEFAULT { + return true; + } + } + (Addr::Inet6 { ip: dst, .. }, Addr::Inet6 { ip: netmask, .. 
}) => { + if dst.octets() == V6_DEFAULT && netmask.octets() == V6_DEFAULT { + return true; + } + } + _ => {} + } + false +} + +#[cfg(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd"))] +fn fetch_routing_table() -> Option> { + match fetch_rib(AF_UNSPEC, libc::NET_RT_DUMP, 0) { + Ok(res) => Some(res), + Err(err) => { + warn!("fetch_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd"))] +fn parse_routing_table(rib: &[u8]) -> Option> { + match parse_rib(libc::NET_RT_IFLIST, rib) { + Ok(res) => { + let res = res + .into_iter() + .filter_map(|m| match m { + WireMessage::Route(r) => Some(r), + _ => None, + }) + .collect(); + Some(res) + } + Err(err) => { + warn!("parse_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios",))] +fn fetch_routing_table() -> Option> { + const NET_RT_DUMP2: i32 = 7; + match fetch_rib(libc::AF_UNSPEC, NET_RT_DUMP2, 0) { + Ok(res) => Some(res), + Err(err) => { + warn!("fetch_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios",))] +fn parse_routing_table(rib: &[u8]) -> Option> { + match parse_rib(libc::NET_RT_IFLIST2, rib) { + Ok(res) => { + let res = res + .into_iter() + .filter_map(|m| match m { + WireMessage::Route(r) => Some(r), + _ => None, + }) + .collect(); + Some(res) + } + Err(err) => { + warn!("parse_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +const fn is_valid_rib_type(typ: RIBType) -> bool { + const NET_RT_STAT: RIBType = 4; + const NET_RT_TRASH: RIBType = 5; + if typ == NET_RT_STAT || typ == NET_RT_TRASH { + return false; + } + true +} + +#[cfg(any(target_os = "freebsd", target_os = "netbsd"))] +const fn is_valid_rib_type(typ: RIBType) -> bool { + true +} + +#[cfg(target_os = "openbsd")] +const fn is_valid_rib_type(typ: RIBType) -> bool { + if typ == NET_RT_STATS || typ == NET_RT_TABLE { + return false; + } + true +} + +#[derive(Debug, Copy, Clone)] +struct WireFormat { + /// offset of header extension + ext_off: usize, + /// offset of message body + body_off: usize, + typ: MessageType, +} + +#[derive(Debug)] +pub enum WireMessage { + Route(RouteMessage), + Interface(InterfaceMessage), + InterfaceAddr(InterfaceAddrMessage), + InterfaceMulticastAddr(InterfaceMulticastAddrMessage), + InterfaceAnnounce(InterfaceAnnounceMessage), +} + +/// Safely convert a some bytes from a slice into a u16. +fn u16_from_ne_range( + data: &[u8], + range: impl std::slice::SliceIndex<[u8], Output = [u8]>, +) -> Result { + data.get(range) + .and_then(|s| TryInto::<[u8; 2]>::try_into(s).ok()) + .map(u16::from_ne_bytes) + .context(MessageTooShortSnafu) +} + +/// Safely convert some bytes from a slice into a u32. +fn u32_from_ne_range( + data: &[u8], + range: impl std::slice::SliceIndex<[u8], Output = [u8]>, +) -> Result { + data.get(range) + .and_then(|s| TryInto::<[u8; 4]>::try_into(s).ok()) + .map(u32::from_ne_bytes) + .context(MessageTooShortSnafu) +} + +impl WireFormat { + fn parse(&self, _typ: RIBType, data: &[u8]) -> Result, RouteError> { + match self.typ { + #[cfg(any( + target_os = "freebsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" + ))] + MessageType::Route => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + let attrs: i32 = u32_from_ne_range(data, 12..16)? 
+ .try_into() + .map_err(|_| InvalidMessageSnafu.build())?; + let addrs = parse_addrs(attrs, parse_kernel_inet_addr, &data[self.body_off..])?; + let mut m = RouteMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)?, + index: u16_from_ne_range(data, 4..6)?, + id: u32_from_ne_range(data, 16..20)? as _, + seq: u32_from_ne_range(data, 20..24)?, + ext_off: self.ext_off, + error: None, + addrs, + }; + let errno = u32_from_ne_range(data, 28..32)?; + if errno != 0 { + m.error = Some(std::io::Error::from_raw_os_error(errno as _)); + } + + Ok(Some(WireMessage::Route(m))) + } + #[cfg(target_os = "openbsd")] + MessageType::Route => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + let ll = u16_from_ne_range(data, 4..6)? as usize; + snafu::ensure!(data.len() >= ll as usize, InvalidMessageSnafu); + + let addrs = parse_addrs( + u32_from_ne_range(data, 12..16)? as _, + parse_kernel_inet_addr, + &data[ll..], + )?; + + let mut m = RouteMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 16..20)?, + index: u16_from_ne_range(data, 6..8)?, + id: u32_from_ne_range(data, 24..28)? as _, + seq: u32_from_ne_range(data, 28..32)?, + ext_off: self.ext_off, + error: None, + addrs, + }; + let errno = u32_from_ne_range(data, 32..36)?; + if errno != 0 { + m.error = Some(std::io::Error::from_raw_os_error(errno as _)); + } + + Ok(Some(WireMessage::Route(m))) + } + MessageType::Interface => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, 0..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let attrs = u32_from_ne_range(data, 4..8)?; + if attrs as c_int & RTA_IFP == 0 { + return Ok(None); + } + let addr = parse_link_addr(&data[self.body_off..])?; + let name = addr.name().map(|s| s.to_string()); + let m = InterfaceMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: u16_from_ne_range(data, 12..14)? as _, + ext_off: self.ext_off, + addr_rtax_ifp: addr, + name, + }; + + Ok(Some(WireMessage::Interface(m))) + } + MessageType::InterfaceAddr => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + #[cfg(target_os = "netbsd")] + let index = u16_from_ne_range(data, 16..18)?; + #[cfg(not(target_os = "netbsd"))] + let index = u16_from_ne_range(data, 12..14)?; + + let addrs = parse_addrs( + u32_from_ne_range(data, 4..8)? as _, + parse_kernel_inet_addr, + &data[self.body_off..], + )?; + + let m = InterfaceAddrMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: index as _, + addrs, + }; + Ok(Some(WireMessage::InterfaceAddr(m))) + } + MessageType::InterfaceMulticastAddr => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let addrs = parse_addrs( + u32_from_ne_range(data, 4..8)? as _, + parse_kernel_inet_addr, + &data[self.body_off..], + )?; + let m = InterfaceMulticastAddrMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: u16_from_ne_range(data, 12..14)? 
as _, + addrs, + }; + Ok(Some(WireMessage::InterfaceMulticastAddr(m))) + } + MessageType::InterfaceAnnounce => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let mut name = String::new(); + for i in 0..16 { + if data[6 + i] != 0 { + continue; + } + name = std::str::from_utf8(&data[6..6 + i]) + .map_err(|_| InvalidAddressSnafu.build())? + .to_string(); + break; + } + + let m = InterfaceAnnounceMessage { + version: data[2] as _, + r#type: data[3] as _, + index: u16_from_ne_range(data, 4..6)? as _, + what: u16_from_ne_range(data, 22..24)? as _, + name, + }; + + Ok(Some(WireMessage::InterfaceAnnounce(m))) + } + } + } +} + +#[derive(Debug, Copy, Clone)] +enum MessageType { + Route, + Interface, + InterfaceAddr, + InterfaceMulticastAddr, + InterfaceAnnounce, +} + +static ROUTING_STACK: LazyLock = LazyLock::new(probe_routing_stack); + +struct RoutingStack { + rtm_version: i32, + kernel_align: usize, + wire_formats: HashMap, +} + +/// Parses b as a routing information base and returns a list of routing messages. +pub fn parse_rib(typ: RIBType, data: &[u8]) -> Result, RouteError> { + snafu::ensure!( + is_valid_rib_type(typ), + InvalidRibTypeSnafu { rib_type: typ } + ); + + let mut msgs = Vec::new(); + let mut nmsgs = 0; + let mut nskips = 0; + let mut b = data; + + while b.len() > 4 { + nmsgs += 1; + let l = u16_from_ne_range(b, ..2)?; + snafu::ensure!(l != 0, InvalidMessageSnafu); + snafu::ensure!(b.len() >= l as usize, MessageTooShortSnafu); + if b[2] as i32 != ROUTING_STACK.rtm_version { + // b = b[l:]; + continue; + } + match ROUTING_STACK.wire_formats.get(&(b[3] as i32)) { + Some(w) => { + let m = w.parse(typ, &b[..l as usize])?; + match m { + Some(m) => { + msgs.push(m); + } + None => { + nskips += 1; + } + } + } + None => { + nskips += 1; + } + } + b = &b[l as usize..]; + } + + // We failed to parse any of the messages - version mismatch? + snafu::ensure!(nmsgs == msgs.len() + nskips, MessageMismatchSnafu); + + Ok(msgs) +} + +/// A RouteMessage represents a message conveying an address prefix, a +/// nexthop address and an output interface. +/// +/// Unlike other messages, this message can be used to query adjacency +/// information for the given address prefix, to add a new route, and +/// to delete or modify the existing route from the routing information +/// base inside the kernel by writing and reading route messages on a +/// routing socket. +/// +/// For the manipulation of routing information, the route message must +/// contain appropriate fields that include: +/// +/// Version = +/// Type = +/// Flags = +/// Index = +/// ID = +/// Seq = +/// Addrs = +#[derive(Debug)] +pub struct RouteMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// route flags + pub flags: u32, + /// interface index when attached + pub index: u16, + /// sender's identifier; usually process ID + pub id: uintptr_t, + /// sequence number + pub seq: u32, + // error on requested operation + pub error: Option, + // addresses + pub addrs: Vec, + // offset of header extension + ext_off: usize, + // raw: []byte // raw message +} + +/// An interface message. 
+#[derive(Debug)] +pub struct InterfaceMessage { + /// Message version + pub version: isize, + /// Message type + pub r#type: isize, + // Interface flags + pub flags: isize, + // interface index + pub index: isize, + /// Interface name + pub name: Option, + /// Addresses + pub addr_rtax_ifp: Addr, + /// Offset of header extension + pub ext_off: usize, +} + +/// An interface address message. +#[derive(Debug)] +pub struct InterfaceAddrMessage { + /// Message version + pub version: isize, + /// Message type + pub r#type: isize, + /// Interface flags + pub flags: isize, + /// Interface index + pub index: isize, + /// Addresses + pub addrs: Vec, +} + +/// Interface multicast address message. +#[derive(Debug)] +pub struct InterfaceMulticastAddrMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// interface flags + pub flags: isize, + /// interface index + pub index: isize, + /// addresses + pub addrs: Vec, +} + +/// Interface announce message. +#[derive(Debug)] +pub struct InterfaceAnnounceMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// interface index + pub index: isize, + /// interface name + pub name: String, + /// what type of announcement + pub what: isize, +} + +/// Represents a type of routing information base. +type RIBType = i32; + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum RouteError { + #[snafu(display("message mismatch"))] + MessageMismatch {}, + #[snafu(display("message too short"))] + MessageTooShort {}, + #[snafu(display("invalid message"))] + InvalidMessage {}, + #[snafu(display("invalid address"))] + InvalidAddress {}, + #[snafu(display("invalid rib type {rib_type}"))] + InvalidRibType { rib_type: RIBType }, + #[snafu(display("io error calling '{name}'"))] + Io { + source: std::io::Error, + name: &'static str, + }, +} + +/// FetchRIB fetches a routing information base from the operating system. +/// +/// The provided af must be an address family. +/// +/// The provided arg must be a RIBType-specific argument. +/// When RIBType is related to routes, arg might be a set of route +/// flags. When RIBType is related to network interfaces, arg might be +/// an interface index or a set of interface flags. In most cases, zero +/// means a wildcard. +fn fetch_rib(af: i32, typ: RIBType, arg: i32) -> Result, RouteError> { + let mut round = 0; + loop { + round += 1; + + let mut mib: [i32; 6] = [CTL_NET, AF_ROUTE, 0, af, typ, arg]; + let mut n: libc::size_t = 0; + let err = unsafe { + libc::sysctl( + mib.as_mut_ptr() as *mut _, + 6, + std::ptr::null_mut(), + &mut n, + std::ptr::null_mut(), + 0, + ) + }; + if err != 0 { + return Err(IoSnafu { name: "sysctl" }.into_error(std::io::Error::last_os_error())); + } + if n == 0 { + // nothing available + return Ok(Vec::new()); + } + let mut b = vec![0u8; n]; + let err = unsafe { + libc::sysctl( + mib.as_mut_ptr() as _, + 6, + b.as_mut_ptr() as _, + &mut n, + std::ptr::null_mut(), + 0, + ) + }; + if err != 0 { + // If the sysctl failed because the data got larger + // between the two sysctl calls, try a few times + // before failing. (golang.org/issue/45736). 
+ let io_err = std::io::Error::last_os_error(); + const MAX_TRIES: usize = 3; + if io_err.raw_os_error().unwrap_or_default() == libc::ENOMEM && round < MAX_TRIES { + continue; + } + return Err(IoSnafu { name: "sysctl" }.into_error(io_err)); + } + // Truncate b, to the new length + b.truncate(n); + + return Ok(b); + } +} + +/// Represents an address associated with packet routing. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Addr { + /// Represents a link-layer address. + Link { + /// interface index when attached + index: i32, + /// interface name when attached + name: Option, + /// link-layer address when attached + addr: Option>, + }, + /// Represents an internet address for IPv4. + Inet4 { ip: Ipv4Addr }, + /// Represents an internet address for IPv6. + Inet6 { ip: Ipv6Addr, zone: u32 }, + /// Represents an address of various operating system-specific features. + Default { + af: i32, + /// raw format of address + raw: Box<[u8]>, + }, +} + +impl Addr { + pub fn family(&self) -> i32 { + match self { + Addr::Link { .. } => AF_LINK, + Addr::Inet4 { .. } => AF_INET, + Addr::Inet6 { .. } => AF_INET6, + Addr::Default { af, .. } => *af, + } + } + + pub fn name(&self) -> Option<&str> { + match self { + Addr::Link { name, .. } => name.as_ref().map(|s| s.as_str()), + _ => None, + } + } + + pub fn ip(&self) -> Option { + match self { + Addr::Inet4 { ip } => Some(IpAddr::V4(*ip)), + Addr::Inet6 { ip, .. } => { + // TODO: how to add the zone? + Some(IpAddr::V6(*ip)) + } + _ => None, + } + } +} + +fn roundup(l: usize) -> usize { + if l == 0 { + return ROUTING_STACK.kernel_align; + } + let mut x = l + ROUTING_STACK.kernel_align - 1; + x &= !(ROUTING_STACK.kernel_align - 1); + x +} + +fn parse_addrs(attrs: i32, default_fn: F, data: &[u8]) -> Result, RouteError> +where + F: Fn(i32, &[u8]) -> Result<(i32, Addr), RouteError>, +{ + let mut addrs = Vec::with_capacity(RTAX_MAX as usize); + let af = AF_UNSPEC; + + let mut b = data; + for i in 0..RTAX_MAX as usize { + if b.len() < roundup(0) { + break; + } + + if attrs & (1 << i) == 0 { + continue; + } + if i <= RTAX_BRD as usize { + match b[1] as i32 { + AF_LINK => { + let a = parse_link_addr(b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + AF_INET | AF_INET6 => { + let af = b[1] as i32; + let a = parse_inet_addr(af, b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + _ => { + let (l, a) = default_fn(af, b)?; + addrs.push(a); + let ll = roundup(l as usize); + if b.len() < ll { + b = &b[l as usize..]; + } else { + b = &b[ll..]; + } + } + } + } else { + let a = parse_default_addr(b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + } + // The only remaining bytes in b should be alignment. + // However, under some circumstances DragonFly BSD appears to put + // more addresses in the message than are indicated in the address + // bitmask, so don't check for this. + Ok(addrs) +} + +/// Parses `b` as an internet address for IPv4 or IPv6. 
+fn parse_inet_addr(af: i32, b: &[u8]) -> Result { + match af { + AF_INET => { + snafu::ensure!(b.len() >= SIZEOF_SOCKADDR_INET, InvalidAddressSnafu); + + let ip = Ipv4Addr::new(b[4], b[5], b[6], b[7]); + Ok(Addr::Inet4 { ip }) + } + AF_INET6 => { + snafu::ensure!(b.len() >= SIZEOF_SOCKADDR_INET6, InvalidAddressSnafu); + + let mut zone = u32_from_ne_range(b, 24..28)?; + let mut oc: [u8; 16] = b + .get(8..24) + .and_then(|s| TryInto::<[u8; 16]>::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + if oc[0] == 0xfe && oc[1] & 0xc0 == 0x80 + || oc[0] == 0xff && (oc[1] & 0x0f == 0x01 || oc[1] & 0x0f == 0x02) + { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + // NOTE: This is the only place in which uses big-endian. Is that right? + let id = oc + .get(2..4) + .and_then(|s| TryInto::<[u8; 2]>::try_into(s).ok()) + .map(u16::from_be_bytes) + .context(InvalidMessageSnafu)? as u32; + if id != 0 { + zone = id; + oc[2] = 0; + oc[3] = 0; + } + } + Ok(Addr::Inet6 { + ip: Ipv6Addr::from(oc), + zone, + }) + } + _ => Err(InvalidAddressSnafu.build()), + } +} + +/// Parses b as an internet address in conventional BSD kernel form. +fn parse_kernel_inet_addr(af: i32, b: &[u8]) -> Result<(i32, Addr), RouteError> { + // The encoding looks similar to the NLRI encoding. + // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the tuple to be conformed with + // the routing message boundary + let mut l = b[0] as usize; + + #[cfg(any(target_os = "macos", target_os = "ios"))] + { + // On Darwin, an address in the kernel form is also used as a message filler. + if l == 0 || b.len() > roundup(l) { + l = roundup(l) + } + } + #[cfg(not(any(target_os = "macos", target_os = "ios")))] + { + l = roundup(l); + } + + snafu::ensure!(b.len() >= l, InvalidAddressSnafu); + // Don't reorder case expressions. + // The case expressions for IPv6 must come first. 
+ const OFF4: usize = 4; // offset of in_addr + const OFF6: usize = 8; // offset of in6_addr + + let addr = if b[0] as usize == SIZEOF_SOCKADDR_INET6 { + let octets: [u8; 16] = b + .get(OFF6..OFF6 + 16) + .and_then(|s| TryInto::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + let ip = Ipv6Addr::from(octets); + Addr::Inet6 { ip, zone: 0 } + } else if af == AF_INET6 { + let mut octets = [0u8; 16]; + if l - 1 < OFF6 { + octets[..l - 1].copy_from_slice(&b[1..l]); + } else { + octets.copy_from_slice(&b[l - OFF6..l]); + } + let ip = Ipv6Addr::from(octets); + Addr::Inet6 { ip, zone: 0 } + } else if b[0] as usize == SIZEOF_SOCKADDR_INET { + let octets: [u8; 4] = b + .get(OFF4..OFF4 + 4) + .and_then(|s| TryInto::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + let ip = Ipv4Addr::from(octets); + Addr::Inet4 { ip } + } else { + // an old fashion, AF_UNSPEC or unknown means AF_INET + let mut octets = [0u8; 4]; + if l - 1 < OFF4 { + octets[..l - 1].copy_from_slice(&b[1..l]); + } else { + octets.copy_from_slice(&b[l - OFF4..l]); + } + let ip = Ipv4Addr::from(octets); + Addr::Inet4 { ip } + }; + + Ok((b[0] as _, addr)) +} + +fn parse_link_addr(b: &[u8]) -> Result { + snafu::ensure!(b.len() >= 8, InvalidAddressSnafu); + let (_, mut a) = parse_kernel_link_addr(AF_LINK, &b[4..])?; + + if let Addr::Link { index, .. } = &mut a { + *index = u16_from_ne_range(b, 2..4)? as _; + } + + Ok(a) +} + +// Parses b as a link-layer address in conventional BSD kernel form. +fn parse_kernel_link_addr(_: i32, b: &[u8]) -> Result<(usize, Addr), RouteError> { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". + let mut nlen = b[1] as usize; + let mut alen = b[2] as usize; + let mut slen = b[3] as usize; + + if nlen == 0xff { + nlen = 0; + } + if alen == 0xff { + alen = 0; + } + if slen == 0xff { + slen = 0; + } + + let l = 4 + nlen + alen + slen; + snafu::ensure!(b.len() >= l, InvalidAddressSnafu); + let mut data = &b[4..]; + + let name = if nlen > 0 { + let name = std::str::from_utf8(&data[..nlen]) + .map_err(|_| InvalidAddressSnafu.build())? 
+ .to_string(); + data = &data[nlen..]; + Some(name) + } else { + None + }; + + let addr = if alen > 0 { + Some(data[..alen].to_vec().into_boxed_slice()) + } else { + None + }; + + let a = Addr::Link { + index: 0, + name, + addr, + }; + + Ok((l, a)) +} + +fn parse_default_addr(b: &[u8]) -> Result { + snafu::ensure!( + b.len() >= 2 && b.len() >= b[0] as usize, + InvalidAddressSnafu + ); + Ok(Addr::Default { + af: b[1] as _, + raw: b[..b[0] as usize].to_vec().into_boxed_slice(), + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_fetch_parse_routing_table() { + let rib_raw = fetch_routing_table().unwrap(); + assert!(!rib_raw.is_empty()); + println!("got rib: {}", rib_raw.len()); + let rib_parsed = parse_routing_table(&rib_raw).unwrap(); + println!("got {} entries", rib_parsed.len()); + assert!(!rib_parsed.is_empty()); + } + + struct ParseAddrsTest { + attrs: i32, + #[allow(clippy::type_complexity)] + parse_fn: Box Result<(i32, Addr), RouteError>>, + b: Vec, + addrs: Vec, + } + + #[test] + #[cfg(target_endian = "little")] + fn test_parse_addrs() { + #[cfg(any(target_os = "macos", target_os = "ios"))] + use libc::{RTA_BRD, RTA_DST, RTA_GATEWAY, RTA_IFA, RTA_IFP, RTA_NETMASK}; + + let parse_addrs_little_endian_tests = [ + ParseAddrsTest { + attrs: RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_BRD, + parse_fn: Box::new(parse_kernel_inet_addr), + b: vec![ + 0x38, 0x12, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38, 0x12, 0x2, 0x0, 0x6, 0x3, + 0x6, 0x0, 0x65, 0x6d, 0x31, 0x0, 0xc, 0x29, 0x66, 0x2c, 0xdc, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xb4, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xff, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ], + addrs: vec![ + Addr::Link { + index: 0, + name: None, + addr: None, + }, + Addr::Link { + index: 2, + name: Some("em1".to_string()), + addr: Some(vec![0x00, 0x0c, 0x29, 0x66, 0x2c, 0xdc].into_boxed_slice()), + }, + Addr::Inet4 { + ip: Ipv4Addr::from([172, 16, 220, 180]), + }, + /*nil, + nil, + nil, + nil,*/ + Addr::Inet4 { + ip: Ipv4Addr::from([172, 16, 220, 255]), + }, + ], + }, + ParseAddrsTest { + attrs: RTA_NETMASK | RTA_IFP | RTA_IFA, + parse_fn: Box::new(parse_kernel_inet_addr), + b: vec![ + 0x7, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0, 0x18, 0x12, 0xa, 0x0, 0x87, 0x8, + 0x0, 0x0, 0x76, 0x6c, 0x61, 0x6e, 0x35, 0x36, 0x38, 0x32, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xa9, 0xfe, 0x0, 0x1, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, + ], + addrs: vec![ + // nil, + // nil, + Addr::Inet4 { + ip: Ipv4Addr::from([255, 255, 255, 0]), + }, + // nil, + Addr::Link { + index: 10, + name: Some("vlan5682".to_string()), + addr: None, + }, + Addr::Inet4 { + ip: Ipv4Addr::from([169, 254, 0, 1]), + }, + // nil, + // nil, + ], + }, + ]; + + for (i, tt) in parse_addrs_little_endian_tests.into_iter().enumerate() { + let addrs = parse_addrs(tt.attrs, tt.parse_fn, &tt.b) + .unwrap_or_else(|_| panic!("failed {}", i)); + + assert_eq!(addrs, tt.addrs, "{}", i); + } + } +} diff --git a/patches/netwatch/src/interfaces/bsd/freebsd.rs 
b/patches/netwatch/src/interfaces/bsd/freebsd.rs new file mode 100644 index 0000000000..6be6d52300 --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/freebsd.rs @@ -0,0 +1,326 @@ +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. +// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 8; +pub const RTM_VERSION: c_int = 5; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_DELMADDR: c_int = 0x10; +pub const RTM_IFANNOUNCE: c_int = 0x11; +pub const RTM_IEEE80211: c_int = 0x12; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_amd64.go +#[cfg(target_arch = "x86_64")] +pub use self::amd64::*; +#[cfg(target_arch = "x86_64")] +mod amd64 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub 
const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_386.go +#[cfg(target_arch = "x86")] +pub use self::i686::*; +#[cfg(target_arch = "x86")] +mod i686 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x38; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0x64; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x54; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_arm.go +#[cfg(target_arch = "arm")] +pub use self::arm::*; +#[cfg(target_arch = "arm")] +mod arm { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x38; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0x70; + pub const 
SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x38; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_arm.go +#[cfg(target_arch = "aarch64")] +pub use self::arm64::*; +#[cfg(target_arch = "aarch64")] +mod arm64 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub const 
SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +/// 386 emulation on amd64 +fn detect_compat_freebsd32() -> bool { + // TODO: implement detection when someone actually needs it + false +} + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + // Currently only BSD11 support is implemented. + // At the time of this writing rust supports 10 and 11, if this is a problem + // please file an issue. + + let (rtm, ifm, ifam, ifmam, ifanm) = if detect_compat_freebsd32() { + unimplemented!() + } else { + let rtm = WireFormat { + ext_off: SIZEOF_RT_MSGHDR_FREE_BSD10 - SIZEOF_RT_METRICS_FREE_BSD10, + body_off: SIZEOF_RT_MSGHDR_FREE_BSD10, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_FREE_BSD11, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_FREE_BSD10, + body_off: SIZEOF_IFA_MSGHDR_FREE_BSD10, + typ: MessageType::InterfaceAddr, + }; + let ifmam = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR_FREE_BSD10, + body_off: SIZEOF_IFMA_MSGHDR_FREE_BSD10, + typ: MessageType::InterfaceMulticastAddr, + }; + let ifanm = WireFormat { + ext_off: SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10, + body_off: SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10, + typ: MessageType::InterfaceAnnounce, + }; + (rtm, ifm, ifam, ifmam, ifanm) + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_LOCK, rtm), + (RTM_RESOLVE, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFINFO, ifm), + (RTM_NEWMADDR, ifmam), + (RTM_DELMADDR, ifmam), + (RTM_IFANNOUNCE, ifanm), + (RTM_IEEE80211, ifanm), + ] + .into_iter() + .collect(); + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 4, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/macos.rs b/patches/netwatch/src/interfaces/bsd/macos.rs new file mode 100644 index 0000000000..5c29ff943a --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/macos.rs @@ -0,0 +1,86 @@ +use super::{MessageType, RoutingStack, WireFormat}; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_darwin.go +const SIZEOF_IF_MSGHDR_DARWIN15: usize = 0x70; +const SIZEOF_IFA_MSGHDR_DARWIN15: usize = 0x14; +const SIZEOF_IFMA_MSGHDR_DARWIN15: usize = 0x10; +const SIZEOF_IF_MSGHDR2_DARWIN15: usize = 0xa0; +const SIZEOF_IFMA_MSGHDR2_DARWIN15: usize = 0x14; +const SIZEOF_IF_DATA_DARWIN15: usize = 0x60; +const SIZEOF_IF_DATA64_DARWIN15: usize = 0x80; + +const SIZEOF_RT_MSGHDR_DARWIN15: usize = 0x5c; +const SIZEOF_RT_MSGHDR2_DARWIN15: usize = 0x5c; +const SIZEOF_RT_METRICS_DARWIN15: usize = 0x38; + +const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub(super) const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub(super) const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = libc::RTM_VERSION; + + let rtm = WireFormat { + ext_off: 36, + body_off: SIZEOF_RT_MSGHDR_DARWIN15, + typ: MessageType::Route, + }; + let rtm2 = WireFormat { + ext_off: 36, + body_off: SIZEOF_RT_MSGHDR2_DARWIN15, + typ: MessageType::Route, + }; + let ifm = WireFormat { + 
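// NOTE (editor's annotation; an assumption based on Go's x/net route package, not part + // of the original patch): 0x10 is the offset of the embedded `if_data` within darwin's + // `if_msghdr`, so parsing reads the fixed header up to `ext_off` and the attribute + // section starting at `body_off`. +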
ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_DARWIN15, + typ: MessageType::Interface, + }; + let ifm2 = WireFormat { + ext_off: 32, + body_off: SIZEOF_IF_MSGHDR2_DARWIN15, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_DARWIN15, + body_off: SIZEOF_IFA_MSGHDR_DARWIN15, + typ: MessageType::InterfaceAddr, + }; + let ifmam = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR_DARWIN15, + body_off: SIZEOF_IFMA_MSGHDR_DARWIN15, + typ: MessageType::InterfaceMulticastAddr, + }; + let ifmam2 = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR2_DARWIN15, + body_off: SIZEOF_IFMA_MSGHDR2_DARWIN15, + typ: MessageType::InterfaceMulticastAddr, + }; + + let wire_formats = [ + (libc::RTM_ADD, rtm), + (libc::RTM_DELETE, rtm), + (libc::RTM_CHANGE, rtm), + (libc::RTM_GET, rtm), + (libc::RTM_LOSING, rtm), + (libc::RTM_REDIRECT, rtm), + (libc::RTM_MISS, rtm), + (libc::RTM_LOCK, rtm), + (libc::RTM_RESOLVE, rtm), + (libc::RTM_NEWADDR, ifam), + (libc::RTM_DELADDR, ifam), + (libc::RTM_IFINFO, ifm), + (libc::RTM_NEWMADDR, ifmam), + (libc::RTM_DELMADDR, ifmam), + (libc::RTM_IFINFO2, ifm2), + (libc::RTM_NEWMADDR2, ifmam2), + (libc::RTM_GET2, rtm2), + ] + .into_iter() + .collect(); + + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 4, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/netbsd.rs b/patches/netwatch/src/interfaces/bsd/netbsd.rs new file mode 100644 index 0000000000..531d692b4c --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/netbsd.rs @@ -0,0 +1,115 @@ +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. +// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 9; +pub const RTM_VERSION: c_int = 4; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +// pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_ONEWADDR: c_int = 0xc; +pub const RTM_ODELADDR: c_int = 0xd; +pub const RTM_OOIFINFO: c_int = 0xe; +pub const RTM_OIFINFO: c_int = 0xf; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_IFANNOUNCE: c_int = 0x10; +pub const RTM_IEEE80211: c_int = 0x11; +pub const RTM_SETGATE: c_int = 0x12; + +pub const RTM_LLINFO_UPD: c_int = 0x13; + +pub const RTM_IFINFO: c_int = 0x14; +pub const RTM_OCHGADDR: c_int = 0x15; +pub const RTM_NEWADDR: c_int = 0x16; +pub const RTM_DELADDR: c_int = 0x17; +pub const RTM_CHGADDR: c_int = 0x18; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_netbsd.go + +pub(super) const SIZEOF_IF_MSGHDR_NET_BSD7: usize = 0x98; +pub(super) const SIZEOF_IFA_MSGHDR_NET_BSD7: usize = 0x18; +pub(super) const SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7: usize = 0x18; + +pub(super) const SIZEOF_RT_MSGHDR_NET_BSD7: 
usize = 0x78; +pub(super) const SIZEOF_RT_METRICS_NET_BSD7: usize = 0x50; + +pub(super) const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub(super) const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub(super) const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + let rtm = WireFormat { + ext_off: 40, + body_off: SIZEOF_RT_MSGHDR_NET_BSD7, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_NET_BSD7, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_NET_BSD7, + body_off: SIZEOF_IFA_MSGHDR_NET_BSD7, + typ: MessageType::InterfaceAddr, + }; + let ifannm = WireFormat { + ext_off: SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7, + body_off: SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7, + typ: MessageType::InterfaceAnnounce, + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_LOCK, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFANNOUNCE, ifannm), + (RTM_IFINFO, ifm), + ] + .into_iter() + .collect(); + + // NetBSD 6 and above kernels require 64-bit aligned access to routing facilities. + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 8, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/openbsd.rs b/patches/netwatch/src/interfaces/bsd/openbsd.rs new file mode 100644 index 0000000000..39af522b4d --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/openbsd.rs @@ -0,0 +1,105 @@ +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. +// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 15; +pub const RTM_VERSION: c_int = 5; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_IFANNOUNCE: c_int = 0xf; +pub const RTM_DESYNC: c_int = 0x10; +pub const RTM_INVALIDATE: c_int = 0x11; +pub const RTM_BFD: c_int = 0x12; +pub const RTM_PROPOSAL: c_int = 0x13; +pub const RTM_CHGADDRATTR: c_int = 0x14; +pub const RTM_80211INFO: c_int = 0x15; +pub const RTM_SOURCE: c_int = 0x16; + +// socket.h +pub const NET_RT_STATS: c_int = 5; +pub const NET_RT_TABLE: c_int = 5; + +pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/sys_openbsd.go + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + let rtm = WireFormat { + ext_off: 0, + body_off: 0, + typ: 
MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::InterfaceAddr, + }; + let ifannm = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::InterfaceAnnounce, + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_RESOLVE, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFINFO, ifm), + (RTM_IFANNOUNCE, ifannm), + (RTM_DESYNC, ifannm), + ] + .into_iter() + .collect(); + + // OpenBSD kernels likewise require 64-bit aligned access to routing facilities. + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 8, + } +} diff --git a/patches/netwatch/src/interfaces/linux.rs b/patches/netwatch/src/interfaces/linux.rs new file mode 100644 index 0000000000..cf12e5ebe8 --- /dev/null +++ b/patches/netwatch/src/interfaces/linux.rs @@ -0,0 +1,338 @@ +//! Linux-specific network interface implementations. + +use nested_enum_utils::common_fields; +use snafu::{Backtrace, OptionExt, ResultExt, Snafu}; +use tokio::{ + fs::File, + io::{AsyncBufReadExt, BufReader}, +}; + +use super::DefaultRouteDetails; + +#[common_fields({ + backtrace: Option<Backtrace>, +})] +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(super)))] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[cfg(not(target_os = "android"))] + #[snafu(display("no netlink response"))] + NoResponse {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("interface not found"))] + InterfaceNotFound {}, + #[snafu(display("iface field is missing"))] + MissingIfaceField {}, + #[snafu(display("destination field is missing"))] + MissingDestinationField {}, + #[snafu(display("mask field is missing"))] + MissingMaskField {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("netlink"))] + Netlink { + source: netlink_proto::Error, + }, + #[cfg(not(target_os = "android"))] + #[snafu(display("unexpected netlink message"))] + UnexpectedNetlinkMessage {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("netlink error message: {message:?}"))] + NetlinkErrorMessage { + message: netlink_packet_core::error::ErrorMessage, + }, +} + +pub async fn default_route() -> Option<DefaultRouteDetails> { + let route = default_route_proc().await; + if let Ok(route) = route { + return route; + } + + #[cfg(target_os = "android")] + let res = android::default_route().await; + + #[cfg(not(target_os = "android"))] + let res = sane::default_route().await; + + res.ok().flatten() +} + +const PROC_NET_ROUTE_PATH: &str = "/proc/net/route"; + +async fn default_route_proc() -> Result<Option<DefaultRouteDetails>, Error> { + const ZERO_ADDR: &str = "00000000"; + let file = File::open(PROC_NET_ROUTE_PATH).await.context(IoSnafu)?; + + // Explicitly set capacity, this is min(4096, DEFAULT_BUF_SIZE): + // https://github.com/google/gvisor/issues/5732 + // On a regular Linux kernel you can read the first 128 bytes of /proc/net/route, + // then come back later to read the next 128 bytes and so on. + // + // In Google Cloud Run, where /proc/net/route comes from gVisor, you have to + // read it all at once. If you read only the first few bytes then the second + // read returns 0 bytes no matter how much originally appeared to be in the file. + // + // At the time of this writing (Mar 2021) Google Cloud Run has eth0 and eth1 + // with a 384 byte /proc/net/route.
We allocate a large buffer to ensure we'll + // read it all in one call. + let reader = BufReader::with_capacity(8 * 1024, file); + let mut lines_iter = reader.lines(); + while let Some(line) = lines_iter.next_line().await.context(IoSnafu)? { + if !line.contains(ZERO_ADDR) { + continue; + } + let mut fields = line.split_ascii_whitespace(); + let iface = fields.next().context(MissingIfaceFieldSnafu)?; + let destination = fields.next().context(MissingDestinationFieldSnafu)?; + let mask = fields.nth(5).context(MissingMaskFieldSnafu)?; + // if iface.starts_with("tailscale") || iface.starts_with("wg") { + // continue; + // } + if destination == ZERO_ADDR && mask == ZERO_ADDR { + return Ok(Some(DefaultRouteDetails { + interface_name: iface.to_string(), + })); + } + } + Ok(None) +} + +#[cfg(target_os = "android")] +mod android { + use tokio::process::Command; + + use super::*; + + /// Try to find the default route by parsing the "ip route" command output. + /// + /// We use this on Android where /proc/net/route can be missing entries or have locked-down + /// permissions. See also comments in . + pub async fn default_route() -> Result<Option<DefaultRouteDetails>, Error> { + let output = Command::new("/system/bin/ip") + .args(["route", "show", "table", "0"]) + .kill_on_drop(true) + .output() + .await + .context(IoSnafu)?; + let stdout = std::string::String::from_utf8_lossy(&output.stdout); + let details = parse_android_ip_route(&stdout).map(|iface| DefaultRouteDetails { + interface_name: iface.to_string(), + }); + Ok(details) + } +} + +#[cfg(not(target_os = "android"))] +mod sane { + use n0_future::{Either, StreamExt, TryStream}; + use netlink_packet_core::{NetlinkMessage, NLM_F_DUMP, NLM_F_REQUEST}; + use netlink_packet_route::{ + link::{LinkAttribute, LinkMessage}, + route::{RouteAttribute, RouteHeader, RouteMessage, RouteProtocol, RouteScope, RouteType}, + AddressFamily, RouteNetlinkMessage, + }; + use netlink_sys::protocols::NETLINK_ROUTE; + use snafu::IntoError; + use tracing::{info_span, Instrument}; + + use super::*; + + type Handle = netlink_proto::ConnectionHandle<RouteNetlinkMessage>; + + macro_rules! try_rtnl { + ($msg: expr, $message_type:path) => {{ + use netlink_packet_core::NetlinkPayload; + use netlink_packet_route::RouteNetlinkMessage; + + let (_header, payload) = $msg.into_parts(); + match payload { + NetlinkPayload::InnerMessage($message_type(msg)) => msg, + NetlinkPayload::Error(err) => { + return Err(NetlinkErrorMessageSnafu { message: err }.build()) + } + _ => return Err(UnexpectedNetlinkMessageSnafu.build()), + } + }}; + } + + pub async fn default_route() -> Result<Option<DefaultRouteDetails>, Error> { + let (connection, handle, _receiver) = + netlink_proto::new_connection::<RouteNetlinkMessage>(NETLINK_ROUTE).context(IoSnafu)?; + + let task = tokio::spawn(connection.instrument(info_span!("netlink.conn"))); + + let default = default_route_netlink_family(&handle, AddressFamily::Inet).await?; + let default = match default { + Some(default) => Some(default), + None => { + default_route_netlink_family(&handle, netlink_packet_route::AddressFamily::Inet6) + .await?
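+ // (Editor's annotation) IPv6 is only queried when the IPv4 lookup returned `None`; + // a `Some` result above short-circuits the fallback.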
+ } + }; + task.abort(); + task.await.ok(); + Ok(default.map(|(name, _index)| DefaultRouteDetails { + interface_name: name, + })) + } + + fn get_route( + handle: Handle, + message: RouteMessage, + ) -> impl TryStream<Ok = RouteMessage, Error = Error> { + let mut req = NetlinkMessage::from(RouteNetlinkMessage::GetRoute(message)); + req.header.flags = NLM_F_REQUEST | NLM_F_DUMP; + + match handle.request(req, netlink_proto::sys::SocketAddr::new(0, 0)) { + Ok(response) => Either::Left( + response.map(move |msg| Ok(try_rtnl!(msg, RouteNetlinkMessage::NewRoute))), + ), + Err(e) => Either::Right(n0_future::stream::once::<Result<RouteMessage, Error>>(Err( + NetlinkSnafu.into_error(e), + ))), + } + } + + fn create_route_message(family: netlink_packet_route::AddressFamily) -> RouteMessage { + let mut message = RouteMessage::default(); + message.header.table = RouteHeader::RT_TABLE_MAIN; + message.header.protocol = RouteProtocol::Static; + message.header.scope = RouteScope::Universe; + message.header.kind = RouteType::Unicast; + message.header.address_family = family; + message + } + + /// Returns the `(name, index)` of the interface for the default route. + async fn default_route_netlink_family( + handle: &Handle, + family: netlink_packet_route::AddressFamily, + ) -> Result<Option<(String, u32)>, Error> { + let msg = create_route_message(family); + let mut routes = get_route(handle.clone(), msg); + + while let Some(route) = routes.try_next().await? { + let route_attrs = route.attributes; + + if !route_attrs + .iter() + .any(|attr| matches!(attr, RouteAttribute::Gateway(_))) + { + // A default route has a gateway. + continue; + } + + if route.header.destination_prefix_length > 0 { + // A default route has a destination prefix length of zero because it must + // match all destinations. + continue; + } + + let index = route_attrs.iter().find_map(|attr| match attr { + RouteAttribute::Oif(index) => Some(*index), + _ => None, + }); + + if let Some(index) = index { + if index == 0 { + continue; + } + let name = iface_by_index(handle, index).await?; + return Ok(Some((name, index))); + } + } + Ok(None) + } + + fn get_link( + handle: Handle, + message: LinkMessage, + ) -> impl TryStream<Ok = LinkMessage, Error = Error> { + let mut req = NetlinkMessage::from(RouteNetlinkMessage::GetLink(message)); + req.header.flags = NLM_F_REQUEST; + + match handle.request(req, netlink_proto::sys::SocketAddr::new(0, 0)) { + Ok(response) => Either::Left( + response.map(move |msg| Ok(try_rtnl!(msg, RouteNetlinkMessage::NewLink))), + ), + Err(e) => Either::Right(n0_future::stream::once::<Result<LinkMessage, Error>>(Err( + NetlinkSnafu.into_error(e), + ))), + } + } + + fn create_link_get_message(index: u32) -> LinkMessage { + let mut message = LinkMessage::default(); + message.header.index = index; + message + } + + async fn iface_by_index(handle: &Handle, index: u32) -> Result<String, Error> { + let message = create_link_get_message(index); + let mut links = get_link(handle.clone(), message); + let msg = links.try_next().await?.context(NoResponseSnafu)?; + + for nla in msg.attributes { + if let LinkAttribute::IfName(name) = nla { + return Ok(name); + } + } + Err(InterfaceNotFoundSnafu.build()) + } + + #[cfg(test)] + mod tests { + use super::*; + + #[tokio::test] + async fn test_default_route_netlink() { + let route = default_route().await.unwrap(); + // assert!(route.is_some()); + if let Some(route) = route { + assert!(!route.interface_name.is_empty()); + } + } + } +} + +/// Parses the output of the Android `/system/bin/ip` command for the default route. +/// +/// Searches for a line like `default via 10.0.2.2.
dev radio0 table 1016 proto static mtu +/// 1500` +#[cfg(any(target_os = "android", test))] +fn parse_android_ip_route(stdout: &str) -> Option<&str> { + for line in stdout.lines() { + if !line.starts_with("default via") { + continue; + } + let mut fields = line.split_ascii_whitespace(); + if let Some(_dev) = fields.find(|s: &&str| *s == "dev") { + return fields.next(); + } + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_default_route_proc() { + let route = default_route_proc().await.unwrap(); + // assert!(route.is_some()); + if let Some(route) = route { + assert!(!route.interface_name.is_empty()); + } + } + + #[test] + fn test_parse_android_ip_route() { + let stdout = "default via 10.0.2.2. dev radio0 table 1016 proto static mtu 1500"; + let iface = parse_android_ip_route(stdout).unwrap(); + assert_eq!(iface, "radio0"); + } +} diff --git a/patches/netwatch/src/interfaces/wasm_browser.rs b/patches/netwatch/src/interfaces/wasm_browser.rs new file mode 100644 index 0000000000..190431b0f0 --- /dev/null +++ b/patches/netwatch/src/interfaces/wasm_browser.rs @@ -0,0 +1,118 @@ +use std::{collections::HashMap, fmt}; + +use js_sys::{JsString, Reflect}; + +pub const BROWSER_INTERFACE: &str = "browserif"; + +/// Represents a network interface. +#[derive(Debug, PartialEq, Eq)] +pub struct Interface { + is_up: bool, +} + +impl fmt::Display for Interface { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "navigator.onLine={}", self.is_up) + } +} + +impl Interface { + async fn new() -> Self { + let is_up = Self::is_up(); + tracing::debug!(onLine = is_up, "Fetched globalThis.navigator.onLine"); + Self { + is_up: is_up.unwrap_or(true), + } + } + + fn is_up() -> Option<bool> { + let navigator = Reflect::get( + js_sys::global().as_ref(), + JsString::from("navigator").as_ref(), + ) + .ok()?; + + let is_up = Reflect::get(&navigator, JsString::from("onLine").as_ref()).ok()?; + + is_up.as_bool() + } + + /// The name of the interface. + pub(crate) fn name(&self) -> &str { + BROWSER_INTERFACE + } +} + +/// Intended to store the state of the machine's network interfaces, routing table, and +/// other network configuration. For now it's pretty basic. +#[derive(Debug, PartialEq, Eq)] +pub struct State { + /// Maps from an interface name to the interface. + pub interfaces: HashMap<String, Interface>, + + /// Whether this machine has an IPv6 Global or Unique Local Address + /// which might provide connectivity. + pub have_v6: bool, + + /// Whether the machine has some non-localhost, non-link-local IPv4 address. + pub have_v4: bool, + + /// Whether the current network interface is considered "expensive", which currently means LTE/etc + /// instead of Wifi. This field is not populated by `get_state`. + pub(crate) is_expensive: bool, + + /// The interface name for the machine's default route. + /// + /// It is not yet populated on all OSes. + /// + /// When set, its value is the map key into `interface` and `interface_ips`. + pub(crate) default_route_interface: Option<String>, + + /// The HTTP proxy to use, if any. + pub(crate) http_proxy: Option<String>, + + /// The URL to the Proxy Autoconfig URL, if applicable.
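+ /// + /// (Editor's annotation: the browser implementation below always leaves this `None`, + /// since proxy autoconfig settings are not observable from inside the sandbox.)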
+ pub(crate) pac: Option<String>, +} + +impl fmt::Display for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for iface in self.interfaces.values() { + write!(f, "{iface}")?; + if let Some(ref default_if) = self.default_route_interface { + if iface.name() == default_if { + write!(f, " (default)")?; + } + } + if f.alternate() { + writeln!(f)?; + } else { + write!(f, "; ")?; + } + } + Ok(()) + } +} + +impl State { + /// Returns the state of all the current machine's network interfaces. + /// + /// It does not set the returned `State.is_expensive`. The caller can populate that. + pub async fn new() -> Self { + let mut interfaces = HashMap::new(); + let have_v6 = false; + let have_v4 = false; + + interfaces.insert(BROWSER_INTERFACE.to_string(), Interface::new().await); + + State { + interfaces, + have_v4, + have_v6, + is_expensive: false, + default_route_interface: Some(BROWSER_INTERFACE.to_string()), + http_proxy: None, + pac: None, + } + } +} diff --git a/patches/netwatch/src/interfaces/windows.rs b/patches/netwatch/src/interfaces/windows.rs new file mode 100644 index 0000000000..8e14048d4b --- /dev/null +++ b/patches/netwatch/src/interfaces/windows.rs @@ -0,0 +1,58 @@ +use std::collections::HashMap; + +use nested_enum_utils::common_fields; +use serde::Deserialize; +use snafu::{Backtrace, OptionExt, ResultExt, Snafu}; +use tracing::warn; +use wmi::{query::FilterValue, COMLibrary, WMIConnection}; + +use super::DefaultRouteDetails; + +/// API Docs: <https://learn.microsoft.com/en-us/previous-versions/windows/desktop/wmiiprouteprov/win32-ip4routetable> +#[derive(Deserialize, Debug)] +#[allow(non_camel_case_types, non_snake_case)] +struct Win32_IP4RouteTable { + Name: String, +} + +#[common_fields({ + backtrace: Option<Backtrace>, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[allow(dead_code)] // not sure why we have this here? + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[snafu(display("no route found"))] + NoRoute {}, + #[snafu(display("WMI"))] + Wmi { source: wmi::WMIError }, +} + +fn get_default_route() -> Result<DefaultRouteDetails, Error> { + let com_con = COMLibrary::new().context(WmiSnafu)?; + let wmi_con = WMIConnection::new(com_con).context(WmiSnafu)?; + + let query: HashMap<_, _> = [("Destination".into(), FilterValue::Str("0.0.0.0"))].into(); + let route: Win32_IP4RouteTable = wmi_con + .filtered_query(&query) + .context(WmiSnafu)? + .drain(..) + .next() + .context(NoRouteSnafu)?; + + Ok(DefaultRouteDetails { + interface_name: route.Name, + }) +} + +pub async fn default_route() -> Option<DefaultRouteDetails> { + match get_default_route() { + Ok(route) => Some(route), + Err(err) => { + warn!("failed to retrieve default route: {:#?}", err); + None + } + } +} diff --git a/patches/netwatch/src/ip.rs b/patches/netwatch/src/ip.rs new file mode 100644 index 0000000000..8aafeb3059 --- /dev/null +++ b/patches/netwatch/src/ip.rs @@ -0,0 +1,159 @@ +//! IP address related utilities. + +#[cfg(not(wasm_browser))] +use std::net::IpAddr; +use std::net::Ipv6Addr; + +#[cfg(not(wasm_browser))] +const IFF_UP: u32 = 0x1; +#[cfg(not(wasm_browser))] +const IFF_LOOPBACK: u32 = 0x8; + +/// List of the machine's IP addresses. +#[cfg(not(wasm_browser))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LocalAddresses { + /// Loopback addresses. + pub loopback: Vec<IpAddr>, + /// Regular addresses. + pub regular: Vec<IpAddr>, +} + +#[cfg(not(wasm_browser))] +impl Default for LocalAddresses { + fn default() -> Self { + Self::new() + } +} + +#[cfg(not(wasm_browser))] +impl LocalAddresses { + /// Returns the machine's IP addresses.
+ /// If there are no regular addresses it will return any IPv4 linklocal or IPv6 unique local + /// addresses because we know of environments where these are used with NAT to provide connectivity. + pub fn new() -> Self { + let ifaces = netdev::interface::get_interfaces(); + + let mut loopback = Vec::new(); + let mut regular4 = Vec::new(); + let mut regular6 = Vec::new(); + let mut linklocal4 = Vec::new(); + let mut ula6 = Vec::new(); + + for iface in ifaces { + if !is_up(&iface) { + // Skip down interfaces + continue; + } + let ifc_is_loopback = is_loopback(&iface); + let addrs = iface + .ipv4 + .iter() + .map(|a| IpAddr::V4(a.addr())) + .chain(iface.ipv6.iter().map(|a| IpAddr::V6(a.addr()))); + + for ip in addrs { + let ip = ip.to_canonical(); + + if ip.is_loopback() || ifc_is_loopback { + loopback.push(ip); + } else if is_link_local(ip) { + if ip.is_ipv4() { + linklocal4.push(ip); + } + + // We know of no cases where the IPv6 fe80:: addresses + // are used to provide WAN connectivity. It is also very + // common for users to have no IPv6 WAN connectivity, + // but their OS supports IPv6 so they have an fe80:: + // address. We don't want to report all of those + // IPv6 LL to Control. + } else if ip.is_ipv6() && is_private(&ip) { + // Google Cloud Run uses NAT with IPv6 Unique + // Local Addresses to provide IPv6 connectivity. + ula6.push(ip); + } else if ip.is_ipv4() { + regular4.push(ip); + } else { + regular6.push(ip); + } + } + } + + if regular4.is_empty() && regular6.is_empty() { + // if we have no usable IP addresses then be willing to accept + // addresses we otherwise wouldn't, like: + // + 169.254.x.x (AWS Lambda uses NAT with these) + // + IPv6 ULA (Google Cloud Run uses these with address translation) + regular4 = linklocal4; + regular6 = ula6; + } + let mut regular = regular4; + regular.extend(regular6); + + regular.sort(); + loopback.sort(); + + LocalAddresses { loopback, regular } + } +} + +#[cfg(not(wasm_browser))] +pub(crate) const fn is_up(interface: &netdev::Interface) -> bool { + interface.flags & IFF_UP != 0 +} + +#[cfg(not(wasm_browser))] +pub(crate) const fn is_loopback(interface: &netdev::Interface) -> bool { + interface.flags & IFF_LOOPBACK != 0 +} + +/// Reports whether ip is a private address, according to RFC 1918 +/// (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether +/// ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. +#[cfg(not(wasm_browser))] +pub(crate) fn is_private(ip: &IpAddr) -> bool { + match ip { + IpAddr::V4(ip) => { + // RFC 1918 allocates 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 as + // private IPv4 address subnets. + let octets = ip.octets(); + octets[0] == 10 + || (octets[0] == 172 && octets[1] & 0xf0 == 16) + || (octets[0] == 192 && octets[1] == 168) + } + IpAddr::V6(ip) => is_private_v6(ip), + } +} + +#[cfg(not(wasm_browser))] +pub(crate) fn is_private_v6(ip: &Ipv6Addr) -> bool { + // RFC 4193 allocates fc00::/7 as the unique local unicast IPv6 address subnet. + ip.octets()[0] & 0xfe == 0xfc +} + +#[cfg(not(wasm_browser))] +pub(super) fn is_link_local(ip: IpAddr) -> bool { + match ip { + IpAddr::V4(ip) => ip.is_link_local(), + IpAddr::V6(ip) => is_unicast_link_local(ip), + } +} + +/// Returns true if the address is a unicast address with link-local scope, as defined in RFC 4291. 
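+/// +/// A minimal doc-example (editor's illustration; it assumes the `pub mod ip` re-export shown in `lib.rs` below): +/// +/// ``` +/// use std::net::Ipv6Addr; +/// use netwatch::ip::is_unicast_link_local; +/// assert!(is_unicast_link_local(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1))); +/// assert!(!is_unicast_link_local(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))); +/// ```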
+// Copied from std lib, not stable yet +pub const fn is_unicast_link_local(addr: Ipv6Addr) -> bool { + (addr.segments()[0] & 0xffc0) == 0xfe80 +} + +#[cfg(test)] +mod tests { + #[cfg(not(wasm_browser))] + #[test] + fn test_local_addresses() { + let addrs = super::LocalAddresses::new(); + dbg!(&addrs); + assert!(!addrs.loopback.is_empty()); + assert!(!addrs.regular.is_empty()); + } +} diff --git a/patches/netwatch/src/ip_family.rs b/patches/netwatch/src/ip_family.rs new file mode 100644 index 0000000000..882890b58b --- /dev/null +++ b/patches/netwatch/src/ip_family.rs @@ -0,0 +1,47 @@ +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + +/// Ip family selection between Ipv4 and Ipv6. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum IpFamily { + /// Ipv4 + V4, + /// Ipv6 + V6, +} + +impl From<IpAddr> for IpFamily { + fn from(value: IpAddr) -> Self { + match value { + IpAddr::V4(_) => Self::V4, + IpAddr::V6(_) => Self::V6, + } + } +} + +impl IpFamily { + /// Returns the matching default address. + pub fn unspecified_addr(&self) -> IpAddr { + match self { + Self::V4 => Ipv4Addr::UNSPECIFIED.into(), + Self::V6 => Ipv6Addr::UNSPECIFIED.into(), + } + } + + /// Returns the matching localhost address. + pub fn local_addr(&self) -> IpAddr { + match self { + Self::V4 => Ipv4Addr::LOCALHOST.into(), + Self::V6 => Ipv6Addr::LOCALHOST.into(), + } + } +} + +#[cfg(not(wasm_browser))] +impl From<IpFamily> for socket2::Domain { + fn from(value: IpFamily) -> Self { + match value { + IpFamily::V4 => socket2::Domain::IPV4, + IpFamily::V6 => socket2::Domain::IPV6, + } + } +} diff --git a/patches/netwatch/src/lib.rs b/patches/netwatch/src/lib.rs new file mode 100644 index 0000000000..d26af9ecae --- /dev/null +++ b/patches/netwatch/src/lib.rs @@ -0,0 +1,13 @@ +//! Networking related utilities + +#[cfg_attr(wasm_browser, path = "interfaces/wasm_browser.rs")] +pub mod interfaces; +pub mod ip; +mod ip_family; +pub mod netmon; +#[cfg(not(wasm_browser))] +mod udp; + +pub use self::ip_family::IpFamily; +#[cfg(not(wasm_browser))] +pub use self::udp::UdpSocket; diff --git a/patches/netwatch/src/netmon.rs b/patches/netwatch/src/netmon.rs new file mode 100644 index 0000000000..246fe2a66b --- /dev/null +++ b/patches/netwatch/src/netmon.rs @@ -0,0 +1,131 @@ +//! Monitoring of networking interfaces and route changes. + +use n0_future::{ + boxed::BoxFuture, + task::{self, AbortOnDropHandle}, +}; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::sync::{mpsc, oneshot}; + +mod actor; +#[cfg(target_os = "android")] +mod android; +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +mod bsd; +#[cfg(target_os = "linux")] +mod linux; +#[cfg(wasm_browser)] +mod wasm_browser; +#[cfg(target_os = "windows")] +mod windows; + +pub use self::actor::CallbackToken; +use self::actor::{Actor, ActorMessage}; + +/// Monitors networking interface and route changes. +#[derive(Debug)] +pub struct Monitor { + /// Task handle for the monitor task.
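+ /// + /// Dropping the `Monitor` aborts this task via `AbortOnDropHandle`, so no explicit + /// shutdown call is needed.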
+ _handle: AbortOnDropHandle<()>, + actor_tx: mpsc::Sender<ActorMessage>, +} + +#[common_fields({ + backtrace: Option<Backtrace>, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("channel closed"))] + ChannelClosed {}, + #[snafu(display("actor error"))] + Actor { source: actor::Error }, +} + +impl From<mpsc::error::SendError<ActorMessage>> for Error { + fn from(_value: mpsc::error::SendError<ActorMessage>) -> Self { + ChannelClosedSnafu.build() + } +} + +impl From<oneshot::error::RecvError> for Error { + fn from(_value: oneshot::error::RecvError) -> Self { + ChannelClosedSnafu.build() + } +} + +impl Monitor { + /// Create a new monitor. + pub async fn new() -> Result<Self, Error> { + let actor = Actor::new().await.context(ActorSnafu)?; + let actor_tx = actor.subscribe(); + + let handle = task::spawn(async move { + actor.run().await; + }); + + Ok(Monitor { + _handle: AbortOnDropHandle::new(handle), + actor_tx, + }) + } + + /// Subscribe to network changes. + pub async fn subscribe<F>(&self, callback: F) -> Result<CallbackToken, Error> + where + F: Fn(bool) -> BoxFuture<()> + 'static + Sync + Send, + { + let (s, r) = oneshot::channel(); + self.actor_tx + .send(ActorMessage::Subscribe(Box::new(callback), s)) + .await?; + let token = r.await?; + Ok(token) + } + + /// Unsubscribe a callback from network changes, using the provided token. + pub async fn unsubscribe(&self, token: CallbackToken) -> Result<(), Error> { + let (s, r) = oneshot::channel(); + self.actor_tx + .send(ActorMessage::Unsubscribe(token, s)) + .await?; + r.await?; + Ok(()) + } + + /// Reports a potential change detected outside of the monitor. + pub async fn network_change(&self) -> Result<(), Error> { + self.actor_tx.send(ActorMessage::NetworkChange).await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use n0_future::future::FutureExt; + + use super::*; + + #[tokio::test] + async fn test_smoke_monitor() { + let mon = Monitor::new().await.unwrap(); + let _token = mon + .subscribe(|is_major| { + async move { + println!("CHANGE DETECTED: {}", is_major); + } + .boxed() + }) + .await + .unwrap(); + + tokio::time::sleep(std::time::Duration::from_secs(15)).await; + } +} diff --git a/patches/netwatch/src/netmon/actor.rs b/patches/netwatch/src/netmon/actor.rs new file mode 100644 index 0000000000..bd5743ce1c --- /dev/null +++ b/patches/netwatch/src/netmon/actor.rs @@ -0,0 +1,274 @@ +use std::{collections::HashMap, sync::Arc}; + +use n0_future::{ + boxed::BoxFuture, + task, + time::{self, Duration, Instant}, +}; +#[cfg(not(wasm_browser))] +use os::is_interesting_interface; +pub(super) use os::Error; +use os::RouteMonitor; +use tokio::sync::{mpsc, oneshot}; +use tracing::{debug, trace}; + +#[cfg(target_os = "android")] +use super::android as os; +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +use super::bsd as os; +#[cfg(target_os = "linux")] +use super::linux as os; +#[cfg(wasm_browser)] +use super::wasm_browser as os; +#[cfg(target_os = "windows")] +use super::windows as os; +use crate::interfaces::State; +#[cfg(not(wasm_browser))] +use crate::{interfaces::IpNet, ip::is_link_local}; + +/// The message sent by the OS specific monitors. +#[derive(Debug, Copy, Clone)] +pub(super) enum NetworkMessage { + /// A change was detected. + #[allow(dead_code)] + Change, +} + +/// How often we execute a check for big jumps in wall time. +#[cfg(not(any(target_os = "ios", target_os = "android")))] +const POLL_WALL_TIME_INTERVAL: Duration = Duration::from_secs(15); +/// Set background polling time to 1h to effectively disable it on mobile, +/// to avoid increased battery usage.
Sleep detection won't work this way there. +#[cfg(any(target_os = "ios", target_os = "android"))] +const POLL_WALL_TIME_INTERVAL: Duration = Duration::from_secs(60 * 60); +const MON_CHAN_CAPACITY: usize = 16; +const ACTOR_CHAN_CAPACITY: usize = 16; + +pub(super) struct Actor { + /// Latest known interface state. + interface_state: State, + /// Latest observed wall time. + wall_time: Instant, + /// OS specific monitor. + #[allow(dead_code)] + route_monitor: RouteMonitor, + mon_receiver: mpsc::Receiver<NetworkMessage>, + actor_receiver: mpsc::Receiver<ActorMessage>, + actor_sender: mpsc::Sender<ActorMessage>, + /// Callback registry. + callbacks: HashMap<CallbackToken, Arc<Callback>>, + callback_token: u64, +} + +/// Token to remove a callback. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CallbackToken(u64); + +/// Callbacks that get notified about changes. +pub(super) type Callback = Box<dyn Fn(bool) -> BoxFuture<()> + Sync + Send + 'static>; + +pub(super) enum ActorMessage { + Subscribe(Callback, oneshot::Sender<CallbackToken>), + Unsubscribe(CallbackToken, oneshot::Sender<()>), + NetworkChange, +} + +impl Actor { + pub(super) async fn new() -> Result<Self, Error> { + let interface_state = State::new().await; + let wall_time = Instant::now(); + + let (mon_sender, mon_receiver) = mpsc::channel(MON_CHAN_CAPACITY); + let route_monitor = RouteMonitor::new(mon_sender)?; + let (actor_sender, actor_receiver) = mpsc::channel(ACTOR_CHAN_CAPACITY); + + Ok(Actor { + interface_state, + wall_time, + route_monitor, + mon_receiver, + actor_receiver, + actor_sender, + callbacks: Default::default(), + callback_token: 0, + }) + } + + pub(super) fn subscribe(&self) -> mpsc::Sender<ActorMessage> { + self.actor_sender.clone() + } + + pub(super) async fn run(mut self) { + const DEBOUNCE: Duration = Duration::from_millis(250); + + let mut last_event = None; + let mut debounce_interval = time::interval(DEBOUNCE); + let mut wall_time_interval = time::interval(POLL_WALL_TIME_INTERVAL); + + loop { + tokio::select!
{ + biased; + + _ = debounce_interval.tick() => { + if let Some(time_jumped) = last_event.take() { + self.handle_potential_change(time_jumped).await; + } + } + _ = wall_time_interval.tick() => { + trace!("tick: wall_time_interval"); + if self.check_wall_time_advance() { + // Trigger potential change + last_event.replace(true); + debounce_interval.reset_immediately(); + } + } + event = self.mon_receiver.recv() => { + match event { + Some(NetworkMessage::Change) => { + trace!("network activity detected"); + last_event.replace(false); + debounce_interval.reset_immediately(); + } + None => { + debug!("shutting down, network monitor receiver gone"); + break; + } + } + } + msg = self.actor_receiver.recv() => { + match msg { + Some(ActorMessage::Subscribe(callback, s)) => { + let token = self.next_callback_token(); + self.callbacks.insert(token, Arc::new(callback)); + s.send(token).ok(); + } + Some(ActorMessage::Unsubscribe(token, s)) => { + self.callbacks.remove(&token); + s.send(()).ok(); + } + Some(ActorMessage::NetworkChange) => { + trace!("external network activity detected"); + last_event.replace(false); + debounce_interval.reset_immediately(); + } + None => { + debug!("shutting down, actor receiver gone"); + break; + } + } + } + } + } + } + + fn next_callback_token(&mut self) -> CallbackToken { + let token = CallbackToken(self.callback_token); + self.callback_token += 1; + token + } + + async fn handle_potential_change(&mut self, time_jumped: bool) { + trace!("potential change"); + + let new_state = State::new().await; + let old_state = &self.interface_state; + + // No major changes, continue on + if !time_jumped && old_state == &new_state { + debug!("no changes detected"); + return; + } + + let is_major = is_major_change(old_state, &new_state) || time_jumped; + + if is_major { + self.interface_state = new_state; + } + + debug!("triggering {} callbacks", self.callbacks.len()); + for cb in self.callbacks.values() { + let cb = cb.clone(); + task::spawn(async move { + cb(is_major).await; + }); + } + } + + /// Reports whether wall time jumped more than 150% + /// of `POLL_WALL_TIME_INTERVAL`, indicating we probably just came out of sleep. + fn check_wall_time_advance(&mut self) -> bool { + let now = Instant::now(); + let jumped = if let Some(elapsed) = now.checked_duration_since(self.wall_time) { + elapsed > POLL_WALL_TIME_INTERVAL * 3 / 2 + } else { + false + }; + + self.wall_time = now; + jumped + } +} + +#[cfg(wasm_browser)] +fn is_major_change(s1: &State, s2: &State) -> bool { + // All changes are major. + // In the browser, there only are changes from online to offline + s1 != s2 +} + +#[cfg(not(wasm_browser))] +fn is_major_change(s1: &State, s2: &State) -> bool { + if s1.have_v6 != s2.have_v6 + || s1.have_v4 != s2.have_v4 + || s1.is_expensive != s2.is_expensive + || s1.default_route_interface != s2.default_route_interface + || s1.http_proxy != s2.http_proxy + || s1.pac != s2.pac + { + return true; + } + + for (iname, i) in &s1.interfaces { + if !is_interesting_interface(i.name()) { + continue; + } + let Some(i2) = s2.interfaces.get(iname) else { + return true; + }; + if i != i2 || !prefixes_major_equal(i.addrs(), i2.addrs()) { + return true; + } + } + + false +} + +/// Checks whether `a` and `b` are equal after ignoring uninteresting +/// things, like link-local, loopback and multicast addresses. 
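+/// +/// (Editor's annotation: the zip-based comparison stops at the shorter of the two +/// iterators, so extra trailing interesting prefixes on one side alone are not +/// detected here.)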
+#[cfg(not(wasm_browser))] +fn prefixes_major_equal(a: impl Iterator<Item = IpNet>, b: impl Iterator<Item = IpNet>) -> bool { + fn is_interesting(p: &IpNet) -> bool { + let a = p.addr(); + if is_link_local(a) || a.is_loopback() || a.is_multicast() { + return false; + } + true + } + + let a = a.filter(is_interesting); + let b = b.filter(is_interesting); + + for (a, b) in a.zip(b) { + if a != b { + return false; + } + } + + true +} diff --git a/patches/netwatch/src/netmon/android.rs b/patches/netwatch/src/netmon/android.rs new file mode 100644 index 0000000000..14189bfa13 --- /dev/null +++ b/patches/netwatch/src/netmon/android.rs @@ -0,0 +1,26 @@ +use tokio::sync::mpsc; + +use super::actor::NetworkMessage; + +#[derive(Debug, derive_more::Display)] +#[display("error")] +pub struct Error; + +impl std::error::Error for Error {} + +#[derive(Debug)] +pub(super) struct RouteMonitor { + _sender: mpsc::Sender<NetworkMessage>, +} + +impl RouteMonitor { + pub(super) fn new(_sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> { + // Very sad monitor. Android doesn't allow us to do this. + + Ok(RouteMonitor { _sender }) + } +} + +pub(super) fn is_interesting_interface(_name: &str) -> bool { + true +} diff --git a/patches/netwatch/src/netmon/bsd.rs b/patches/netwatch/src/netmon/bsd.rs new file mode 100644 index 0000000000..e1734c735e --- /dev/null +++ b/patches/netwatch/src/netmon/bsd.rs @@ -0,0 +1,136 @@ +#[cfg(any(target_os = "macos", target_os = "ios"))] +use libc::{RTAX_DST, RTAX_IFP}; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::{io::AsyncReadExt, sync::mpsc}; +use tokio_util::task::AbortOnDropHandle; +use tracing::{trace, warn}; + +use super::actor::NetworkMessage; +#[cfg(any(target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))] +use crate::interfaces::bsd::{RTAX_DST, RTAX_IFP}; +use crate::{interfaces::bsd::WireMessage, ip::is_link_local}; + +#[derive(Debug)] +pub(super) struct RouteMonitor { + _handle: AbortOnDropHandle<()>, +} + +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { + source: std::io::Error, + backtrace: Option<Backtrace>, + }, +} + +fn create_socket() -> std::io::Result<tokio::net::UnixStream> { + use std::os::fd::{FromRawFd, IntoRawFd}; + + // socket2 0.5+ compatibility: use raw socket type constant instead of Type::RAW + let socket = socket2::Socket::new(libc::AF_ROUTE.into(), socket2::Type::from(libc::SOCK_RAW), None)?; + socket.set_nonblocking(true)?; + + // socket2 0.5+ compatibility: explicit conversion through raw file descriptor + let fd = socket.into_raw_fd(); + let socket_std: std::os::unix::net::UnixStream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(fd) }; + let socket: tokio::net::UnixStream = socket_std.try_into()?; + + trace!("AF_ROUTE socket bound"); + + Ok(socket) +} + +impl RouteMonitor { + pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> { + let mut socket = create_socket().context(IoSnafu)?; + let handle = tokio::task::spawn(async move { + trace!("AF_ROUTE monitor started"); + + // TODO: cleaner shutdown + let mut buffer = vec![0u8; 2048]; + loop { + match socket.read(&mut buffer).await { + Ok(read) => { + trace!("AF_ROUTE: read {} bytes", read); + match super::super::interfaces::bsd::parse_rib( + libc::NET_RT_DUMP, + &buffer[..read], + ) { + Ok(msgs) => { + if contains_interesting_message(&msgs) { + sender.send(NetworkMessage::Change).await.ok(); + } + } + Err(err) => { + warn!("AF_ROUTE: failed to parse rib: {:?}", err); + } + } + } + Err(err) => { + warn!("AF_ROUTE: error reading: {:?}", err); + // recreate socket, as it is likely in an invalid state + // TODO: distinguish
between different errors? + match create_socket() { + Ok(new_socket) => { + socket = new_socket; + } + Err(err) => { + warn!("AF_ROUTE: unable to bind a new socket: {:?}", err); + // TODO: what to do here? + } + } + } + } + } + }); + + Ok(RouteMonitor { + _handle: AbortOnDropHandle::new(handle), + }) + } +} + +fn contains_interesting_message(msgs: &[WireMessage]) -> bool { + msgs.iter().any(is_interesting_message) +} + +pub(super) fn is_interesting_message(msg: &WireMessage) -> bool { + match msg { + WireMessage::InterfaceMulticastAddr(_) => true, + WireMessage::Interface(_) => false, + WireMessage::InterfaceAddr(msg) => { + if let Some(addr) = msg.addrs.get(RTAX_IFP as usize) { + if let Some(name) = addr.name() { + if !is_interesting_interface(name) { + return false; + } + } + } + true + } + WireMessage::Route(msg) => { + // Ignore local unicast + if let Some(addr) = msg.addrs.get(RTAX_DST as usize) { + if let Some(ip) = addr.ip() { + if is_link_local(ip) { + return false; + } + } + } + + true + } + WireMessage::InterfaceAnnounce(_) => false, + } +} + +pub(super) fn is_interesting_interface(name: &str) -> bool { + // Strip trailing digits; a char predicate is needed here, since a literal + // "0123456789" pattern would only strip that exact suffix. + let base_name = name.trim_end_matches(|c: char| c.is_ascii_digit()); + if base_name == "llw" || base_name == "awdl" || base_name == "ipsec" { + return false; + } + + true +} diff --git a/patches/netwatch/src/netmon/linux.rs b/patches/netwatch/src/netmon/linux.rs new file mode 100644 index 0000000000..0eed826030 --- /dev/null +++ b/patches/netwatch/src/netmon/linux.rs @@ -0,0 +1,189 @@ +use std::{ + collections::{HashMap, HashSet}, + net::IpAddr, +}; + +use libc::{ + RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_IFADDR, + RTNLGRP_IPV6_ROUTE, RTNLGRP_IPV6_RULE, +}; +use n0_future::StreamExt; +use netlink_packet_core::NetlinkPayload; +use netlink_packet_route::{address, route, RouteNetlinkMessage}; +use netlink_sys::{AsyncSocket, SocketAddr}; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::{sync::mpsc, task::JoinHandle}; +use tracing::{trace, warn}; + +use super::actor::NetworkMessage; +use crate::ip::is_link_local; + +#[derive(Debug)] +pub(super) struct RouteMonitor { + conn_handle: JoinHandle<()>, + handle: JoinHandle<()>, +} + +impl Drop for RouteMonitor { + fn drop(&mut self) { + self.handle.abort(); + self.conn_handle.abort(); + } +} + +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { + source: std::io::Error, + backtrace: Option<Backtrace>, + }, +} + +const fn nl_mgrp(group: u32) -> u32 { + if group > 31 { + panic!("use netlink_sys::Socket::add_membership() for this group"); + } + if group == 0 { + 0 + } else { + 1 << (group - 1) + } +} + +macro_rules! get_nla { + ($msg:expr, $nla:path) => { + $msg.attributes.iter().find_map(|nla| match nla { + $nla(n) => Some(n), + _ => None, + }) + }; +} + +impl RouteMonitor { + pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> { + use netlink_sys::protocols::NETLINK_ROUTE; + + let (mut conn, _handle, mut messages) = netlink_proto::new_connection::< + netlink_packet_route::RouteNetlinkMessage, + >(NETLINK_ROUTE) + .context(IoSnafu)?; + + // Specify flags to listen on.
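+ // (Editor's annotation) `nl_mgrp` converts an RTNLGRP_* group number into the + // legacy RTMGRP_* bitmask form expected by `SocketAddr::new`, e.g. + // RTNLGRP_IPV4_IFADDR (5) -> 1 << 4 == 0x10, matching the legacy + // RTMGRP_IPV4_IFADDR constant.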
+ let groups = nl_mgrp(RTNLGRP_IPV4_IFADDR) + | nl_mgrp(RTNLGRP_IPV6_IFADDR) + | nl_mgrp(RTNLGRP_IPV4_ROUTE) + | nl_mgrp(RTNLGRP_IPV6_ROUTE) + | nl_mgrp(RTNLGRP_IPV4_RULE) + | nl_mgrp(RTNLGRP_IPV6_RULE); + + let addr = SocketAddr::new(0, groups); + conn.socket_mut() + .socket_mut() + .bind(&addr) + .context(IoSnafu)?; + + let conn_handle = tokio::task::spawn(conn); + + let handle = tokio::task::spawn(async move { + // let mut addr_cache: HashMap<u32, HashSet<Vec<u8>>> = HashMap::new(); + let mut addr_cache: HashMap<u32, HashSet<IpAddr>> = HashMap::new(); + + while let Some((message, _)) = messages.next().await { + match message.payload { + NetlinkPayload::Error(err) => { + warn!("error reading netlink payload: {:?}", err); + } + NetlinkPayload::Done(_) => { + trace!("done received, exiting"); + break; + } + NetlinkPayload::InnerMessage(msg) => match msg { + RouteNetlinkMessage::NewAddress(msg) => { + trace!("NEWADDR: {:?}", msg); + let addrs = addr_cache.entry(msg.header.index).or_default(); + if let Some(addr) = get_nla!(msg, address::AddressAttribute::Address) { + if addrs.contains(addr) { + // already cached + continue; + } else { + addrs.insert(*addr); + sender.send(NetworkMessage::Change).await.ok(); + } + } + } + RouteNetlinkMessage::DelAddress(msg) => { + trace!("DELADDR: {:?}", msg); + let addrs = addr_cache.entry(msg.header.index).or_default(); + if let Some(addr) = get_nla!(msg, address::AddressAttribute::Address) { + addrs.remove(addr); + } + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewRoute(msg) | RouteNetlinkMessage::DelRoute(msg) => { + trace!("ROUTE:: {:?}", msg); + + // Ignore multicast and link-local destination routes in the + // local (255) and main (254) tables. + let table = get_nla!(msg, route::RouteAttribute::Table) + .copied() + .unwrap_or_default(); + if let Some(dst) = get_nla!(msg, route::RouteAttribute::Destination) { + match dst { + route::RouteAddress::Inet(addr) => { + if (table == 255 || table == 254) + && (addr.is_multicast() + || is_link_local(IpAddr::V4(*addr))) + { + continue; + } + } + route::RouteAddress::Inet6(addr) => { + if (table == 255 || table == 254) + && (addr.is_multicast() + || is_link_local(IpAddr::V6(*addr))) + { + continue; + } + } + _ => {} + } + } + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewRule(msg) => { + trace!("NEWRULE: {:?}", msg); + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::DelRule(msg) => { + trace!("DELRULE: {:?}", msg); + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewLink(msg) => { + trace!("NEWLINK: {:?}", msg); + // ignored atm + } + RouteNetlinkMessage::DelLink(msg) => { + trace!("DELLINK: {:?}", msg); + // ignored atm + } + msg => { + trace!("unhandled: {:?}", msg); + } + }, + _ => { + // ignore other types + } + } + } + }); + + Ok(RouteMonitor { + handle, + conn_handle, + }) + } +} + +pub(super) fn is_interesting_interface(_name: &str) -> bool { + true +} diff --git a/patches/netwatch/src/netmon/wasm_browser.rs b/patches/netwatch/src/netmon/wasm_browser.rs new file mode 100644 index 0000000000..86da37ebcf --- /dev/null +++ b/patches/netwatch/src/netmon/wasm_browser.rs @@ -0,0 +1,86 @@ +use js_sys::{ + wasm_bindgen::{prelude::Closure, JsCast}, + Function, +}; +use n0_future::task; +use tokio::sync::mpsc; +use web_sys::{EventListener, EventTarget}; + +use super::actor::NetworkMessage; + +#[derive(Debug, derive_more::Display)] +#[display("error")] +pub struct Error; + +impl std::error::Error for Error {} + +#[derive(Debug)] +pub(super) struct RouteMonitor { + _listeners: Option<Listeners>, +} + +impl RouteMonitor {
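+ // (Editor's annotation) The browser monitor hooks the global "online"/"offline" + // events; each firing enqueues a `NetworkMessage::Change` via a spawned microtask.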
+ pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> { + let closure: Function = Closure::<dyn Fn()>::new(move || { + tracing::trace!("browser RouteMonitor event triggered"); + // task::spawn is effectively translated into a queueMicrotask in JS + let sender = sender.clone(); + task::spawn(async move { + sender + .send(NetworkMessage::Change) + .await + .inspect_err(|err| { + tracing::debug!(?err, "failed sending NetworkMessage::Change") + }) + }); + }) + .into_js_value() + .unchecked_into(); + // The closure keeps itself alive via reference counting internally + let _listeners = add_event_listeners(&closure); + Ok(RouteMonitor { _listeners }) + } +} + +fn add_event_listeners(f: &Function) -> Option<Listeners> { + let online_listener = EventListener::new(); + online_listener.set_handle_event(f); + let offline_listener = EventListener::new(); + offline_listener.set_handle_event(f); + + // https://developer.mozilla.org/en-US/docs/Web/API/Navigator/onLine#listening_for_changes_in_network_status + let window: EventTarget = js_sys::global().unchecked_into(); + window + .add_event_listener_with_event_listener("online", &online_listener) + .inspect_err(|err| tracing::debug!(?err, "failed adding event listener")) + .ok()?; + + window + .add_event_listener_with_event_listener("offline", &offline_listener) + .inspect_err(|err| tracing::debug!(?err, "failed adding event listener")) + .ok()?; + + Some(Listeners { + online_listener, + offline_listener, + }) +} + +#[derive(Debug)] +struct Listeners { + online_listener: EventListener, + offline_listener: EventListener, +} + +impl Drop for Listeners { + fn drop(&mut self) { + tracing::trace!("Removing online/offline event listeners"); + let window: EventTarget = js_sys::global().unchecked_into(); + window + .remove_event_listener_with_event_listener("online", &self.online_listener) + .ok(); + window + .remove_event_listener_with_event_listener("offline", &self.offline_listener) + .ok(); + } +} diff --git a/patches/netwatch/src/netmon/windows.rs b/patches/netwatch/src/netmon/windows.rs new file mode 100644 index 0000000000..57037745bf --- /dev/null +++ b/patches/netwatch/src/netmon/windows.rs @@ -0,0 +1,223 @@ +use std::{collections::HashMap, sync::Arc}; + +use libc::c_void; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::sync::mpsc; +use tracing::{trace, warn}; +use windows::Win32::{ + Foundation::HANDLE as Handle, + NetworkManagement::IpHelper::{ + MIB_IPFORWARD_ROW2, MIB_NOTIFICATION_TYPE, MIB_UNICASTIPADDRESS_ROW, + }, +}; + +use super::actor::NetworkMessage; + +#[derive(Debug)] +pub(super) struct RouteMonitor { + #[allow(dead_code)] + cb_handler: CallbackHandler, +} + +#[common_fields({ + backtrace: Option<Backtrace>, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[snafu(display("win32"))] + Win32 { source: windows_result::Error }, +} + +impl RouteMonitor { + pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> { + // Register two callbacks with the Windows API + let mut cb_handler = CallbackHandler::default(); + + // 1. Unicast Address Changes + let s = sender.clone(); + cb_handler.register_unicast_address_change_callback(Box::new(move || { + if let Err(err) = s.blocking_send(NetworkMessage::Change) { + warn!("unable to send unicast change notification: {:?}", err); + } + }))?; + + // 2.
Route Changes + cb_handler.register_route_change_callback(Box::new(move || { + if let Err(err) = sender.blocking_send(NetworkMessage::Change) { + warn!("unable to send route change notification: {:?}", err); + } + }))?; + + Ok(RouteMonitor { cb_handler }) + } +} + +pub(super) fn is_interesting_interface(_name: &str) -> bool { + true +} + +/// Manages callbacks registered with the Win32 networking API. +#[derive(derive_more::Debug, Default)] +struct CallbackHandler { + /// Stores the callbacks and `Handle`s for unicast. + // `Handle` is not hashable, so store the underlying `isize`. + #[debug("HashMap<isize, Arc<UnicastCallback>>")] + unicast_callbacks: HashMap<isize, Arc<UnicastCallback>>, + /// Stores the callbacks and `Handle`s for route. + // `Handle` is not hashable, so store the underlying `isize`. + #[debug("HashMap<isize, Arc<RouteCallback>>")] + route_callbacks: HashMap<isize, Arc<RouteCallback>>, +} + +impl Drop for CallbackHandler { + fn drop(&mut self) { + // Make sure to unregister all callbacks left. + let handles: Vec<_> = self + .unicast_callbacks + .keys() + .map(|h| UnicastCallbackHandle(Handle(*h as *mut c_void))) + .collect(); + + for handle in handles { + self.unregister_unicast_address_change_callback(handle).ok(); // best effort + } + + let handles: Vec<_> = self + .route_callbacks + .keys() + .map(|h| RouteCallbackHandle(Handle(*h as *mut c_void))) + .collect(); + + for handle in handles { + self.unregister_route_change_callback(handle).ok(); // best effort + } + } +} + +struct UnicastCallbackHandle(Handle); +type UnicastCallback = Box<dyn Fn() + Send + Sync + 'static>; + +struct RouteCallbackHandle(Handle); +type RouteCallback = Box<dyn Fn() + Send + Sync + 'static>; + +impl CallbackHandler { + fn register_unicast_address_change_callback( + &mut self, + cb: UnicastCallback, + ) -> Result<UnicastCallbackHandle, Error> { + trace!("registering unicast callback"); + let mut handle = Handle::default(); + let cb = Arc::new(cb); + unsafe { + windows::Win32::NetworkManagement::IpHelper::NotifyUnicastIpAddressChange( + windows::Win32::Networking::WinSock::AF_UNSPEC, + Some(unicast_change_callback), + Some(Arc::as_ptr(&cb) as *const c_void), // context + false, // initial notification + &mut handle, + ) + .ok() + .context(Win32Snafu)?; + } + + self.unicast_callbacks.insert(handle.0 as isize, cb); + + Ok(UnicastCallbackHandle(handle)) + } + + fn unregister_unicast_address_change_callback( + &mut self, + handle: UnicastCallbackHandle, + ) -> Result<(), Error> { + trace!("unregistering unicast callback"); + if self + .unicast_callbacks + .remove(&(handle.0 .0 as isize)) + .is_some() + { + unsafe { + windows::Win32::NetworkManagement::IpHelper::CancelMibChangeNotify2(handle.0) + .ok() + .context(Win32Snafu)?; + } + } + + Ok(()) + } + + fn register_route_change_callback( + &mut self, + cb: RouteCallback, + ) -> Result<RouteCallbackHandle, Error> { + trace!("registering route change callback"); + let mut handle = Handle::default(); + let cb = Arc::new(cb); + unsafe { + windows::Win32::NetworkManagement::IpHelper::NotifyRouteChange2( + windows::Win32::Networking::WinSock::AF_UNSPEC, + Some(route_change_callback), + Arc::as_ptr(&cb) as *const c_void, // context + false, // initial notification + &mut handle, + ) + .ok() + .context(Win32Snafu)?; + } + + self.route_callbacks.insert(handle.0 as isize, cb); + + Ok(RouteCallbackHandle(handle)) + } + + fn unregister_route_change_callback( + &mut self, + handle: RouteCallbackHandle, + ) -> Result<(), Error> { + trace!("unregistering route callback"); + if self + .route_callbacks + .remove(&(handle.0 .0 as isize)) + .is_some() + { + unsafe { + windows::Win32::NetworkManagement::IpHelper::CancelMibChangeNotify2(handle.0) + .ok() + .context(Win32Snafu)?; + } + } + + Ok(()) + } +} + +unsafe extern "system" fn unicast_change_callback(
diff --git a/patches/netwatch/src/udp.rs b/patches/netwatch/src/udp.rs
new file mode 100644
index 0000000000..1e0a6d2656
--- /dev/null
+++ b/patches/netwatch/src/udp.rs
@@ -0,0 +1,910 @@
+use std::{
+    future::Future,
+    io,
+    net::SocketAddr,
+    pin::Pin,
+    sync::{atomic::AtomicBool, RwLock, RwLockReadGuard, TryLockError},
+    task::{Context, Poll},
+};
+
+use atomic_waker::AtomicWaker;
+use quinn_udp::Transmit;
+use tokio::io::Interest;
+use tracing::{debug, trace, warn};
+
+use super::IpFamily;
+
+/// Wrapper around a tokio UDP socket.
+#[derive(Debug)]
+pub struct UdpSocket {
+    socket: RwLock<SocketState>,
+    recv_waker: AtomicWaker,
+    send_waker: AtomicWaker,
+    /// Set to `true` when an error occurs that requires rebinding the socket.
+    is_broken: AtomicBool,
+}
+
+/// UDP socket read/write buffer size (7MB). The value of 7MB is chosen as it
+/// is the max supported by a default configuration of macOS. Some platforms
+/// will silently clamp the value.
+const SOCKET_BUFFER_SIZE: usize = 7 << 20;
+
+impl UdpSocket {
+    /// Bind only Ipv4 on any interface.
+    pub fn bind_v4(port: u16) -> io::Result<Self> {
+        Self::bind(IpFamily::V4, port)
+    }
+
+    /// Bind only Ipv6 on any interface.
+    pub fn bind_v6(port: u16) -> io::Result<Self> {
+        Self::bind(IpFamily::V6, port)
+    }
+
+    /// Bind only Ipv4 on localhost.
+    pub fn bind_local_v4(port: u16) -> io::Result<Self> {
+        Self::bind_local(IpFamily::V4, port)
+    }
+
+    /// Bind only Ipv6 on localhost.
+    pub fn bind_local_v6(port: u16) -> io::Result<Self> {
+        Self::bind_local(IpFamily::V6, port)
+    }
+
+    /// Bind to the given port only on localhost.
+    pub fn bind_local(network: IpFamily, port: u16) -> io::Result<Self> {
+        let addr = SocketAddr::new(network.local_addr(), port);
+        Self::bind_raw(addr)
+    }
+
+    /// Bind to the given port and listen on all interfaces.
+    pub fn bind(network: IpFamily, port: u16) -> io::Result<Self> {
+        let addr = SocketAddr::new(network.unspecified_addr(), port);
+        Self::bind_raw(addr)
+    }
+
+    /// Bind to any provided [`SocketAddr`].
+    pub fn bind_full(addr: impl Into<SocketAddr>) -> io::Result<Self> {
+        Self::bind_raw(addr)
+    }
+
+    /// Is the socket broken and needs a rebind?
+    pub fn is_broken(&self) -> bool {
+        self.is_broken.load(std::sync::atomic::Ordering::Acquire)
+    }
+
+    /// Marks this socket as needing a rebind.
+    fn mark_broken(&self) {
+        self.is_broken
+            .store(true, std::sync::atomic::Ordering::Release);
+    }
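    // [Editorial note, not part of the patch] `is_broken` ties the error
    // handlers below to `rebind`. The intended cycle is:
    //
    //   handle_read_error(NotConnected) / handle_write_error(BrokenPipe)
    //       -> mark_broken()                  (flag set)
    //   next send/recv/poll entry point
    //       -> maybe_rebind() -> rebind()     (fresh socket, same local addr;
    //                                          flag cleared, wakers woken)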
+
+    /// Rebind the underlying socket.
+    pub fn rebind(&self) -> io::Result<()> {
+        {
+            let mut guard = self.socket.write().unwrap();
+            guard.rebind()?;
+
+            // Clear errors
+            self.is_broken
+                .store(false, std::sync::atomic::Ordering::Release);
+
+            drop(guard);
+        }
+
+        // wakeup
+        self.wake_all();
+
+        Ok(())
+    }
+
+    fn bind_raw(addr: impl Into<SocketAddr>) -> io::Result<Self> {
+        let socket = SocketState::bind(addr.into())?;
+
+        Ok(UdpSocket {
+            socket: RwLock::new(socket),
+            recv_waker: AtomicWaker::default(),
+            send_waker: AtomicWaker::default(),
+            is_broken: AtomicBool::new(false),
+        })
+    }
+
+    /// Receives a single datagram message on the socket from the remote address
+    /// to which it is connected. On success, returns the number of bytes read.
+    ///
+    /// The function must be called with a valid byte array `buffer` of sufficient
+    /// size to hold the message bytes. If a message is too long to fit in the
+    /// supplied buffer, excess bytes may be discarded.
+    ///
+    /// The [`connect`] method will connect this socket to a remote address.
+    /// This method will fail if the socket is not connected.
+    ///
+    /// [`connect`]: method@Self::connect
+    pub fn recv<'a, 'b>(&'b self, buffer: &'a mut [u8]) -> RecvFut<'a, 'b> {
+        RecvFut {
+            socket: self,
+            buffer,
+        }
+    }
+
+    /// Receives a single datagram message on the socket. On success, returns
+    /// the number of bytes read and the origin.
+    ///
+    /// The function must be called with a valid byte array `buffer` of sufficient
+    /// size to hold the message bytes. If a message is too long to fit in the
+    /// supplied buffer, excess bytes may be discarded.
+    pub fn recv_from<'a, 'b>(&'b self, buffer: &'a mut [u8]) -> RecvFromFut<'a, 'b> {
+        RecvFromFut {
+            socket: self,
+            buffer,
+        }
+    }
+
+    /// Sends data on the socket to the remote address that the socket is
+    /// connected to.
+    ///
+    /// The [`connect`] method will connect this socket to a remote address.
+    /// This method will fail if the socket is not connected.
+    ///
+    /// [`connect`]: method@Self::connect
+    ///
+    /// # Return
+    ///
+    /// On success, the number of bytes sent is returned, otherwise, the
+    /// encountered error is returned.
+    pub fn send<'a, 'b>(&'b self, buffer: &'a [u8]) -> SendFut<'a, 'b> {
+        SendFut {
+            socket: self,
+            buffer,
+        }
+    }
+
+    /// Sends data on the socket to the given address. On success, returns the
+    /// number of bytes written.
+    pub fn send_to<'a, 'b>(&'b self, buffer: &'a [u8], to: SocketAddr) -> SendToFut<'a, 'b> {
+        SendToFut {
+            socket: self,
+            buffer,
+            to,
+        }
+    }
+
+    /// Connects the UDP socket, setting the default destination for `send()` and
+    /// limiting packets that are read via `recv` from the address specified in
+    /// `addr`.
+    pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
+        trace!(%addr, "connecting");
+        let guard = self.socket.read().unwrap();
+        let (socket_tokio, _state) = guard.try_get_connected()?;
+
+        let sock_ref = socket2::SockRef::from(&socket_tokio);
+        sock_ref.connect(&socket2::SockAddr::from(addr))?;
+
+        Ok(())
+    }
+
+    /// Returns the local address of this socket.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        let guard = self.socket.read().unwrap();
+        let (socket, _state) = guard.try_get_connected()?;
+
+        socket.local_addr()
+    }
+
+    /// Closes the socket, and waits for the underlying `libc::close` call to be finished.
+    pub async fn close(&self) {
+        let socket = self.socket.write().unwrap().close();
+        self.wake_all();
+        if let Some((sock, _)) = socket {
+            let std_sock = sock.into_std();
+            let res = tokio::runtime::Handle::current()
+                .spawn_blocking(move || {
+                    // Calls libc::close, which can block
+                    drop(std_sock);
+                })
+                .await;
+            if let Err(err) = res {
+                warn!("failed to close socket: {:?}", err);
+            }
+        }
+    }
+
+    /// Check if this socket is closed.
+    pub fn is_closed(&self) -> bool {
+        self.socket.read().unwrap().is_closed()
+    }
+
+    /// Handle potential read errors, updating internal state.
+    ///
+    /// Returns `Some(error)` if the error is fatal, otherwise `None`.
+    fn handle_read_error(&self, error: io::Error) -> Option<io::Error> {
+        match error.kind() {
+            io::ErrorKind::NotConnected => {
+                // This indicates the underlying socket is broken, and we should attempt to rebind it
+                self.mark_broken();
+                None
+            }
+            _ => Some(error),
+        }
+    }
+
+    /// Handle potential write errors, updating internal state.
+    ///
+    /// Returns `Some(error)` if the error is fatal, otherwise `None`.
+    fn handle_write_error(&self, error: io::Error) -> Option<io::Error> {
+        match error.kind() {
+            io::ErrorKind::BrokenPipe => {
+                // This indicates the underlying socket is broken, and we should attempt to rebind it
+                self.mark_broken();
+                None
+            }
+            _ => Some(error),
+        }
+    }
+
+    /// Try to get a read lock for the sockets, but don't block trying to acquire it.
+    fn poll_read_socket(
+        &self,
+        waker: &AtomicWaker,
+        cx: &mut std::task::Context<'_>,
+    ) -> Poll<RwLockReadGuard<'_, SocketState>> {
+        let guard = match self.socket.try_read() {
+            Ok(guard) => guard,
+            Err(TryLockError::Poisoned(e)) => panic!("socket lock poisoned: {e}"),
+            Err(TryLockError::WouldBlock) => {
+                waker.register(cx.waker());
+
+                match self.socket.try_read() {
+                    Ok(guard) => {
+                        // we're actually fine, no need to cause a spurious wakeup
+                        waker.take();
+                        guard
+                    }
+                    Err(TryLockError::Poisoned(e)) => panic!("socket lock poisoned: {e}"),
+                    Err(TryLockError::WouldBlock) => {
+                        // Ok fine, we registered our waker, the lock is really closed,
+                        // we can return pending.
+                        return Poll::Pending;
+                    }
+                }
+            }
+        };
+        Poll::Ready(guard)
+    }
+
+    fn wake_all(&self) {
+        self.recv_waker.wake();
+        self.send_waker.wake();
+    }
+
+    /// Checks if the socket needs a rebind, and if so does it.
+    ///
+    /// Returns an error if the rebind is needed, but failed.
+    fn maybe_rebind(&self) -> io::Result<()> {
+        if self.is_broken() {
+            self.rebind()?;
+        }
+        Ok(())
+    }
+
+    /// Poll for writable.
+    pub fn poll_writable(&self, cx: &mut std::task::Context<'_>) -> Poll<io::Result<()>> {
+        loop {
+            if let Err(err) = self.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard = std::task::ready!(self.poll_read_socket(&self.send_waker, cx));
+            let (socket, _state) = guard.try_get_connected()?;
+
+            match socket.poll_send_ready(cx) {
+                Poll::Pending => {
+                    self.send_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => return Poll::Ready(Ok(())),
+                Poll::Ready(Err(err)) => {
+                    if let Some(err) = self.handle_write_error(err) {
+                        return Poll::Ready(Err(err));
+                    }
+                    continue;
+                }
+            }
+        }
+    }
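    // [Editorial sketch, not part of the patch] The poll-style methods here can
    // be driven from async code with `std::future::poll_fn`, e.g.:
    //
    //     async fn writable(sock: &UdpSocket) -> io::Result<()> {
    //         std::future::poll_fn(|cx| sock.poll_writable(cx)).await
    //     }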
+
+    /// Send a quinn based `Transmit`.
+    pub fn try_send_quinn(&self, transmit: &Transmit<'_>) -> io::Result<()> {
+        loop {
+            self.maybe_rebind()?;
+
+            let guard = match self.socket.try_read() {
+                Ok(guard) => guard,
+                Err(TryLockError::Poisoned(e)) => {
+                    panic!("lock poisoned: {:?}", e);
+                }
+                Err(TryLockError::WouldBlock) => {
+                    return Err(io::Error::new(io::ErrorKind::WouldBlock, ""));
+                }
+            };
+            let (socket, state) = guard.try_get_connected()?;
+
+            let res = socket.try_io(Interest::WRITABLE, || state.send(socket.into(), transmit));
+
+            match res {
+                Ok(()) => return Ok(()),
+                Err(err) => match self.handle_write_error(err) {
+                    Some(err) => return Err(err),
+                    None => {
+                        continue;
+                    }
+                },
+            }
+        }
+    }
+
+    /// quinn based `poll_recv`.
+    pub fn poll_recv_quinn(
+        &self,
+        cx: &mut Context,
+        bufs: &mut [io::IoSliceMut<'_>],
+        meta: &mut [quinn_udp::RecvMeta],
+    ) -> Poll<io::Result<usize>> {
+        loop {
+            if let Err(err) = self.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard = n0_future::ready!(self.poll_read_socket(&self.recv_waker, cx));
+            let (socket, state) = guard.try_get_connected()?;
+
+            match socket.poll_recv_ready(cx) {
+                Poll::Pending => {
+                    self.recv_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => {
+                    // We are ready to read, continue
+                }
+                Poll::Ready(Err(err)) => match self.handle_read_error(err) {
+                    Some(err) => return Poll::Ready(Err(err)),
+                    None => {
+                        continue;
+                    }
+                },
+            }
+
+            let res = socket.try_io(Interest::READABLE, || state.recv(socket.into(), bufs, meta));
+            match res {
+                Ok(count) => {
+                    for meta in meta.iter().take(count) {
+                        trace!(
+                            src = %meta.addr,
+                            len = meta.len,
+                            count = meta.len / meta.stride,
+                            dst = %meta.dst_ip.map(|x| x.to_string()).unwrap_or_default(),
+                            "UDP recv"
+                        );
+                    }
+                    return Poll::Ready(Ok(count));
+                }
+                Err(err) => {
+                    // ignore spurious wakeups
+                    if err.kind() == io::ErrorKind::WouldBlock {
+                        continue;
+                    }
+                    match self.handle_read_error(err) {
+                        Some(err) => return Poll::Ready(Err(err)),
+                        None => {
+                            continue;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// Whether transmitted datagrams might get fragmented by the IP layer.
+    ///
+    /// Returns `false` on targets which employ e.g. the `IPV6_DONTFRAG` socket option.
+    pub fn may_fragment(&self) -> bool {
+        let guard = self.socket.read().unwrap();
+        guard.may_fragment()
+    }
+
+    /// The maximum amount of segments which can be transmitted if a platform
+    /// supports Generic Send Offload (GSO).
+    ///
+    /// This is 1 if the platform doesn't support GSO. Subject to change if errors are detected
+    /// while using GSO.
+    pub fn max_gso_segments(&self) -> usize {
+        let guard = self.socket.read().unwrap();
+        guard.max_gso_segments()
+    }
+
+    /// The number of segments to read when GRO is enabled. Used as a factor to
+    /// compute the receive buffer size.
+    ///
+    /// Returns 1 if the platform doesn't support GRO.
+    pub fn gro_segments(&self) -> usize {
+        let guard = self.socket.read().unwrap();
+        guard.gro_segments()
+    }
+}
+
+/// Receive future.
+#[derive(Debug)]
+pub struct RecvFut<'a, 'b> {
+    socket: &'b UdpSocket,
+    buffer: &'a mut [u8],
+}
+
+impl Future for RecvFut<'_, '_> {
+    type Output = io::Result<usize>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        let Self { socket, buffer } = &mut *self;
+
+        loop {
+            if let Err(err) = socket.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard = n0_future::ready!(socket.poll_read_socket(&socket.recv_waker, cx));
+            let (inner_socket, _state) = guard.try_get_connected()?;
+
+            match inner_socket.poll_recv_ready(cx) {
+                Poll::Pending => {
+                    self.socket.recv_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => {
+                    let res = inner_socket.try_recv(buffer);
+                    if let Err(err) = res {
+                        if err.kind() == io::ErrorKind::WouldBlock {
+                            continue;
+                        }
+                        if let Some(err) = socket.handle_read_error(err) {
+                            return Poll::Ready(Err(err));
+                        }
+                        continue;
+                    }
+                    return Poll::Ready(res);
+                }
+                Poll::Ready(Err(err)) => {
+                    if let Some(err) = socket.handle_read_error(err) {
+                        return Poll::Ready(Err(err));
+                    }
+                    continue;
+                }
+            }
+        }
+    }
+}
+
+/// Receive future.
+#[derive(Debug)]
+pub struct RecvFromFut<'a, 'b> {
+    socket: &'b UdpSocket,
+    buffer: &'a mut [u8],
+}
+
+impl Future for RecvFromFut<'_, '_> {
+    type Output = io::Result<(usize, SocketAddr)>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        let Self { socket, buffer } = &mut *self;
+
+        loop {
+            if let Err(err) = socket.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard = n0_future::ready!(socket.poll_read_socket(&socket.recv_waker, cx));
+            let (inner_socket, _state) = guard.try_get_connected()?;
+
+            match inner_socket.poll_recv_ready(cx) {
+                Poll::Pending => {
+                    self.socket.recv_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => {
+                    let res = inner_socket.try_recv_from(buffer);
+                    if let Err(err) = res {
+                        if err.kind() == io::ErrorKind::WouldBlock {
+                            continue;
+                        }
+                        if let Some(err) = socket.handle_read_error(err) {
+                            return Poll::Ready(Err(err));
+                        }
+                        continue;
+                    }
+                    return Poll::Ready(res);
+                }
+                Poll::Ready(Err(err)) => {
+                    if let Some(err) = socket.handle_read_error(err) {
+                        return Poll::Ready(Err(err));
+                    }
+                    continue;
+                }
+            }
+        }
+    }
+}
+
+/// Send future.
+#[derive(Debug)]
+pub struct SendFut<'a, 'b> {
+    socket: &'b UdpSocket,
+    buffer: &'a [u8],
+}
+
+impl Future for SendFut<'_, '_> {
+    type Output = io::Result<usize>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        loop {
+            if let Err(err) = self.socket.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard =
+                n0_future::ready!(self.socket.poll_read_socket(&self.socket.send_waker, cx));
+            let (socket, _state) = guard.try_get_connected()?;
+
+            match socket.poll_send_ready(cx) {
+                Poll::Pending => {
+                    self.socket.send_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => {
+                    let res = socket.try_send(self.buffer);
+                    if let Err(err) = res {
+                        if err.kind() == io::ErrorKind::WouldBlock {
+                            continue;
+                        }
+                        if let Some(err) = self.socket.handle_write_error(err) {
+                            return Poll::Ready(Err(err));
+                        }
+                        continue;
+                    }
+                    return Poll::Ready(res);
+                }
+                Poll::Ready(Err(err)) => {
+                    if let Some(err) = self.socket.handle_write_error(err) {
+                        return Poll::Ready(Err(err));
+                    }
+                    continue;
+                }
+            }
+        }
+    }
+}
+
+/// Send future.
+#[derive(Debug)]
+pub struct SendToFut<'a, 'b> {
+    socket: &'b UdpSocket,
+    buffer: &'a [u8],
+    to: SocketAddr,
+}
+
+impl Future for SendToFut<'_, '_> {
+    type Output = io::Result<usize>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        loop {
+            if let Err(err) = self.socket.maybe_rebind() {
+                return Poll::Ready(Err(err));
+            }
+
+            let guard =
+                n0_future::ready!(self.socket.poll_read_socket(&self.socket.send_waker, cx));
+            let (socket, _state) = guard.try_get_connected()?;
+
+            match socket.poll_send_ready(cx) {
+                Poll::Pending => {
+                    self.socket.send_waker.register(cx.waker());
+                    return Poll::Pending;
+                }
+                Poll::Ready(Ok(())) => {
+                    let res = socket.try_send_to(self.buffer, self.to);
+                    if let Err(err) = res {
+                        if err.kind() == io::ErrorKind::WouldBlock {
+                            continue;
+                        }
+
+                        if let Some(err) = self.socket.handle_write_error(err) {
+                            return Poll::Ready(Err(err));
+                        }
+                        continue;
+                    }
+                    return Poll::Ready(res);
+                }
+                Poll::Ready(Err(err)) => {
+                    if let Some(err) = self.socket.handle_write_error(err) {
+                        return Poll::Ready(Err(err));
+                    }
+                    continue;
+                }
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+enum SocketState {
+    Connected {
+        socket: tokio::net::UdpSocket,
+        state: quinn_udp::UdpSocketState,
+        /// The addr we are binding to.
+        addr: SocketAddr,
+    },
+    Closed {
+        last_max_gso_segments: usize,
+        last_gro_segments: usize,
+        last_may_fragment: bool,
+    },
+}
+
+impl SocketState {
+    fn try_get_connected(
+        &self,
+    ) -> io::Result<(&tokio::net::UdpSocket, &quinn_udp::UdpSocketState)> {
+        match self {
+            Self::Connected {
+                socket,
+                state,
+                addr: _,
+            } => Ok((socket, state)),
+            Self::Closed { .. } => {
+                warn!("socket closed");
+                Err(io::Error::new(io::ErrorKind::BrokenPipe, "socket closed"))
+            }
+        }
+    }
+
+    fn bind(addr: SocketAddr) -> io::Result<Self> {
+        let network = IpFamily::from(addr.ip());
+        let socket = socket2::Socket::new(
+            network.into(),
+            socket2::Type::DGRAM,
+            Some(socket2::Protocol::UDP),
+        )?;
+
+        if let Err(err) = socket.set_recv_buffer_size(SOCKET_BUFFER_SIZE) {
+            debug!(
+                "failed to set recv_buffer_size to {}: {:?}",
+                SOCKET_BUFFER_SIZE, err
+            );
+        }
+        if let Err(err) = socket.set_send_buffer_size(SOCKET_BUFFER_SIZE) {
+            debug!(
+                "failed to set send_buffer_size to {}: {:?}",
+                SOCKET_BUFFER_SIZE, err
+            );
+        }
+        if network == IpFamily::V6 {
+            // Avoid dualstack
+            socket.set_only_v6(true)?;
+        }
+
+        // Binding must happen before calling quinn, otherwise `local_addr`
+        // is not yet available on all OSes.
+        socket.bind(&addr.into())?;
+
+        // Ensure nonblocking
+        socket.set_nonblocking(true)?;
+
+        let socket: std::net::UdpSocket = socket.into();
+
+        // Convert into tokio UdpSocket
+        let socket = tokio::net::UdpSocket::from_std(socket)?;
+        let socket_ref = quinn_udp::UdpSockRef::from(&socket);
+        let socket_state = quinn_udp::UdpSocketState::new(socket_ref)?;
+
+        let local_addr = socket.local_addr()?;
+        if addr.port() != 0 && local_addr.port() != addr.port() {
+            return Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!(
+                    "wrong port bound: {:?}: wanted: {} got {}",
+                    network,
+                    addr.port(),
+                    local_addr.port(),
+                ),
+            ));
+        }
+
+        Ok(Self::Connected {
+            socket,
+            state: socket_state,
+            addr: local_addr,
+        })
+    }
+
+    fn rebind(&mut self) -> io::Result<()> {
+        let (addr, closed_state) = match self {
+            Self::Connected { state, addr, .. } => {
+                let s = SocketState::Closed {
+                    last_max_gso_segments: state.max_gso_segments(),
+                    last_gro_segments: state.gro_segments(),
+                    last_may_fragment: state.may_fragment(),
+                };
+                (*addr, s)
+            }
+            Self::Closed { .. } => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Other,
+                    "socket is closed and cannot be rebound",
+                ));
+            }
+        };
+        debug!("rebinding {}", addr);
+
+        *self = closed_state;
+        *self = Self::bind(addr)?;
+
+        Ok(())
+    }
+
+    fn is_closed(&self) -> bool {
+        matches!(self, Self::Closed { .. })
+    }
+
+    fn close(&mut self) -> Option<(tokio::net::UdpSocket, quinn_udp::UdpSocketState)> {
+        match self {
+            Self::Connected { state, .. } => {
+                let s = SocketState::Closed {
+                    last_max_gso_segments: state.max_gso_segments(),
+                    last_gro_segments: state.gro_segments(),
+                    last_may_fragment: state.may_fragment(),
+                };
+                let Self::Connected { socket, state, .. } = std::mem::replace(self, s) else {
+                    unreachable!("just checked");
+                };
+                Some((socket, state))
+            }
+            Self::Closed { .. } => None,
+        }
+    }
+
+    fn may_fragment(&self) -> bool {
+        match self {
+            Self::Connected { state, .. } => state.may_fragment(),
+            Self::Closed {
+                last_may_fragment, ..
+            } => *last_may_fragment,
+        }
+    }
+
+    fn max_gso_segments(&self) -> usize {
+        match self {
+            Self::Connected { state, .. } => state.max_gso_segments(),
+            Self::Closed {
+                last_max_gso_segments,
+                ..
+            } => *last_max_gso_segments,
+        }
+    }
+
+    fn gro_segments(&self) -> usize {
+        match self {
+            Self::Connected { state, .. } => state.gro_segments(),
+            Self::Closed {
+                last_gro_segments, ..
+            } => *last_gro_segments,
+        }
+    }
+}
+
+impl Drop for UdpSocket {
+    fn drop(&mut self) {
+        trace!("dropping UdpSocket");
+        if let Some((socket, _)) = self.socket.write().unwrap().close() {
+            if let Ok(handle) = tokio::runtime::Handle::try_current() {
+                // No wakeup after dropping write lock here, since we're getting dropped.
+                // this will be empty if `close` was called before
+                let std_sock = socket.into_std();
+                handle.spawn_blocking(move || {
+                    // Calls libc::close, which can block
+                    drop(std_sock);
+                });
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use testresult::TestResult;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_reconnect() -> TestResult {
+        let (s_b, mut r_b) = tokio::sync::mpsc::channel(16);
+        let handle_a = tokio::task::spawn(async move {
+            let socket = UdpSocket::bind_local(IpFamily::V4, 0)?;
+            let addr = socket.local_addr()?;
+            s_b.send(addr).await?;
+            println!("socket bound to {:?}", addr);
+
+            let mut buffer = [0u8; 16];
+            for i in 0..100 {
+                println!("-- tick {i}");
+                let read = socket.recv_from(&mut buffer).await;
+                match read {
+                    Ok((count, addr)) => {
+                        println!("got {:?}", &buffer[..count]);
+                        println!("sending {:?} to {:?}", &buffer[..count], addr);
+                        socket.send_to(&buffer[..count], addr).await?;
+                    }
+                    Err(err) => {
+                        eprintln!("error reading: {:?}", err);
+                    }
+                }
+            }
+            socket.close().await;
+            Ok::<_, testresult::TestError>(())
+        });
+
+        let socket = UdpSocket::bind_local(IpFamily::V4, 0)?;
+        let first_addr = socket.local_addr()?;
+        println!("socket2 bound to {:?}", socket.local_addr()?);
+        let addr = r_b.recv().await.unwrap();
+
+        let mut buffer = [0u8; 16];
+        for i in 0u8..100 {
+            println!("round one - {}", i);
+            socket.send_to(&[i][..], addr).await?;
+            let (count, from) = socket.recv_from(&mut buffer).await?;
+            assert_eq!(addr, from);
+            assert_eq!(count, 1);
+            assert_eq!(buffer[0], i);
+
+            // check for errors
+            assert!(!socket.is_broken());
+
+            // rebind
+            socket.rebind()?;
+
+            // check that the socket has the same address as before
+            assert_eq!(socket.local_addr()?, first_addr);
+        }
+
+        handle_a.await.ok();
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_udp_mark_broken() -> TestResult {
+        let socket_a = UdpSocket::bind_local(IpFamily::V4, 0)?;
+        let addr_a = socket_a.local_addr()?;
+        println!("socket bound to {:?}", addr_a);
+
+        let socket_b = UdpSocket::bind_local(IpFamily::V4, 0)?;
+        let addr_b = socket_b.local_addr()?;
+        println!("socket bound to {:?}", addr_b);
+
+        let handle = tokio::task::spawn(async move {
+            let mut buffer = [0u8; 16];
+            for _ in 0..2 {
+                match socket_b.recv_from(&mut buffer).await {
+                    Ok((count, addr)) => {
+                        println!("got {:?} from {:?}", &buffer[..count], addr);
+                    }
+                    Err(err) => {
+                        eprintln!("error recv: {:?}", err);
+                    }
+                }
+            }
+        });
+        socket_a.send_to(&[0][..], addr_b).await?;
+        socket_a.mark_broken();
+        assert!(socket_a.is_broken());
+        socket_a.send_to(&[0][..], addr_b).await?;
+        assert!(!socket_a.is_broken());
+
+        handle.await?;
+        Ok(())
+    }
+}
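(Editorial aside, not part of the patch: a hedged usage sketch of the wrapper this file adds, mirroring the tests above. The `netwatch::{IpFamily, UdpSocket}` import path is an assumption; everything else uses only APIs defined in udp.rs.)

use netwatch::{IpFamily, UdpSocket}; // assumed re-export path

async fn echo_once() -> std::io::Result<()> {
    // Bind an ephemeral IPv4 socket on localhost.
    let socket = UdpSocket::bind_local(IpFamily::V4, 0)?;
    let addr = socket.local_addr()?;

    // Send a datagram to ourselves and read it back.
    socket.send_to(b"ping", addr).await?;
    let mut buf = [0u8; 16];
    let (n, from) = socket.recv_from(&mut buf).await?;
    assert_eq!((&buf[..n], from), (&b"ping"[..], addr));

    // A NotConnected/BrokenPipe error marks the socket broken and the next
    // operation rebinds transparently; it can also be done explicitly:
    if socket.is_broken() {
        socket.rebind()?;
    }

    socket.close().await;
    Ok(())
}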
diff --git a/patches/netwatch/tests/smoke.rs b/patches/netwatch/tests/smoke.rs
new file mode 100644
index 0000000000..04da94ee0b
--- /dev/null
+++ b/patches/netwatch/tests/smoke.rs
@@ -0,0 +1,73 @@
+//! A very basic smoke test for netwatch, to make sure it doesn't error out immediately
+//! in Wasm at all.
+//!
+//! We can't test browsers easily, because that would mean we need control over turning
+//! the browser online/offline.
+//!
+//! However, this gives us a minimum guarantee that the Wasm build doesn't break fully.
+use n0_future::FutureExt;
+use netwatch::netmon;
+use testresult::TestResult;
+#[cfg(not(wasm_browser))]
+use tokio::test;
+#[cfg(wasm_browser)]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+// Enable this if you want to run these tests in the browser.
+// Unfortunately it's either-or: enable this and you can run in the browser,
+// disable it to run in node.js.
+// #[cfg(wasm_browser)]
+// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
+
+#[test]
+async fn smoke_test() -> TestResult {
+    setup_logging();
+
+    tracing::info!("Creating netmon::Monitor");
+    let monitor = netmon::Monitor::new().await?;
+    tracing::info!("netmon::Monitor created.");
+
+    // Unfortunately this doesn't do anything in node.js, because it doesn't have
+    // globalThis.navigator.onLine or globalThis.addEventListener("online"/"offline", ...) APIs,
+    // so this is more of a test to see if we gracefully handle these situations & if our
+    // .wasm files are without "env" imports.
+    tracing::info!("subscribing to netmon callback");
+    let token = monitor
+        .subscribe(|is_major| {
+            async move {
+                tracing::info!(is_major, "network change");
+            }
+            .boxed()
+        })
+        .await?;
+    tracing::info!("successfully subscribed to netmon callback");
+
+    tracing::info!("unsubscribing");
+    monitor.unsubscribe(token).await?;
+    tracing::info!("unsubscribed");
+
+    tracing::info!("dropping netmon::Monitor");
+    drop(monitor);
+    tracing::info!("dropped.");
+
+    Ok(())
+}
+
+#[cfg(wasm_browser)]
+fn setup_logging() {
+    tracing_subscriber::fmt()
+        .with_max_level(tracing::level_filters::LevelFilter::DEBUG)
+        .with_writer(
+            // To avoid trace events in the browser from showing their JS backtrace
+            tracing_subscriber_wasm::MakeConsoleWriter::default()
+                .map_trace_level_to(tracing::Level::DEBUG),
+        )
+        // If we don't do this in the browser, we get a runtime error.
+        .without_time()
+        .with_ansi(false)
+        .init();
+}
+
+#[cfg(not(wasm_browser))]
+fn setup_logging() {
+    tracing_subscriber::fmt().init();
+}
diff --git a/recall-contracts/crates/facade/Cargo.lock b/recall-contracts/crates/facade/Cargo.lock
new file mode 100644
index 0000000000..bb197ccf1f
--- /dev/null
+++ b/recall-contracts/crates/facade/Cargo.lock
@@ -0,0 +1,2089 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing. +version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24acd2f5ba97c7a320e67217274bc81fe3c3174b8e6144ec875d9d54e760e278" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec878088ec6283ce1e90d280316aadd3d6ce3de06ff63d68953c855e7e447e92" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "foldhash", + "hashbrown", + "indexmap", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +dependencies = [ + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d039d267aa5cbb7732fa6ce1fd9b5e9e29368f580f80ba9d7a8450c794de4b2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "620ae5eee30ee7216a38027dec34e0585c55099f827f92f50d11e3d2d3a4a954" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad9f7d057e00f8c5994e4ff4492b76532c51ead39353aa2ed63f8c50c0f4d52e" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.96", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74e60b084fe1aef8acecda2743ff2d93c18ff3eb67a2d3b12f62582a1e66ef5e" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1382302752cd751efd275f4d6ef65877ddf61e0e6f5ac84ef4302b79a33a31a" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "anyhow" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + 
"num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "auto_impl" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "base-x" +version = "0.2.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +dependencies = [ + "serde", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2b_simd" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +dependencies = [ + "serde", +] + +[[package]] +name = "cbor4ii" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544cf8c89359205f4f990d0e6f3828db42df85b5dac95d09157a250eb0749c4" +dependencies = [ + "serde", +] + +[[package]] +name = "cc" +version = "1.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cid" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" +dependencies = [ + "core2", + "multibase", + "multihash", + "serde", + "serde_bytes", + "unsigned-varint", +] + 
+[[package]] +name = "const-hex" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "data-encoding" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" + +[[package]] +name = "data-encoding-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b16d9d0d88a5273d830dac8b78ceb217ffc9b1d5404e5597a3542515329405b" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" +dependencies = [ + "data-encoding", + "syn 2.0.96", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "fvm_ipld_blockstore" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d064b957420f5ecc137a153baaa6c32e2eb19b674135317200b6f2537eabdbfd" +dependencies = [ + "anyhow", + "cid", + "multihash", +] + +[[package]] +name = "fvm_ipld_encoding" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90608092e31d9a06236268c58f7c36668ab4b2a48afafe3a97e08f094ad7ae50" +dependencies = [ + "anyhow", + "cid", + "fvm_ipld_blockstore", + "multihash", + "serde", + "serde_ipld_dagcbor", + "serde_repr", + "serde_tuple", + "thiserror 1.0.69", +] + +[[package]] +name = "fvm_shared" +version = "4.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d3355d3bd2eb159a734a06d67dbb21b067a99540f5aefaf7d0d26503ccc73e3" +dependencies = [ + "anyhow", + "bitflags", + "blake2b_simd", + "cid", + "data-encoding", + "data-encoding-macro", + "fvm_ipld_encoding", + "lazy_static", + "multihash", + "num-bigint", + "num-derive", + "num-integer", + "num-traits", + "serde", + "serde_tuple", + "thiserror 1.0.69", + "unsigned-varint", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", + "serde", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +dependencies = [ + "equivalent", + "hashbrown", + "serde", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libm" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "blake2b_simd", + "core2", + "multihash-derive", + "serde", + "serde-big-array", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "parity-scale-codec" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91c2d9a6a6004e205b7e881856fb1a0f5022d382acc2c01b52185f7b6f65997" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77555fd9d578b6470470463fded832619a5fec5ad6cbc551fe4d7507ce50cd3a" +dependencies = [ + "proc-macro-crate 3.2.0", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pest" +version = "2.7.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +dependencies = [ + "memchr", + "thiserror 2.0.11", + "ucd-trie", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +dependencies = [ + "proc-macro2", + "syn 2.0.96", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror 1.0.69", + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "quick-error" +version = 
"1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "serde", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.15", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + +[[package]] +name = "recall_sol_facade" +version = "0.1.2" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "anyhow", + "dunce", + "eyre", + "fvm_ipld_encoding", + "fvm_shared", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "serde", + "serde_json", + "syn 2.0.96", + "thiserror 2.0.11", + "walkdir", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "ruint" +version = "1.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + 
"num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-hash" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.25", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-big-array" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "serde_ipld_dagcbor" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e880e0b1f9c7a8db874642c1217f7e19b29e325f24ab9f0fcb11818adec7f01" +dependencies = [ + "cbor4ii", + "cid", + "scopeguard", + "serde", +] + +[[package]] +name = "serde_json" +version = "1.0.138" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "serde_tuple" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f025b91216f15a2a32aa39669329a475733590a015835d1783549a56d09427" +dependencies = [ + "serde", + "serde_tuple_macros", +] + +[[package]] +name = "serde_tuple_macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4076151d1a2b688e25aaf236997933c66e18b870d0369f8b248b8ab2be630d7e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84e4d83a0a6704561302b917a932484e1cae2d8c6354c64be8b7bac1c1fe057" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" +dependencies = [ + "cfg-if", + "fastrand", + "getrandom 0.3.1", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] diff --git a/recall-contracts/crates/facade/Cargo.toml b/recall-contracts/crates/facade/Cargo.toml new file mode 100644 index 0000000000..d0d99133c6 --- /dev/null +++ b/recall-contracts/crates/facade/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "recall_sol_facade" +authors = ["Recall Contributors"] +description = "Rust bindings for the Recall Solidity Facades" +edition = "2021" +homepage = "https://github.com/recallnet/contracts/" +license = "MIT OR Apache-2.0" +repository = "https://github.com/recallnet/contracts/" +keywords = ["recall", "rust"] +version = "0.1.2" + +[dependencies] +anyhow = "1.0.95" +alloy-primitives = { version = "~0.8.19", features = ["std"] } +alloy-sol-types = { version = "~0.8.19", features = ["std"] } +# Upgraded to FVM 4.7 for IPC main branch compatibility +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } + +[build-dependencies] +alloy-primitives = { version = "0.8.19" } +alloy-sol-macro-expander = { version = "0.8.19", features = ["json"] } +alloy-sol-macro-input = { version = "0.8.19", features = ["json"] } +alloy-sol-types = { version = "0.8.19", features = ["json"] } +dunce = "1.0.5" +eyre = "0.6.12" +prettyplease = "0.2.29" +proc-macro2 = "1.0.93" +quote = "1.0.38" +regex = "1.11.1" +syn = "2.0.96" +serde = "1.0.217" +serde_json = "1.0.138" +thiserror = "2.0.11" +walkdir = "2.5.0" + +[features] +blob-reader = [] +blobs = [] +bucket = [] +config = [] +credit = [] +gas = [] +machine = [] +timehub = [] diff --git a/recall-contracts/crates/facade/README.md b/recall-contracts/crates/facade/README.md new file mode 100644 index 0000000000..cec882e25e --- /dev/null +++ b/recall-contracts/crates/facade/README.md @@ -0,0 +1,3 @@ +# Recall Solidity Facade + +https://github.com/recallnet/contracts/tree/main/crates/facade diff --git a/recall-contracts/crates/facade/build.rs b/recall-contracts/crates/facade/build.rs new file mode 100644 index 0000000000..139f0d9706 --- /dev/null +++ b/recall-contracts/crates/facade/build.rs @@ -0,0 +1,169 @@ +//! Adapted from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/forge/bin/cmd/bind.rs +//! +//! This build script generates Rust bindings for Solidity contracts using Forge. +//! +//! Ideally, this script would programmatically execute `forge install` and `forge build` +//! to avoid committing generated artifacts (the bindings) to version control. +//! This is the standard practice for build outputs. +//! +//! Currently, downstream crates can use the pre-generated bindings directly. +//! 
However, this requires developers to manually run `make rust-bindings` (which performs the
+//! Forge build and bind) whenever the Solidity facades change and then commit the resulting
+//! changes to version control.
+//!
+//! While convenient for downstream users, this approach is suboptimal.
+//! A future improvement would be to implement programmatic `forge install` and `forge build`
+//! within this script, eliminating the manual steps and the need to commit build
+//! artifacts.
+//! This would ensure that downstream crates always use up-to-date bindings without relying on
+//! potentially outdated committed versions and would streamline the development workflow.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::path::{Path, PathBuf};
+
+use alloy_primitives::map::HashSet;
+use eyre::Result;
+use forge::{fs::json_files, MultiSolMacroGen, SolMacroGen};
+use regex::Regex;
+
+mod forge;
+
+const FACADES: &[&str] = &[
+    "BlobReader",
+    "Blobs",
+    "Bucket",
+    "Config",
+    "Credit",
+    "Gas",
+    "Machine",
+    "Timehub",
+];
+
+fn main() {
+    if std::env::var("BUILD_BINDINGS").unwrap_or("0".to_string()) == "0" {
+        return;
+    }
+
+    let cargo_dir = env!("CARGO_MANIFEST_DIR");
+    let artifacts_dir = PathBuf::from(format!("{}/../../out", cargo_dir));
+
+    for facade in FACADES {
+        let out_dir = PathBuf::from(format!(
+            "{}/src/{}_facade",
+            cargo_dir,
+            facade.to_lowercase()
+        ));
+        let select = Regex::new(format!("I{}Facade", facade).as_str()).unwrap();
+        let binder = ForgeBinder {
+            artifacts: artifacts_dir.clone(),
+            out: out_dir,
+            select: vec![select],
+        };
+        binder
+            .run()
+            .unwrap_or_else(|_| panic!("failed to generate {} bindings", facade));
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct ForgeBinder {
+    pub artifacts: PathBuf,
+    pub out: PathBuf,
+    pub select: Vec<Regex>,
+}
+
+impl ForgeBinder {
+    pub fn run(self) -> Result<()> {
+        self.generate_bindings(&self.artifacts, &self.out)?;
+        Ok(())
+    }
+
+    fn get_filter(&self) -> Result<Filter> {
+        Ok(Filter::Select(self.select.clone()))
+    }
+
+    /// Returns an iterator over the JSON files and the contract name in the `artifacts` directory.
+    fn get_json_files(&self, artifacts: &Path) -> Result<impl Iterator<Item = (String, PathBuf)>> {
+        let filter = self.get_filter()?;
+        Ok(json_files(artifacts)
+            .filter_map(|path| {
+                // Ignore the build info JSON.
+                if path.to_str()?.contains("build-info") {
+                    return None;
+                }
+
+                // We don't want `.metadata.json` files.
+                let stem = path.file_stem()?.to_str()?;
+                if stem.ends_with(".metadata") {
+                    return None;
+                }
+
+                let name = stem.split('.').next().unwrap();
+
+                // Best effort identifier cleanup.
+                let name = name.replace(char::is_whitespace, "").replace('-', "_");
+
+                Some((name, path))
+            })
+            .filter(move |(name, _path)| filter.is_match(name)))
+    }
+
+    fn get_solmacrogen(&self, artifacts: &Path) -> Result<MultiSolMacroGen> {
+        let mut dup = HashSet::<String>::default();
+        let instances = self
+            .get_json_files(artifacts)?
+            .filter_map(|(name, path)| {
+                if dup.insert(name.clone()) {
+                    Some(SolMacroGen::new(path, name))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let multi = MultiSolMacroGen::new(instances);
+        eyre::ensure!(!multi.instances.is_empty(), "No contract artifacts found");
+        Ok(multi)
+    }
+
+    /// Generate the bindings
+    fn generate_bindings(&self, artifacts: &Path, bindings_root: &Path) -> Result<()> {
+        let mut solmacrogen = self.get_solmacrogen(artifacts)?;
+        solmacrogen.write_to_module(bindings_root, false)
+    }
+}
+
+pub enum Filter {
+    All,
+    Select(Vec<Regex>),
+    Skip(Vec<Regex>),
+}
+
+impl Filter {
+    pub fn is_match(&self, name: &str) -> bool {
+        match self {
+            Self::All => true,
+            Self::Select(regexes) => regexes.iter().any(|regex| regex.is_match(name)),
+            Self::Skip(regexes) => !regexes.iter().any(|regex| regex.is_match(name)),
+        }
+    }
+
+    pub fn skip_default() -> Self {
+        let skip = [
+            ".*Test.*",
+            ".*Script",
+            "console[2]?",
+            "CommonBase",
+            "Components",
+            "[Ss]td(Chains|Math|Error|Json|Utils|Cheats|Style|Invariant|Assertions|Toml|Storage(Safe)?)",
+            "[Vv]m.*",
+            "IMulticall3",
+        ]
+        .iter()
+        .map(|pattern| Regex::new(pattern).unwrap())
+        .collect::<Vec<_>>();
+
+        Self::Skip(skip)
+    }
+}
diff --git a/recall-contracts/crates/facade/forge/forge_sol_macro_gen/mod.rs b/recall-contracts/crates/facade/forge/forge_sol_macro_gen/mod.rs
new file mode 100644
index 0000000000..fd61733bc5
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/forge_sol_macro_gen/mod.rs
@@ -0,0 +1,2 @@
+mod sol_macro_gen;
+pub use sol_macro_gen::*;
diff --git a/recall-contracts/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs b/recall-contracts/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs
new file mode 100644
index 0000000000..8086c059c3
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs
@@ -0,0 +1,154 @@
+//! Partially copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/sol-macro-gen/src/sol_macro_gen.rs
+//!
+//! SolMacroGen and MultiSolMacroGen
+//!
+//! This type encapsulates the logic for expansion of a Rust TokenStream from Solidity tokens. It
+//! uses the `expand` method from `alloy_sol_macro_expander` underneath.
+//!
+//! It holds info such as the `path` to the ABI file, the `name` of the file and the Rust binding
+//! being generated, and lastly the `expansion` itself, i.e. the Rust binding for the provided ABI.
+//!
+//! It contains methods to read the JSON ABI, generate Rust bindings from the ABI, and ultimately
+//! write the bindings to a crate or modules.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use alloy_sol_macro_expander::expand::expand;
+use alloy_sol_macro_input::{SolInput, SolInputKind};
+use eyre::{Context, Result};
+use proc_macro2::{Span, TokenStream};
+use std::{
+    fmt::Write,
+    path::{Path, PathBuf},
+};
+
+use crate::forge::fs;
+
+pub struct SolMacroGen {
+    pub path: PathBuf,
+    pub name: String,
+    pub expansion: Option<TokenStream>,
+}
+
+impl SolMacroGen {
+    pub fn new(path: PathBuf, name: String) -> Self {
+        Self {
+            path,
+            name,
+            expansion: None,
+        }
+    }
+
+    pub fn get_sol_input(&self) -> Result<SolInput> {
+        let path = self.path.to_string_lossy().into_owned();
+        let name = proc_macro2::Ident::new(&self.name, Span::call_site());
+        let tokens = quote::quote! {
+            #name,
+            #path
+        };
+
+        let sol_input: SolInput = syn::parse2(tokens).wrap_err("failed to parse input")?;
+
+        Ok(sol_input)
+    }
+}
+
+pub struct MultiSolMacroGen {
+    pub instances: Vec<SolMacroGen>,
+}
+
+impl MultiSolMacroGen {
+    pub fn new(instances: Vec<SolMacroGen>) -> Self {
+        Self { instances }
+    }
+
+    pub fn generate_bindings(&mut self) -> Result<()> {
+        for instance in &mut self.instances {
+            Self::generate_binding(instance).wrap_err_with(|| {
+                format!(
+                    "failed to generate bindings for {}:{}",
+                    instance.path.display(),
+                    instance.name
+                )
+            })?;
+        }
+
+        Ok(())
+    }
+
+    fn generate_binding(instance: &mut SolMacroGen) -> Result<()> {
+        let input = instance.get_sol_input()?.normalize_json()?;
+
+        let SolInput {
+            attrs: _,
+            path: _,
+            kind,
+        } = input;
+
+        let tokens = match kind {
+            SolInputKind::Sol(mut file) => {
+                let sol_attr: syn::Attribute = syn::parse_quote! {
+                    #[sol()]
+                };
+                file.attrs.push(sol_attr);
+                expand(file).wrap_err("failed to expand")?
+            }
+            _ => unreachable!(),
+        };
+
+        instance.expansion = Some(tokens);
+        Ok(())
+    }
+
+    pub fn write_to_module(&mut self, bindings_path: &Path, single_file: bool) -> Result<()> {
+        self.generate_bindings()?;
+
+        let _ = fs::create_dir_all(bindings_path);
+
+        let mut mod_contents = r#"#![allow(unused_imports, clippy::all, rustdoc::all)]
+    //! This module contains the sol! generated bindings for solidity contracts.
+    //! This is autogenerated code.
+    //! Do not manually edit these files.
+    //! These files may be overwritten by the codegen system at any time.
+    "#
+        .to_string();
+
+        for instance in &self.instances {
+            let name = instance.name.to_lowercase();
+            if !single_file {
+                // Module
+                write_mod_name(&mut mod_contents, &name)?;
+                let mut contents = String::new();
+
+                write!(contents, "{}", instance.expansion.as_ref().unwrap())?;
+                let file = syn::parse_file(&contents)?;
+
+                let contents = prettyplease::unparse(&file);
+                fs::write(bindings_path.join(format!("{name}.rs")), contents)
+                    .wrap_err("Failed to write file")?;
+            } else {
+                // Single File
+                let mut contents = String::new();
+                write!(contents, "{}\n\n", instance.expansion.as_ref().unwrap())?;
+                write!(mod_contents, "{contents}")?;
+            }
+        }
+
+        let mod_path = bindings_path.join("mod.rs");
+        let mod_file = syn::parse_file(&mod_contents)?;
+        let mod_contents = prettyplease::unparse(&mod_file);
+
+        fs::write(mod_path, mod_contents).wrap_err("Failed to write mod.rs")?;
+
+        Ok(())
+    }
+}
+
+fn write_mod_name(contents: &mut String, name: &str) -> Result<()> {
+    if syn::parse_str::<syn::Item>(&format!("pub mod {name};")).is_ok() {
+        write!(contents, "pub mod {name};")?;
+    } else {
+        write!(contents, "pub mod r#{name};")?;
+    }
+    Ok(())
+}
diff --git a/recall-contracts/crates/facade/forge/foundry_common/errors/fs.rs b/recall-contracts/crates/facade/forge/foundry_common/errors/fs.rs
new file mode 100644
index 0000000000..387e70b038
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/foundry_common/errors/fs.rs
@@ -0,0 +1,174 @@
+//! Copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/common/src/errors/fs.rs
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::{
+    io,
+    path::{Path, PathBuf},
+};
+
+#[allow(unused_imports)]
+use std::fs::{self, File};
+
+/// Various error variants for `fs` operations that serve as an addition to `io::Error`, which
+/// does not provide any information about the path.
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+pub enum FsPathError {
+    /// Provides additional path context for [`fs::write`].
+ #[error("failed to write to {path:?}: {source}")] + Write { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::read`]. + #[error("failed to read from {path:?}: {source}")] + Read { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::copy`]. + #[error("failed to copy from {from:?} to {to:?}: {source}")] + Copy { + source: io::Error, + from: PathBuf, + to: PathBuf, + }, + /// Provides additional path context for [`fs::read_link`]. + #[error("failed to read from {path:?}: {source}")] + ReadLink { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`File::create`]. + #[error("failed to create file {path:?}: {source}")] + CreateFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::remove_file`]. + #[error("failed to remove file {path:?}: {source}")] + RemoveFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::create_dir`]. + #[error("failed to create dir {path:?}: {source}")] + CreateDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::remove_dir`]. + #[error("failed to remove dir {path:?}: {source}")] + RemoveDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`File::open`]. + #[error("failed to open file {path:?}: {source}")] + Open { source: io::Error, path: PathBuf }, + /// Provides additional path context for the file whose contents should be parsed as JSON. + #[error("failed to parse json file: {path:?}: {source}")] + ReadJson { + source: serde_json::Error, + path: PathBuf, + }, + /// Provides additional path context for the new JSON file. + #[error("failed to write to json file: {path:?}: {source}")] + WriteJson { + source: serde_json::Error, + path: PathBuf, + }, +} + +impl FsPathError { + /// Returns the complementary error variant for [`fs::write`]. + pub fn write(source: io::Error, path: impl Into) -> Self { + Self::Write { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::read`]. + pub fn read(source: io::Error, path: impl Into) -> Self { + Self::Read { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::copy`]. + pub fn copy(source: io::Error, from: impl Into, to: impl Into) -> Self { + Self::Copy { + source, + from: from.into(), + to: to.into(), + } + } + + /// Returns the complementary error variant for [`fs::read_link`]. + pub fn read_link(source: io::Error, path: impl Into) -> Self { + Self::ReadLink { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`File::create`]. + pub fn create_file(source: io::Error, path: impl Into) -> Self { + Self::CreateFile { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::remove_file`]. + pub fn remove_file(source: io::Error, path: impl Into) -> Self { + Self::RemoveFile { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::create_dir`]. + pub fn create_dir(source: io::Error, path: impl Into) -> Self { + Self::CreateDir { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::remove_dir`]. + pub fn remove_dir(source: io::Error, path: impl Into) -> Self { + Self::RemoveDir { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`File::open`]. 
+    pub fn open(source: io::Error, path: impl Into<PathBuf>) -> Self {
+        Self::Open {
+            source,
+            path: path.into(),
+        }
+    }
+}
+
+impl AsRef<Path> for FsPathError {
+    fn as_ref(&self) -> &Path {
+        match self {
+            Self::Write { path, .. }
+            | Self::Read { path, .. }
+            | Self::ReadLink { path, .. }
+            | Self::Copy { from: path, .. }
+            | Self::CreateDir { path, .. }
+            | Self::RemoveDir { path, .. }
+            | Self::CreateFile { path, .. }
+            | Self::RemoveFile { path, .. }
+            | Self::Open { path, .. }
+            | Self::ReadJson { path, .. }
+            | Self::WriteJson { path, .. } => path,
+        }
+    }
+}
+
+impl From<FsPathError> for io::Error {
+    fn from(value: FsPathError) -> Self {
+        match value {
+            FsPathError::Write { source, .. }
+            | FsPathError::Read { source, .. }
+            | FsPathError::ReadLink { source, .. }
+            | FsPathError::Copy { source, .. }
+            | FsPathError::CreateDir { source, .. }
+            | FsPathError::RemoveDir { source, .. }
+            | FsPathError::CreateFile { source, .. }
+            | FsPathError::RemoveFile { source, .. }
+            | FsPathError::Open { source, .. } => source,
+
+            FsPathError::ReadJson { source, .. } | FsPathError::WriteJson { source, .. } => {
+                source.into()
+            }
+        }
+    }
+}
diff --git a/recall-contracts/crates/facade/forge/foundry_common/errors/mod.rs b/recall-contracts/crates/facade/forge/foundry_common/errors/mod.rs
new file mode 100644
index 0000000000..45cc7b5a55
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/foundry_common/errors/mod.rs
@@ -0,0 +1,2 @@
+mod fs;
+pub use fs::FsPathError;
diff --git a/recall-contracts/crates/facade/forge/foundry_common/fs.rs b/recall-contracts/crates/facade/forge/foundry_common/fs.rs
new file mode 100644
index 0000000000..cac70f025f
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/foundry_common/fs.rs
@@ -0,0 +1,190 @@
+//! Copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/common/src/fs.rs
+//!
+//! Contains various `std::fs` wrapper functions that also contain the target path in their errors.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use crate::forge::errors::FsPathError;
+use serde::{de::DeserializeOwned, Serialize};
+use std::{
+    fs::{self, File},
+    io::{BufWriter, Write},
+    path::{Component, Path, PathBuf},
+};
+
+/// The [`fs`](self) result type.
+pub type Result<T> = std::result::Result<T, FsPathError>;
+
+/// Wrapper for [`File::create`].
+pub fn create_file(path: impl AsRef<Path>) -> Result<File> {
+    let path = path.as_ref();
+    File::create(path).map_err(|err| FsPathError::create_file(err, path))
+}
+
+/// Wrapper for [`std::fs::remove_file`].
+pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_file(path).map_err(|err| FsPathError::remove_file(err, path))
+}
+
+/// Wrapper for [`std::fs::read`].
+pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
+    let path = path.as_ref();
+    fs::read(path).map_err(|err| FsPathError::read(err, path))
+}
+
+/// Wrapper for [`std::fs::read_link`].
+pub fn read_link(path: impl AsRef<Path>) -> Result<PathBuf> {
+    let path = path.as_ref();
+    fs::read_link(path).map_err(|err| FsPathError::read_link(err, path))
+}
+
+/// Wrapper for [`std::fs::read_to_string`].
+pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
+    let path = path.as_ref();
+    fs::read_to_string(path).map_err(|err| FsPathError::read(err, path))
+}
+
+/// Reads the JSON file and deserializes it into the provided type.
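+///
+/// A small usage sketch (the artifact path here is hypothetical):
+///
+/// ```ignore
+/// let abi: serde_json::Value =
+///     read_json_file(Path::new("out/IBlobsFacade.sol/IBlobsFacade.json"))?;
+/// ```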
+pub fn read_json_file<T: DeserializeOwned>(path: &Path) -> Result<T> {
+    // read the file into a byte array first
+    // https://github.com/serde-rs/json/issues/160
+    let s = read_to_string(path)?;
+    serde_json::from_str(&s).map_err(|source| FsPathError::ReadJson {
+        source,
+        path: path.into(),
+    })
+}
+
+/// Writes the object as a JSON object.
+pub fn write_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> {
+    let file = create_file(path)?;
+    let mut writer = BufWriter::new(file);
+    serde_json::to_writer(&mut writer, obj).map_err(|source| FsPathError::WriteJson {
+        source,
+        path: path.into(),
+    })?;
+    writer.flush().map_err(|e| FsPathError::write(e, path))
+}
+
+/// Writes the object as a pretty JSON object.
+pub fn write_pretty_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> {
+    let file = create_file(path)?;
+    let mut writer = BufWriter::new(file);
+    serde_json::to_writer_pretty(&mut writer, obj).map_err(|source| FsPathError::WriteJson {
+        source,
+        path: path.into(),
+    })?;
+    writer.flush().map_err(|e| FsPathError::write(e, path))
+}
+
+/// Wrapper for `std::fs::write`
+pub fn write(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
+    let path = path.as_ref();
+    fs::write(path, contents).map_err(|err| FsPathError::write(err, path))
+}
+
+/// Wrapper for `std::fs::copy`
+pub fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<u64> {
+    let from = from.as_ref();
+    let to = to.as_ref();
+    fs::copy(from, to).map_err(|err| FsPathError::copy(err, from, to))
+}
+
+/// Wrapper for `std::fs::create_dir`
+pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::create_dir(path).map_err(|err| FsPathError::create_dir(err, path))
+}
+
+/// Wrapper for `std::fs::create_dir_all`
+pub fn create_dir_all(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::create_dir_all(path).map_err(|err| FsPathError::create_dir(err, path))
+}
+
+/// Wrapper for `std::fs::remove_dir`
+pub fn remove_dir(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_dir(path).map_err(|err| FsPathError::remove_dir(err, path))
+}
+
+/// Wrapper for `std::fs::remove_dir_all`
+pub fn remove_dir_all(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_dir_all(path).map_err(|err| FsPathError::remove_dir(err, path))
+}
+
+/// Wrapper for `std::fs::File::open`
+pub fn open(path: impl AsRef<Path>) -> Result<File> {
+    let path = path.as_ref();
+    fs::File::open(path).map_err(|err| FsPathError::open(err, path))
+}
+
+/// Normalize a path, removing things like `.` and `..`.
+///
+/// NOTE: This does not return symlinks and does not touch the filesystem at all (unlike
+/// [`std::fs::canonicalize`])
+///
+/// ref:
+pub fn normalize_path(path: &Path) -> PathBuf {
+    let mut components = path.components().peekable();
+    let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
+        components.next();
+        PathBuf::from(c.as_os_str())
+    } else {
+        PathBuf::new()
+    };
+
+    for component in components {
+        match component {
+            Component::Prefix(..) => unreachable!(),
+            Component::RootDir => {
+                ret.push(component.as_os_str());
+            }
+            Component::CurDir => {}
+            Component::ParentDir => {
+                ret.pop();
+            }
+            Component::Normal(c) => {
+                ret.push(c);
+            }
+        }
+    }
+    ret
+}
+
+/// Returns an iterator over all files with the given extension under the `root` dir.
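+///
+/// For example (assuming a Forge-style `out/` artifacts directory, as the build script uses),
+/// collecting every `.json` file:
+///
+/// ```ignore
+/// let artifacts: Vec<PathBuf> = files_with_ext(Path::new("out"), "json").collect();
+/// ```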
+pub fn files_with_ext<'a>(root: &Path, ext: &'a str) -> impl Iterator<Item = PathBuf> + 'a {
+    walkdir::WalkDir::new(root)
+        .sort_by_file_name()
+        .into_iter()
+        .filter_map(walkdir::Result::ok)
+        .filter(|e| e.file_type().is_file() && e.path().extension() == Some(ext.as_ref()))
+        .map(walkdir::DirEntry::into_path)
+}
+
+/// Returns an iterator over all JSON files under the `root` dir.
+pub fn json_files(root: &Path) -> impl Iterator<Item = PathBuf> {
+    files_with_ext(root, "json")
+}
+
+/// Canonicalize a path, returning an error if the path does not exist.
+///
+/// Mainly useful to apply canonicalization to paths obtained from project files but still error
+/// properly instead of flattening the errors.
+pub fn canonicalize_path(path: impl AsRef<Path>) -> std::io::Result<PathBuf> {
+    dunce::canonicalize(path)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_normalize_path() {
+        let p = Path::new("/a/../file.txt");
+        let normalized = normalize_path(p);
+        assert_eq!(normalized, PathBuf::from("/file.txt"));
+    }
+}
diff --git a/recall-contracts/crates/facade/forge/foundry_common/mod.rs b/recall-contracts/crates/facade/forge/foundry_common/mod.rs
new file mode 100644
index 0000000000..c99fb1cd03
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/foundry_common/mod.rs
@@ -0,0 +1,2 @@
+pub mod errors;
+pub mod fs;
diff --git a/recall-contracts/crates/facade/forge/mod.rs b/recall-contracts/crates/facade/forge/mod.rs
new file mode 100644
index 0000000000..d28619d6ec
--- /dev/null
+++ b/recall-contracts/crates/facade/forge/mod.rs
@@ -0,0 +1,7 @@
+#![allow(dead_code)]
+
+mod forge_sol_macro_gen;
+mod foundry_common;
+
+pub use forge_sol_macro_gen::*;
+pub use foundry_common::*;
diff --git a/recall-contracts/crates/facade/src/blobreader_facade/iblobreaderfacade.rs b/recall-contracts/crates/facade/src/blobreader_facade/iblobreaderfacade.rs
new file mode 100644
index 0000000000..224a1765f4
--- /dev/null
+++ b/recall-contracts/crates/facade/src/blobreader_facade/iblobreaderfacade.rs
@@ -0,0 +1,554 @@
+/**
+
+Generated by the following Solidity interface...
+```solidity +interface IBlobReaderFacade { + event ReadRequestClosed(bytes32 id); + event ReadRequestOpened(bytes32 id, bytes32 blobHash, uint256 readOffset, uint256 readLength, address callbackAddress, uint256 callbackMethod); + event ReadRequestPending(bytes32 id); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "event", + "name": "ReadRequestClosed", + "inputs": [ + { + "name": "id", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ReadRequestOpened", + "inputs": [ + { + "name": "id", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "blobHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "readOffset", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "readLength", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "callbackAddress", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "callbackMethod", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ReadRequestPending", + "inputs": [ + { + "name": "id", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IBlobReaderFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `ReadRequestClosed(bytes32)` and selector `0x9a8c63a9b921adb4983af5ca5dd1649500a411a34894cb1c0f9fab740b6f75ed`. 
+    ```solidity
+    event ReadRequestClosed(bytes32 id);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct ReadRequestClosed {
+        #[allow(missing_docs)]
+        pub id: ::alloy_sol_types::private::FixedBytes<32>,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for ReadRequestClosed {
+            type DataTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,);
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str = "ReadRequestClosed(bytes32)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    154u8, 140u8, 99u8, 169u8, 185u8, 33u8, 173u8, 180u8, 152u8, 58u8, 245u8,
+                    202u8, 93u8, 209u8, 100u8, 149u8, 0u8, 164u8, 17u8, 163u8, 72u8, 148u8, 203u8,
+                    28u8, 15u8, 159u8, 171u8, 116u8, 11u8, 111u8, 117u8, 237u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self { id: data.0 }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::FixedBytes<
+                        32,
+                    > as alloy_sol_types::SolType>::tokenize(&self.id),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for ReadRequestClosed {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&ReadRequestClosed> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &ReadRequestClosed) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
+    /**Event with signature `ReadRequestOpened(bytes32,bytes32,uint256,uint256,address,uint256)` and selector `0xd540be3f3450d40e6b169d0adac00a1e18cba05ee46950b4de6383b76c780f59`.
+    ```solidity
+    event ReadRequestOpened(bytes32 id, bytes32 blobHash, uint256 readOffset, uint256 readLength, address callbackAddress, uint256 callbackMethod);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct ReadRequestOpened {
+        #[allow(missing_docs)]
+        pub id: ::alloy_sol_types::private::FixedBytes<32>,
+        #[allow(missing_docs)]
+        pub blobHash: ::alloy_sol_types::private::FixedBytes<32>,
+        #[allow(missing_docs)]
+        pub readOffset: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub readLength: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub callbackAddress: ::alloy_sol_types::private::Address,
+        #[allow(missing_docs)]
+        pub callbackMethod: ::alloy_sol_types::private::primitives::aliases::U256,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for ReadRequestOpened {
+            type DataTuple<'a> = (
+                ::alloy_sol_types::sol_data::FixedBytes<32>,
+                ::alloy_sol_types::sol_data::FixedBytes<32>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Address,
+                ::alloy_sol_types::sol_data::Uint<256>,
+            );
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str =
+                "ReadRequestOpened(bytes32,bytes32,uint256,uint256,address,uint256)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    213u8, 64u8, 190u8, 63u8, 52u8, 80u8, 212u8, 14u8, 107u8, 22u8, 157u8, 10u8,
+                    218u8, 192u8, 10u8, 30u8, 24u8, 203u8, 160u8, 94u8, 228u8, 105u8, 80u8, 180u8,
+                    222u8, 99u8, 131u8, 183u8, 108u8, 120u8, 15u8, 89u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self {
+                    id: data.0,
+                    blobHash: data.1,
+                    readOffset: data.2,
+                    readLength: data.3,
+                    callbackAddress: data.4,
+                    callbackMethod: data.5,
+                }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::FixedBytes<
+                        32,
+                    > as alloy_sol_types::SolType>::tokenize(&self.id),
+                    <::alloy_sol_types::sol_data::FixedBytes<
+                        32,
+                    > as alloy_sol_types::SolType>::tokenize(&self.blobHash),
+                    <::alloy_sol_types::sol_data::Uint<
+                        256,
+                    > as alloy_sol_types::SolType>::tokenize(&self.readOffset),
+                    <::alloy_sol_types::sol_data::Uint<
+                        256,
+                    > as alloy_sol_types::SolType>::tokenize(&self.readLength),
+                    <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(
+                        &self.callbackAddress,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<
+                        256,
+                    > as alloy_sol_types::SolType>::tokenize(&self.callbackMethod),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for ReadRequestOpened {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&ReadRequestOpened> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &ReadRequestOpened) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
+    /**Event with signature `ReadRequestPending(bytes32)` and selector `0x6b9c9f2ecba3015efc370b4e57621c55d8c1f17805015860f0b337a0288512e4`.
+    ```solidity
+    event ReadRequestPending(bytes32 id);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct ReadRequestPending {
+        #[allow(missing_docs)]
+        pub id: ::alloy_sol_types::private::FixedBytes<32>,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for ReadRequestPending {
+            type DataTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,);
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str = "ReadRequestPending(bytes32)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    107u8, 156u8, 159u8, 46u8, 203u8, 163u8, 1u8, 94u8, 252u8, 55u8, 11u8, 78u8,
+                    87u8, 98u8, 28u8, 85u8, 216u8, 193u8, 241u8, 120u8, 5u8, 1u8, 88u8, 96u8,
+                    240u8, 179u8, 55u8, 160u8, 40u8, 133u8, 18u8, 228u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self { id: data.0 }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::FixedBytes<
+                        32,
+                    > as alloy_sol_types::SolType>::tokenize(&self.id),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for ReadRequestPending {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&ReadRequestPending> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &ReadRequestPending) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
+    ///Container for all the [`IBlobReaderFacade`](self) events.
+ pub enum IBlobReaderFacadeEvents { + #[allow(missing_docs)] + ReadRequestClosed(ReadRequestClosed), + #[allow(missing_docs)] + ReadRequestOpened(ReadRequestOpened), + #[allow(missing_docs)] + ReadRequestPending(ReadRequestPending), + } + #[automatically_derived] + impl IBlobReaderFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 107u8, 156u8, 159u8, 46u8, 203u8, 163u8, 1u8, 94u8, 252u8, 55u8, 11u8, 78u8, 87u8, + 98u8, 28u8, 85u8, 216u8, 193u8, 241u8, 120u8, 5u8, 1u8, 88u8, 96u8, 240u8, 179u8, + 55u8, 160u8, 40u8, 133u8, 18u8, 228u8, + ], + [ + 154u8, 140u8, 99u8, 169u8, 185u8, 33u8, 173u8, 180u8, 152u8, 58u8, 245u8, 202u8, + 93u8, 209u8, 100u8, 149u8, 0u8, 164u8, 17u8, 163u8, 72u8, 148u8, 203u8, 28u8, 15u8, + 159u8, 171u8, 116u8, 11u8, 111u8, 117u8, 237u8, + ], + [ + 213u8, 64u8, 190u8, 63u8, 52u8, 80u8, 212u8, 14u8, 107u8, 22u8, 157u8, 10u8, 218u8, + 192u8, 10u8, 30u8, 24u8, 203u8, 160u8, 94u8, 228u8, 105u8, 80u8, 180u8, 222u8, + 99u8, 131u8, 183u8, 108u8, 120u8, 15u8, 89u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IBlobReaderFacadeEvents { + const NAME: &'static str = "IBlobReaderFacadeEvents"; + const COUNT: usize = 3usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestClosed) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestOpened) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestPending) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IBlobReaderFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::ReadRequestClosed(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ReadRequestOpened(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ReadRequestPending(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::ReadRequestClosed(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ReadRequestOpened(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ReadRequestPending(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/blobreader_facade/mod.rs b/recall-contracts/crates/facade/src/blobreader_facade/mod.rs new file mode 100644 index 0000000000..80b3587bde --- /dev/null +++ b/recall-contracts/crates/facade/src/blobreader_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! 
This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iblobreaderfacade; diff --git a/recall-contracts/crates/facade/src/blobs_facade/iblobsfacade.rs b/recall-contracts/crates/facade/src/blobs_facade/iblobsfacade.rs new file mode 100644 index 0000000000..99cf72b6fe --- /dev/null +++ b/recall-contracts/crates/facade/src/blobs_facade/iblobsfacade.rs @@ -0,0 +1,3415 @@ +/** + +Generated by the following Solidity interface... +```solidity +interface IBlobsFacade { + type BlobStatus is uint8; + struct Blob { + uint64 size; + bytes32 metadataHash; + Subscription[] subscriptions; + BlobStatus status; + } + struct SubnetStats { + uint256 balance; + uint64 capacityFree; + uint64 capacityUsed; + uint256 creditSold; + uint256 creditCommitted; + uint256 creditDebited; + uint256 tokenCreditRate; + uint64 numAccounts; + uint64 numBlobs; + uint64 numAdded; + uint64 bytesAdded; + uint64 numResolving; + uint64 bytesResolving; + } + struct Subscription { + string subscriptionId; + uint64 expiry; + } + struct TrimBlobExpiries { + uint32 processed; + bytes32 nextKey; + } + + event BlobAdded(address indexed subscriber, bytes32 hash, uint256 size, uint256 expiry, uint256 bytesUsed); + event BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased); + event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved); + event BlobPending(address indexed subscriber, bytes32 hash, bytes32 sourceId); + + function addBlob(address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + function deleteBlob(address subscriber, bytes32 blobHash, string memory subscriptionId) external; + function getBlob(bytes32 blobHash) external view returns (Blob memory blob); + function getStats() external view returns (SubnetStats memory stats); + function overwriteBlob(bytes32 oldHash, address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + function trimBlobExpiries(address subscriber, bytes32 startingHash, uint32 limit) external returns (TrimBlobExpiries memory); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "addBlob", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "deleteBlob", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getBlob", + "inputs": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "blob", + 
"type": "tuple", + "internalType": "struct Blob", + "components": [ + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptions", + "type": "tuple[]", + "internalType": "struct Subscription[]", + "components": [ + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + } + ] + }, + { + "name": "status", + "type": "uint8", + "internalType": "enum BlobStatus" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getStats", + "inputs": [], + "outputs": [ + { + "name": "stats", + "type": "tuple", + "internalType": "struct SubnetStats", + "components": [ + { + "name": "balance", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "capacityFree", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "capacityUsed", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditSold", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditCommitted", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditDebited", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "tokenCreditRate", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "numAccounts", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numBlobs", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numAdded", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "bytesAdded", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numResolving", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "bytesResolving", + "type": "uint64", + "internalType": "uint64" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "overwriteBlob", + "inputs": [ + { + "name": "oldHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "sponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "trimBlobExpiries", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "startingHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "limit", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct TrimBlobExpiries", + "components": [ + { + "name": "processed", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "nextKey", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "BlobAdded", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + 
"internalType": "bytes32" + }, + { + "name": "size", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "bytesUsed", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobDeleted", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "bytesReleased", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobFinalized", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "resolved", + "type": "bool", + "indexed": false, + "internalType": "bool" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobPending", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "sourceId", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IBlobsFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct BlobStatus(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl BlobStatus { + /// The Solidity type name. 
+ pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for BlobStatus { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for BlobStatus { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct Blob { uint64 size; bytes32 metadataHash; Subscription[] subscriptions; BlobStatus status; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Blob { + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptions: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub status: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Array, + BlobStatus, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + u64, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::Vec<::RustType>, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + 
::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Blob) -> Self { + ( + value.size, + value.metadataHash, + value.subscriptions, + value.status, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Blob { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + size: tuple.0, + metadataHash: tuple.1, + subscriptions: tuple.2, + status: tuple.3, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Blob { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Blob { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.metadataHash), + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::SolType>::tokenize(&self.subscriptions), + ::tokenize(&self.status), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Blob { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Blob { + const NAME: &'static str = "Blob"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Blob(uint64 size,bytes32 metadataHash,Subscription[] subscriptions,uint8 status)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components + .extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadataHash) + .0, + <::alloy_sol_types::sol_data::Array< + 
Subscription, + > as alloy_sol_types::SolType>::eip712_data_word(&self.subscriptions) + .0, + ::eip712_data_word( + &self.status, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Blob { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadataHash, + ) + + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.subscriptions, + ) + + ::topic_preimage_length( + &rust.status, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadataHash, + out, + ); + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.subscriptions, + out, + ); + ::encode_topic_preimage( + &rust.status, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct SubnetStats { uint256 balance; uint64 capacityFree; uint64 capacityUsed; uint256 creditSold; uint256 creditCommitted; uint256 creditDebited; uint256 tokenCreditRate; uint64 numAccounts; uint64 numBlobs; uint64 numAdded; uint64 bytesAdded; uint64 numResolving; uint64 bytesResolving; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct SubnetStats { + #[allow(missing_docs)] + pub balance: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub capacityFree: u64, + #[allow(missing_docs)] + pub capacityUsed: u64, + #[allow(missing_docs)] + pub creditSold: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditCommitted: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditDebited: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub tokenCreditRate: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub numAccounts: u64, + #[allow(missing_docs)] + pub numBlobs: u64, + #[allow(missing_docs)] + pub numAdded: u64, + #[allow(missing_docs)] + pub bytesAdded: u64, + #[allow(missing_docs)] + pub numResolving: u64, + #[allow(missing_docs)] + pub bytesResolving: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + 
::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + u64, + u64, + u64, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: SubnetStats) -> Self { + ( + value.balance, + value.capacityFree, + value.capacityUsed, + value.creditSold, + value.creditCommitted, + value.creditDebited, + value.tokenCreditRate, + value.numAccounts, + value.numBlobs, + value.numAdded, + value.bytesAdded, + value.numResolving, + value.bytesResolving, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for SubnetStats { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + balance: tuple.0, + capacityFree: tuple.1, + capacityUsed: tuple.2, + creditSold: tuple.3, + creditCommitted: tuple.4, + creditDebited: tuple.5, + tokenCreditRate: tuple.6, + numAccounts: tuple.7, + numBlobs: tuple.8, + numAdded: tuple.9, + bytesAdded: tuple.10, + numResolving: tuple.11, + bytesResolving: tuple.12, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for SubnetStats { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for SubnetStats { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.balance, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.capacityFree, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.capacityUsed, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditSold, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditCommitted, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditDebited, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.tokenCreditRate, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numAccounts, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numBlobs, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numAdded, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.bytesAdded, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numResolving, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.bytesResolving, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return 
size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for SubnetStats { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for SubnetStats { + const NAME: &'static str = "SubnetStats"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "SubnetStats(uint256 balance,uint64 capacityFree,uint64 capacityUsed,uint256 creditSold,uint256 creditCommitted,uint256 creditDebited,uint256 tokenCreditRate,uint64 numAccounts,uint64 numBlobs,uint64 numAdded,uint64 bytesAdded,uint64 numResolving,uint64 bytesResolving)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.balance) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityFree) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditSold) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.creditCommitted, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditDebited) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.tokenCreditRate, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numAccounts) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numBlobs) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numAdded) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as 
alloy_sol_types::SolType>::eip712_data_word(&self.bytesAdded) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numResolving) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.bytesResolving, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for SubnetStats { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.balance, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityFree, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditSold, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditCommitted, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditDebited, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.tokenCreditRate, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numAccounts, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numBlobs, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numAdded, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.bytesAdded, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numResolving, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.bytesResolving, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.balance, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityFree, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditSold, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditCommitted, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditDebited, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.tokenCreditRate, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numAccounts, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as 
alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numBlobs, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numAdded, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.bytesAdded, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numResolving, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.bytesResolving, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Subscription { string subscriptionId; uint64 expiry; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Subscription { + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub expiry: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String, u64); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Subscription) -> Self { + (value.subscriptionId, value.expiry) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Subscription { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriptionId: tuple.0, + expiry: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Subscription { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Subscription { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.expiry, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as 
alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Subscription { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Subscription { + const NAME: &'static str = "Subscription"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Subscription(string subscriptionId,uint64 expiry)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.subscriptionId, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Subscription { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.subscriptionId, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.subscriptionId, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct TrimBlobExpiries { uint32 processed; bytes32 nextKey; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct TrimBlobExpiries { + #[allow(missing_docs)] + pub processed: u32, + #[allow(missing_docs)] + pub nextKey: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u32, ::alloy_sol_types::private::FixedBytes<32>); + #[cfg(test)] + #[allow(dead_code, 
unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: TrimBlobExpiries) -> Self { + (value.processed, value.nextKey) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for TrimBlobExpiries { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + processed: tuple.0, + nextKey: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for TrimBlobExpiries { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for TrimBlobExpiries { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.processed), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.nextKey), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for TrimBlobExpiries { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for TrimBlobExpiries { + const NAME: &'static str = "TrimBlobExpiries"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "TrimBlobExpiries(uint32 processed,bytes32 nextKey)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.processed) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.nextKey) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for 
TrimBlobExpiries { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.processed, + ) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.nextKey, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.processed, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.nextKey, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `BlobAdded(address,bytes32,uint256,uint256,uint256)` and selector `0xd42c7814518f1b7f5919557d327e88cddb7b02fc91085b402e94083243a06a8d`. + ```solidity + event BlobAdded(address indexed subscriber, bytes32 hash, uint256 size, uint256 expiry, uint256 bytesUsed); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobAdded { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub bytesUsed: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobAdded { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobAdded(address,bytes32,uint256,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 212u8, 44u8, 120u8, 20u8, 81u8, 143u8, 27u8, 127u8, 89u8, 25u8, 85u8, 125u8, + 50u8, 126u8, 136u8, 205u8, 219u8, 123u8, 2u8, 252u8, 145u8, 8u8, 91u8, 64u8, + 46u8, 148u8, 8u8, 50u8, 67u8, 160u8, 106u8, 141u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + size: data.1, + expiry: data.2, + bytesUsed: data.3, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + 
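+ // For BlobAdded the indexed `subscriber` travels as topic1, while
+ // the remaining four fields are ABI-encoded into the data section,
+ // so tokenize_body below only handles (hash, size, expiry,
+ // bytesUsed).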
#[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.bytesUsed), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobAdded { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobAdded> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobAdded) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobDeleted(address,bytes32,uint256,uint256)` and selector `0x2e6567b73082b547dc70b1e1697dc20d2c21c44915c3af4efd6ce7cc9905a1ce`. + ```solidity + event BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobDeleted { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub bytesReleased: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobDeleted { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobDeleted(address,bytes32,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 46u8, 101u8, 103u8, 183u8, 48u8, 130u8, 181u8, 71u8, 220u8, 112u8, 177u8, + 225u8, 105u8, 125u8, 194u8, 13u8, 44u8, 33u8, 196u8, 73u8, 21u8, 195u8, 175u8, + 78u8, 253u8, 108u8, 231u8, 204u8, 153u8, 5u8, 161u8, 206u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + size: data.1, + bytesReleased: data.2, + } + } 
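+ // As with BlobAdded, `new` reassembles the event from its wire
+ // form: topics.1 restores the indexed subscriber, and the decoded
+ // data tuple supplies (hash, size, bytesReleased).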
+ #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.bytesReleased), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobDeleted { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobDeleted> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobDeleted) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobFinalized(address,bytes32,bool)` and selector `0x74accb1da870635a4e757ed45bf2f8016f9b08bfb46a9f6183bb74b2a362c280`. 
+ ```solidity + event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobFinalized { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub resolved: bool, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobFinalized { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Bool, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobFinalized(address,bytes32,bool)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 116u8, 172u8, 203u8, 29u8, 168u8, 112u8, 99u8, 90u8, 78u8, 117u8, 126u8, 212u8, + 91u8, 242u8, 248u8, 1u8, 111u8, 155u8, 8u8, 191u8, 180u8, 106u8, 159u8, 97u8, + 131u8, 187u8, 116u8, 178u8, 163u8, 98u8, 194u8, 128u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + resolved: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.resolved, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobFinalized { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobFinalized> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobFinalized) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobPending(address,bytes32,bytes32)` and selector `0x57e4769774fa6b36c8faf32c5b177a5c15d70775d3729a530b8ec17009f31122`. 
+ ```solidity + event BlobPending(address indexed subscriber, bytes32 hash, bytes32 sourceId); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobPending { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub sourceId: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobPending { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobPending(address,bytes32,bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 87u8, 228u8, 118u8, 151u8, 116u8, 250u8, 107u8, 54u8, 200u8, 250u8, 243u8, + 44u8, 91u8, 23u8, 122u8, 92u8, 21u8, 215u8, 7u8, 117u8, 211u8, 114u8, 154u8, + 83u8, 11u8, 142u8, 193u8, 112u8, 9u8, 243u8, 17u8, 34u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + sourceId: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.sourceId), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobPending { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobPending> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobPending) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)` and selector `0x5b5cc14f`. 
+ ```solidity + function addBlob(address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addBlobCall { + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)`](addBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addBlobCall) -> Self { + ( + value.sponsor, + value.source, + value.blobHash, + value.metadataHash, + value.subscriptionId, + value.size, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + sponsor: tuple.0, + source: tuple.1, + blobHash: tuple.2, + metadataHash: tuple.3, + subscriptionId: tuple.4, + size: tuple.5, + ttl: tuple.6, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for addBlobCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + 
::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::String,
+            ::alloy_sol_types::sol_data::Uint<64>,
+            ::alloy_sol_types::sol_data::Uint<64>,
+        );
+        type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+        type Return = addBlobReturn;
+        type ReturnTuple<'a> = ();
+        type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+        const SIGNATURE: &'static str =
+            "addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)";
+        const SELECTOR: [u8; 4] = [91u8, 92u8, 193u8, 79u8];
+        #[inline]
+        fn new<'a>(
+            tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+        ) -> Self {
+            tuple.into()
+        }
+        #[inline]
+        fn tokenize(&self) -> Self::Token<'_> {
+            (
+                <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(
+                    &self.sponsor,
+                ),
+                <::alloy_sol_types::sol_data::FixedBytes<
+                    32,
+                > as alloy_sol_types::SolType>::tokenize(&self.source),
+                <::alloy_sol_types::sol_data::FixedBytes<
+                    32,
+                > as alloy_sol_types::SolType>::tokenize(&self.blobHash),
+                <::alloy_sol_types::sol_data::FixedBytes<
+                    32,
+                > as alloy_sol_types::SolType>::tokenize(&self.metadataHash),
+                <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                    &self.subscriptionId,
+                ),
+                <::alloy_sol_types::sol_data::Uint<
+                    64,
+                > as alloy_sol_types::SolType>::tokenize(&self.size),
+                <::alloy_sol_types::sol_data::Uint<
+                    64,
+                > as alloy_sol_types::SolType>::tokenize(&self.ttl),
+            )
+        }
+        #[inline]
+        fn abi_decode_returns(
+            data: &[u8],
+            validate: bool,
+        ) -> alloy_sol_types::Result<Self::Return> {
+            <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                data, validate,
+            )
+            .map(Into::into)
+        }
+    }
+};
+/**Function with signature `deleteBlob(address,bytes32,string)` and selector `0xbea9016a`.
+```solidity
+function deleteBlob(address subscriber, bytes32 blobHash, string memory subscriptionId) external;
+```*/
+#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+#[derive(Clone)]
+pub struct deleteBlobCall {
+    #[allow(missing_docs)]
+    pub subscriber: ::alloy_sol_types::private::Address,
+    #[allow(missing_docs)]
+    pub blobHash: ::alloy_sol_types::private::FixedBytes<32>,
+    #[allow(missing_docs)]
+    pub subscriptionId: ::alloy_sol_types::private::String,
+}
+///Container type for the return parameters of the [`deleteBlob(address,bytes32,string)`](deleteBlobCall) function.
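
A quick sanity check of the `addBlobCall` bindings above, as a minimal sketch: it assumes the generated types are in scope (same module) and that the `alloy-sol-types` and `alloy-primitives` crates backing them are available; all field values are placeholders.

```rust
use alloy_primitives::{Address, FixedBytes};
use alloy_sol_types::SolCall;

fn main() {
    // Build a call with placeholder values and ABI-encode it.
    let call = addBlobCall {
        sponsor: Address::ZERO,
        source: FixedBytes::ZERO,
        blobHash: FixedBytes::ZERO,
        metadataHash: FixedBytes::ZERO,
        subscriptionId: "default".to_owned(),
        size: 1024,
        ttl: 3600,
    };
    let calldata = call.abi_encode();
    // The first four bytes are the selector [91, 92, 193, 79] = 0x5b5cc14f.
    assert_eq!(&calldata[..4], addBlobCall::SELECTOR.as_slice());
    // Decoding checks the selector and then reverses the encoding.
    let decoded = addBlobCall::abi_decode(&calldata, true).expect("round trip");
    assert_eq!(decoded.size, 1024);
}
```
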
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteBlobCall) -> Self { + (value.subscriber, value.blobHash, value.subscriptionId) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + blobHash: tuple.1, + subscriptionId: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for deleteBlobCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = deleteBlobReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "deleteBlob(address,bytes32,string)"; + const SELECTOR: [u8; 4] = [190u8, 169u8, 1u8, 106u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getBlob(bytes32)` and selector `0x8a4d1ad4`. 
+ ```solidity + function getBlob(bytes32 blobHash) external view returns (Blob memory blob); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getBlobCall { + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + } + ///Container type for the return parameters of the [`getBlob(bytes32)`](getBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getBlobReturn { + #[allow(missing_docs)] + pub blob: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getBlobCall) -> Self { + (value.blobHash,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { blobHash: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Blob,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getBlobReturn) -> Self { + (value.blob,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { blob: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getBlobCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getBlobReturn; + type ReturnTuple<'a> = (Blob,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getBlob(bytes32)"; + const SELECTOR: [u8; 4] = [138u8, 77u8, 26u8, 212u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getStats()` and selector `0xc59d4847`. + ```solidity + function getStats() external view returns (SubnetStats memory stats); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getStatsCall {} + ///Container type for the return parameters of the [`getStats()`](getStatsCall) function. 
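
For the single-argument `getBlobCall` just defined, a hedged sketch of what the encoding looks like on the wire (same crate assumptions as above; the hash is a placeholder):

```rust
use alloy_primitives::FixedBytes;
use alloy_sol_types::SolCall;

fn main() {
    // One static bytes32 argument: 4-byte selector plus one 32-byte word.
    let calldata = getBlobCall { blobHash: FixedBytes::ZERO }.abi_encode();
    assert_eq!(calldata.len(), 4 + 32);
    assert_eq!(&calldata[..4], getBlobCall::SELECTOR.as_slice());
    // Return data from an eth_call would then be decoded with
    // `getBlobCall::abi_decode_returns(&ret, true)`, yielding `getBlobReturn`.
}
```
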
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getStatsReturn { + #[allow(missing_docs)] + pub stats: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getStatsCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getStatsCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (SubnetStats,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getStatsReturn) -> Self { + (value.stats,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getStatsReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { stats: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getStatsCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getStatsReturn; + type ReturnTuple<'a> = (SubnetStats,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getStats()"; + const SELECTOR: [u8; 4] = [197u8, 157u8, 72u8, 71u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)` and selector `0x434fc5a4`. 
+ ```solidity + function overwriteBlob(bytes32 oldHash, address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct overwriteBlobCall { + #[allow(missing_docs)] + pub oldHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)`](overwriteBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct overwriteBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: overwriteBlobCall) -> Self { + ( + value.oldHash, + value.sponsor, + value.source, + value.blobHash, + value.metadataHash, + value.subscriptionId, + value.size, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for overwriteBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + oldHash: tuple.0, + sponsor: tuple.1, + source: tuple.2, + blobHash: tuple.3, + metadataHash: tuple.4, + subscriptionId: tuple.5, + size: tuple.6, + ttl: tuple.7, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: overwriteBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for 
overwriteBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for overwriteBlobCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = overwriteBlobReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)"; + const SELECTOR: [u8; 4] = [67u8, 79u8, 197u8, 164u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.oldHash), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.metadataHash), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `trimBlobExpiries(address,bytes32,uint32)` and selector `0x78f8af85`. + ```solidity + function trimBlobExpiries(address subscriber, bytes32 startingHash, uint32 limit) external returns (TrimBlobExpiries memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct trimBlobExpiriesCall { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub startingHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub limit: u32, + } + ///Container type for the return parameters of the [`trimBlobExpiries(address,bytes32,uint32)`](trimBlobExpiriesCall) function. 
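
The `SELECTOR` constants in these impls are spelled in decimal while the doc comments give them in hex. A one-line check (plain Rust, no external crates) confirms the two agree for `overwriteBlob`:

```rust
fn main() {
    // [67, 79, 197, 164] read as a big-endian u32 is the documented 0x434fc5a4.
    let selector: [u8; 4] = [67, 79, 197, 164];
    assert_eq!(u32::from_be_bytes(selector), 0x434f_c5a4);
}
```
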
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct trimBlobExpiriesReturn { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<32>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + u32, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: trimBlobExpiriesCall) -> Self { + (value.subscriber, value.startingHash, value.limit) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for trimBlobExpiriesCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + startingHash: tuple.1, + limit: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (TrimBlobExpiries,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: trimBlobExpiriesReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for trimBlobExpiriesReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for trimBlobExpiriesCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<32>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = trimBlobExpiriesReturn; + type ReturnTuple<'a> = (TrimBlobExpiries,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "trimBlobExpiries(address,bytes32,uint32)"; + const SELECTOR: [u8; 4] = [120u8, 248u8, 175u8, 133u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.startingHash), + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.limit), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`IBlobsFacade`](self) function calls. 
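
The `IBlobsFacadeCalls` enum defined just below dispatches on the 4-byte selector and decodes the matching call body. A minimal sketch, under the same crate assumptions as the earlier examples and with placeholder values:

```rust
use alloy_primitives::{Address, FixedBytes};
use alloy_sol_types::{SolCall, SolInterface};

fn main() {
    let calldata = trimBlobExpiriesCall {
        subscriber: Address::ZERO,
        startingHash: FixedBytes::ZERO,
        limit: 100,
    }
    .abi_encode();
    // The enum routes on the selector, then decodes the matching variant.
    match IBlobsFacadeCalls::abi_decode(&calldata, true).expect("known selector") {
        IBlobsFacadeCalls::trimBlobExpiries(call) => assert_eq!(call.limit, 100),
        _ => unreachable!("0x78f8af85 maps to trimBlobExpiries"),
    }
}
```
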
+ pub enum IBlobsFacadeCalls { + #[allow(missing_docs)] + addBlob(addBlobCall), + #[allow(missing_docs)] + deleteBlob(deleteBlobCall), + #[allow(missing_docs)] + getBlob(getBlobCall), + #[allow(missing_docs)] + getStats(getStatsCall), + #[allow(missing_docs)] + overwriteBlob(overwriteBlobCall), + #[allow(missing_docs)] + trimBlobExpiries(trimBlobExpiriesCall), + } + #[automatically_derived] + impl IBlobsFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [67u8, 79u8, 197u8, 164u8], + [91u8, 92u8, 193u8, 79u8], + [120u8, 248u8, 175u8, 133u8], + [138u8, 77u8, 26u8, 212u8], + [190u8, 169u8, 1u8, 106u8], + [197u8, 157u8, 72u8, 71u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IBlobsFacadeCalls { + const NAME: &'static str = "IBlobsFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 6usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::addBlob(_) => ::SELECTOR, + Self::deleteBlob(_) => ::SELECTOR, + Self::getBlob(_) => ::SELECTOR, + Self::getStats(_) => ::SELECTOR, + Self::overwriteBlob(_) => ::SELECTOR, + Self::trimBlobExpiries(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn overwriteBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBlobsFacadeCalls::overwriteBlob) + } + overwriteBlob + }, + { + fn addBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::addBlob) + } + addBlob + }, + { + fn trimBlobExpiries( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBlobsFacadeCalls::trimBlobExpiries) + } + trimBlobExpiries + }, + { + fn getBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::getBlob) + } + getBlob + }, + { + fn deleteBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::deleteBlob) + } + deleteBlob + }, + { + fn getStats( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::getStats) + } + getStats + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::addBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::deleteBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::getBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::getStats(inner) => { + ::abi_encoded_size(inner) + } + Self::overwriteBlob(inner) => { + ::abi_encoded_size(inner) 
+ } + Self::trimBlobExpiries(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::addBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::deleteBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getStats(inner) => { + ::abi_encode_raw(inner, out) + } + Self::overwriteBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::trimBlobExpiries(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`IBlobsFacade`](self) events. + pub enum IBlobsFacadeEvents { + #[allow(missing_docs)] + BlobAdded(BlobAdded), + #[allow(missing_docs)] + BlobDeleted(BlobDeleted), + #[allow(missing_docs)] + BlobFinalized(BlobFinalized), + #[allow(missing_docs)] + BlobPending(BlobPending), + } + #[automatically_derived] + impl IBlobsFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 46u8, 101u8, 103u8, 183u8, 48u8, 130u8, 181u8, 71u8, 220u8, 112u8, 177u8, 225u8, + 105u8, 125u8, 194u8, 13u8, 44u8, 33u8, 196u8, 73u8, 21u8, 195u8, 175u8, 78u8, + 253u8, 108u8, 231u8, 204u8, 153u8, 5u8, 161u8, 206u8, + ], + [ + 87u8, 228u8, 118u8, 151u8, 116u8, 250u8, 107u8, 54u8, 200u8, 250u8, 243u8, 44u8, + 91u8, 23u8, 122u8, 92u8, 21u8, 215u8, 7u8, 117u8, 211u8, 114u8, 154u8, 83u8, 11u8, + 142u8, 193u8, 112u8, 9u8, 243u8, 17u8, 34u8, + ], + [ + 116u8, 172u8, 203u8, 29u8, 168u8, 112u8, 99u8, 90u8, 78u8, 117u8, 126u8, 212u8, + 91u8, 242u8, 248u8, 1u8, 111u8, 155u8, 8u8, 191u8, 180u8, 106u8, 159u8, 97u8, + 131u8, 187u8, 116u8, 178u8, 163u8, 98u8, 194u8, 128u8, + ], + [ + 212u8, 44u8, 120u8, 20u8, 81u8, 143u8, 27u8, 127u8, 89u8, 25u8, 85u8, 125u8, 50u8, + 126u8, 136u8, 205u8, 219u8, 123u8, 2u8, 252u8, 145u8, 8u8, 91u8, 64u8, 46u8, 148u8, + 8u8, 50u8, 67u8, 160u8, 106u8, 141u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IBlobsFacadeEvents { + const NAME: &'static str = "IBlobsFacadeEvents"; + const COUNT: usize = 4usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data, validate) + .map(Self::BlobAdded) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobDeleted) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobFinalized) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobPending) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IBlobsFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::BlobAdded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), + Self::BlobDeleted(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + 
Self::BlobFinalized(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::BlobPending(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::BlobAdded(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobDeleted(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobFinalized(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobPending(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/blobs_facade/mod.rs b/recall-contracts/crates/facade/src/blobs_facade/mod.rs new file mode 100644 index 0000000000..3c5cc216f9 --- /dev/null +++ b/recall-contracts/crates/facade/src/blobs_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iblobsfacade; diff --git a/recall-contracts/crates/facade/src/bucket_facade/ibucketfacade.rs b/recall-contracts/crates/facade/src/bucket_facade/ibucketfacade.rs new file mode 100644 index 0000000000..4f09ce6d20 --- /dev/null +++ b/recall-contracts/crates/facade/src/bucket_facade/ibucketfacade.rs @@ -0,0 +1,4016 @@ +/** + +Generated by the following Solidity interface... +```solidity +interface IBucketFacade { + struct KeyValue { + string key; + string value; + } + struct Object { + string key; + ObjectState state; + } + struct ObjectState { + bytes32 blobHash; + uint64 size; + uint64 expiry; + KeyValue[] metadata; + } + struct ObjectValue { + bytes32 blobHash; + bytes32 recoveryHash; + uint64 size; + uint64 expiry; + KeyValue[] metadata; + } + struct Query { + Object[] objects; + string[] commonPrefixes; + string nextKey; + } + + event ObjectAdded(bytes key, bytes32 blobHash, bytes metadata); + event ObjectDeleted(bytes key, bytes32 blobHash); + event ObjectMetadataUpdated(bytes key, bytes metadata); + + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external; + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size, uint64 ttl, KeyValue[] memory metadata, bool overwrite) external; + function deleteObject(string memory key) external; + function getObject(string memory key) external view returns (ObjectValue memory); + function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory); + function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory); + function queryObjects(string memory prefix) external view returns (Query memory); + function queryObjects() external view returns (Query memory); + function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "addObject", + "inputs": [ + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "key", + "type": 
"string", + "internalType": "string" + }, + { + "name": "hash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "addObject", + "inputs": [ + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "hash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + }, + { + "name": "overwrite", + "type": "bool", + "internalType": "bool" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "deleteObject", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getObject", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct ObjectValue", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + }, + { + "name": "startKey", + "type": "string", + "internalType": "string" + }, + { + "name": "limit", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + 
} + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + }, + { + "name": "startKey", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { 
+ "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "updateObjectMetadata", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "ObjectAdded", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "blobHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ObjectDeleted", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "blobHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ObjectMetadataUpdated", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IBucketFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**```solidity + struct KeyValue { string key; string value; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct KeyValue { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: KeyValue) -> Self { + (value.key, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for KeyValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + value: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for KeyValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for KeyValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.value, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for KeyValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { 
+ as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for KeyValue { + const NAME: &'static str = "KeyValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("KeyValue(string key,string value)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.value, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for KeyValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.value, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.value, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Object { string key; ObjectState state; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Object { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub state: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String, ObjectState); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Object) -> Self { + (value.key, value.state) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Object { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + state: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Object { + type SolType = Self; + } + #[automatically_derived] + impl 
alloy_sol_types::private::SolTypeValue for Object { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ::tokenize(&self.state), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Object { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Object { + const NAME: &'static str = "Object"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("Object(string key,ObjectState state)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + ::eip712_data_word( + &self.state, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Object { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + ::topic_preimage_length( + &rust.state, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + ::encode_topic_preimage( + &rust.state, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct ObjectState { bytes32 blobHash; uint64 size; 
uint64 expiry; KeyValue[] metadata; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct ObjectState { + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: ObjectState) -> Self { + (value.blobHash, value.size, value.expiry, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for ObjectState { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + blobHash: tuple.0, + size: tuple.1, + expiry: tuple.2, + metadata: tuple.3, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for ObjectState { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for ObjectState { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for ObjectState { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + 
#[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for ObjectState { + const NAME: &'static str = "ObjectState"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "ObjectState(bytes32 blobHash,uint64 size,uint64 expiry,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.blobHash) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for ObjectState { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.blobHash, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.blobHash, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct ObjectValue { bytes32 blobHash; bytes32 recoveryHash; uint64 size; uint64 expiry; KeyValue[] metadata; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct ObjectValue { + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub recoveryHash: 
::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: ObjectValue) -> Self { + ( + value.blobHash, + value.recoveryHash, + value.size, + value.expiry, + value.metadata, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for ObjectValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + blobHash: tuple.0, + recoveryHash: tuple.1, + size: tuple.2, + expiry: tuple.3, + metadata: tuple.4, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for ObjectValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for ObjectValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for ObjectValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as 
alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for ObjectValue { + const NAME: &'static str = "ObjectValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "ObjectValue(bytes32 blobHash,bytes32 recoveryHash,uint64 size,uint64 expiry,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.blobHash) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.recoveryHash) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for ObjectValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.blobHash, + ) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.recoveryHash, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.blobHash, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.recoveryHash, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + 
alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Query { Object[] objects; string[] commonPrefixes; string nextKey; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Query { + #[allow(missing_docs)] + pub objects: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub commonPrefixes: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::String>, + #[allow(missing_docs)] + pub nextKey: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::String>, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::String>, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Query) -> Self { + (value.objects, value.commonPrefixes, value.nextKey) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Query { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + objects: tuple.0, + commonPrefixes: tuple.1, + nextKey: tuple.2, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Query { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Query { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Array< + Object, + > as alloy_sol_types::SolType>::tokenize(&self.objects), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::String, + > as alloy_sol_types::SolType>::tokenize(&self.commonPrefixes), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.nextKey, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Query { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const 
PACKED_ENCODED_SIZE: Option<usize> =
+            <UnderlyingSolTuple<'_> as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE;
+        #[inline]
+        fn valid_token(token: &Self::Token<'_>) -> bool {
+            <UnderlyingSolTuple<'_> as alloy_sol_types::SolType>::valid_token(token)
+        }
+        #[inline]
+        fn detokenize(token: Self::Token<'_>) -> Self::RustType {
+            let tuple = <UnderlyingSolTuple<'_> as alloy_sol_types::SolType>::detokenize(token);
+            <Self as ::core::convert::From<UnderlyingRustTuple<'_>>>::from(tuple)
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::SolStruct for Query {
+        const NAME: &'static str = "Query";
+        #[inline]
+        fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> {
+            alloy_sol_types::private::Cow::Borrowed(
+                "Query(Object[] objects,string[] commonPrefixes,string nextKey)",
+            )
+        }
+        #[inline]
+        fn eip712_components(
+        ) -> alloy_sol_types::private::Vec<alloy_sol_types::private::Cow<'static, str>>
+        {
+            let mut components = alloy_sol_types::private::Vec::with_capacity(1);
+            components.push(<Object as alloy_sol_types::SolStruct>::eip712_root_type());
+            components.extend(<Object as alloy_sol_types::SolStruct>::eip712_components());
+            components
+        }
+        #[inline]
+        fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec<u8> {
+            [
+                <::alloy_sol_types::sol_data::Array<
+                    Object,
+                > as alloy_sol_types::SolType>::eip712_data_word(&self.objects)
+                    .0,
+                <::alloy_sol_types::sol_data::Array<
+                    ::alloy_sol_types::sol_data::String,
+                > as alloy_sol_types::SolType>::eip712_data_word(
+                    &self.commonPrefixes,
+                )
+                    .0,
+                <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word(
+                    &self.nextKey,
+                )
+                    .0,
+            ]
+            .concat()
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::EventTopic for Query {
+        #[inline]
+        fn topic_preimage_length(rust: &Self::RustType) -> usize {
+            0usize
+                + <::alloy_sol_types::sol_data::Array<
+                    Object,
+                > as alloy_sol_types::EventTopic>::topic_preimage_length(
+                    &rust.objects,
+                )
+                + <::alloy_sol_types::sol_data::Array<
+                    ::alloy_sol_types::sol_data::String,
+                > as alloy_sol_types::EventTopic>::topic_preimage_length(
+                    &rust.commonPrefixes,
+                )
+                + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length(
+                    &rust.nextKey,
+                )
+        }
+        #[inline]
+        fn encode_topic_preimage(
+            rust: &Self::RustType,
+            out: &mut alloy_sol_types::private::Vec<u8>,
+        ) {
+            out.reserve(<Self as alloy_sol_types::EventTopic>::topic_preimage_length(rust));
+            <::alloy_sol_types::sol_data::Array<
+                Object,
+            > as alloy_sol_types::EventTopic>::encode_topic_preimage(
+                &rust.objects,
+                out,
+            );
+            <::alloy_sol_types::sol_data::Array<
+                ::alloy_sol_types::sol_data::String,
+            > as alloy_sol_types::EventTopic>::encode_topic_preimage(
+                &rust.commonPrefixes,
+                out,
+            );
+            <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage(
+                &rust.nextKey,
+                out,
+            );
+        }
+        #[inline]
+        fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken {
+            let mut out = alloy_sol_types::private::Vec::new();
+            <Self as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, &mut out);
+            alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out))
+        }
+    }
+};
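+// Editor's note: a minimal usage sketch, not part of the generated bindings.
+// Because `Query` (and the `Object`/`KeyValue` structs above) implement
+// `SolValue`, a value can be ABI-encoded directly. The module name and the
+// field values below are made up for illustration.
+#[cfg(test)]
+mod query_encode_sketch {
+    use super::*;
+    use alloy_sol_types::SolValue;
+
+    #[test]
+    fn abi_encodes_a_query() {
+        let q = Query {
+            objects: vec![],
+            commonPrefixes: vec!["foo/".to_string()],
+            nextKey: String::new(),
+        };
+        // `abi_encode` is a provided method of the `SolValue` impl above.
+        let bytes = q.abi_encode();
+        assert!(!bytes.is_empty());
+    }
+}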
+/**Event with signature `ObjectAdded(bytes,bytes32,bytes)` and selector `0x3cf4a57a6c61242c0926d9fc09a382dba36a6e92628c777f1244c459b809793c`.
+```solidity
+event ObjectAdded(bytes key, bytes32 blobHash, bytes metadata);
+```*/
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+#[derive(Clone)]
+pub struct ObjectAdded {
+    #[allow(missing_docs)]
+    pub key: ::alloy_sol_types::private::Bytes,
+    #[allow(missing_docs)]
+    pub blobHash: ::alloy_sol_types::private::FixedBytes<32>,
+    #[allow(missing_docs)]
+    pub metadata: ::alloy_sol_types::private::Bytes,
+}
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+const _: () = {
+    use ::alloy_sol_types;
+    #[automatically_derived]
+    impl alloy_sol_types::SolEvent for ObjectAdded {
+        type DataTuple<'a> = (
+            ::alloy_sol_types::sol_data::Bytes,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::Bytes,
+        );
+        type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+        type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+        const SIGNATURE: &'static str = "ObjectAdded(bytes,bytes32,bytes)";
+        const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+            alloy_sol_types::private::B256::new([
+                60u8, 244u8, 165u8, 122u8, 108u8, 97u8, 36u8, 44u8, 9u8, 38u8, 217u8, 252u8,
+                9u8, 163u8, 130u8, 219u8, 163u8, 106u8, 110u8, 146u8, 98u8, 140u8, 119u8,
+                127u8, 18u8, 68u8, 196u8, 89u8, 184u8, 9u8, 121u8, 60u8,
+            ]);
+        const ANONYMOUS: bool = false;
+        #[allow(unused_variables)]
+        #[inline]
+        fn new(
+            topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+            data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+        ) -> Self {
+            Self {
+                key: data.0,
+                blobHash: data.1,
+                metadata: data.2,
+            }
+        }
+        #[inline]
+        fn check_signature(
+            topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+        ) -> alloy_sol_types::Result<()> {
+            if topics.0 != Self::SIGNATURE_HASH {
+                return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                    Self::SIGNATURE,
+                    topics.0,
+                    Self::SIGNATURE_HASH,
+                ));
+            }
+            Ok(())
+        }
+        #[inline]
+        fn tokenize_body(&self) -> Self::DataToken<'_> {
+            (
+                <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize(
+                    &self.key,
+                ),
+                <::alloy_sol_types::sol_data::FixedBytes<
+                    32,
+                > as alloy_sol_types::SolType>::tokenize(&self.blobHash),
+                <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize(
+                    &self.metadata,
+                ),
+            )
+        }
+        #[inline]
+        fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+            (Self::SIGNATURE_HASH.into(),)
+        }
+        #[inline]
+        fn encode_topics_raw(
+            &self,
+            out: &mut [alloy_sol_types::abi::token::WordToken],
+        ) -> alloy_sol_types::Result<()> {
+            if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                return Err(alloy_sol_types::Error::Overrun);
+            }
+            out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+            Ok(())
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for ObjectAdded {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            From::from(self)
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            From::from(&self)
+        }
+    }
+    #[automatically_derived]
+    impl From<&ObjectAdded> for alloy_sol_types::private::LogData {
+        #[inline]
+        fn from(this: &ObjectAdded) -> alloy_sol_types::private::LogData {
+            alloy_sol_types::SolEvent::encode_log_data(this)
+        }
+    }
+};
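+// Editor's note: a hedged decoding sketch, not generated code. Given the raw
+// topics and data of an `ObjectAdded` log wrapped in a `LogData`,
+// `SolEvent::decode_log_data` checks topic 0 against `SIGNATURE_HASH` (via
+// `check_signature` above) and then ABI-decodes the unindexed fields. The
+// module and function names are made up.
+#[cfg(test)]
+mod object_added_decode_sketch {
+    use super::*;
+    use alloy_sol_types::{private::LogData, SolEvent};
+
+    #[allow(dead_code)]
+    fn decode(raw: &LogData) -> alloy_sol_types::Result<ObjectAdded> {
+        ObjectAdded::decode_log_data(raw, true)
+    }
+}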
+/**Event with signature `ObjectDeleted(bytes,bytes32)` and selector `0x712864228f369cc20045ca173aab7455af58fa9f6dba07491092c93d2cf7fb06`.
+```solidity
+event ObjectDeleted(bytes key, bytes32 blobHash);
+```*/
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+#[derive(Clone)]
+pub struct ObjectDeleted {
+    #[allow(missing_docs)]
+    pub key: ::alloy_sol_types::private::Bytes,
+    #[allow(missing_docs)]
+    pub blobHash: ::alloy_sol_types::private::FixedBytes<32>,
+}
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+const _: () = {
+    use ::alloy_sol_types;
+    #[automatically_derived]
+    impl alloy_sol_types::SolEvent for ObjectDeleted {
+        type DataTuple<'a> = (
+            ::alloy_sol_types::sol_data::Bytes,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+        );
+        type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+        type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+        const SIGNATURE: &'static str = "ObjectDeleted(bytes,bytes32)";
+        const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+            alloy_sol_types::private::B256::new([
+                113u8, 40u8, 100u8, 34u8, 143u8, 54u8, 156u8, 194u8, 0u8, 69u8, 202u8, 23u8,
+                58u8, 171u8, 116u8, 85u8, 175u8, 88u8, 250u8, 159u8, 109u8, 186u8, 7u8, 73u8,
+                16u8, 146u8, 201u8, 61u8, 44u8, 247u8, 251u8, 6u8,
+            ]);
+        const ANONYMOUS: bool = false;
+        #[allow(unused_variables)]
+        #[inline]
+        fn new(
+            topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+            data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+        ) -> Self {
+            Self {
+                key: data.0,
+                blobHash: data.1,
+            }
+        }
+        #[inline]
+        fn check_signature(
+            topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+        ) -> alloy_sol_types::Result<()> {
+            if topics.0 != Self::SIGNATURE_HASH {
+                return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                    Self::SIGNATURE,
+                    topics.0,
+                    Self::SIGNATURE_HASH,
+                ));
+            }
+            Ok(())
+        }
+        #[inline]
+        fn tokenize_body(&self) -> Self::DataToken<'_> {
+            (
+                <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize(
+                    &self.key,
+                ),
+                <::alloy_sol_types::sol_data::FixedBytes<
+                    32,
+                > as alloy_sol_types::SolType>::tokenize(&self.blobHash),
+            )
+        }
+        #[inline]
+        fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+            (Self::SIGNATURE_HASH.into(),)
+        }
+        #[inline]
+        fn encode_topics_raw(
+            &self,
+            out: &mut [alloy_sol_types::abi::token::WordToken],
+        ) -> alloy_sol_types::Result<()> {
+            if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                return Err(alloy_sol_types::Error::Overrun);
+            }
+            out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+            Ok(())
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for ObjectDeleted {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            From::from(self)
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            From::from(&self)
+        }
+    }
+    #[automatically_derived]
+    impl From<&ObjectDeleted> for alloy_sol_types::private::LogData {
+        #[inline]
+        fn from(this: &ObjectDeleted) -> alloy_sol_types::private::LogData {
+            alloy_sol_types::SolEvent::encode_log_data(this)
+        }
+    }
+};
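+// Editor's note: hedged round-trip sketch, not generated code. `IntoLogData`
+// (implemented just above) turns a constructed event into a `LogData` whose
+// first topic is the event's signature hash. The key and hash values below
+// are fabricated.
+#[cfg(test)]
+mod object_deleted_log_sketch {
+    use super::*;
+    use alloy_sol_types::{private::IntoLogData, SolEvent};
+
+    #[test]
+    fn topic0_is_signature_hash() {
+        let ev = ObjectDeleted {
+            key: b"some/key".to_vec().into(),
+            blobHash: [0u8; 32].into(),
+        };
+        let log = ev.to_log_data();
+        assert_eq!(log.topics()[0], ObjectDeleted::SIGNATURE_HASH);
+    }
+}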
+/**Event with signature `ObjectMetadataUpdated(bytes,bytes)` and selector `0xa53f68921d8ba6356e423077a756ff2a282ae6de5d4ecc617da09b01ead5d640`.
+```solidity
+event ObjectMetadataUpdated(bytes key, bytes metadata);
+```*/
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+#[derive(Clone)]
+pub struct ObjectMetadataUpdated {
+    #[allow(missing_docs)]
+    pub key: ::alloy_sol_types::private::Bytes,
+    #[allow(missing_docs)]
+    pub metadata: ::alloy_sol_types::private::Bytes,
+}
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+const _: () = {
+    use ::alloy_sol_types;
+    #[automatically_derived]
+    impl alloy_sol_types::SolEvent for ObjectMetadataUpdated {
+        type DataTuple<'a> = (
+            ::alloy_sol_types::sol_data::Bytes,
+            ::alloy_sol_types::sol_data::Bytes,
+        );
+        type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+        type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+        const SIGNATURE: &'static str = "ObjectMetadataUpdated(bytes,bytes)";
+        const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+            alloy_sol_types::private::B256::new([
+                165u8, 63u8, 104u8, 146u8, 29u8, 139u8, 166u8, 53u8, 110u8, 66u8, 48u8, 119u8,
+                167u8, 86u8, 255u8, 42u8, 40u8, 42u8, 230u8, 222u8, 93u8, 78u8, 204u8, 97u8,
+                125u8, 160u8, 155u8, 1u8, 234u8, 213u8, 214u8, 64u8,
+            ]);
+        const ANONYMOUS: bool = false;
+        #[allow(unused_variables)]
+        #[inline]
+        fn new(
+            topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+            data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+        ) -> Self {
+            Self {
+                key: data.0,
+                metadata: data.1,
+            }
+        }
+        #[inline]
+        fn check_signature(
+            topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+        ) -> alloy_sol_types::Result<()> {
+            if topics.0 != Self::SIGNATURE_HASH {
+                return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                    Self::SIGNATURE,
+                    topics.0,
+                    Self::SIGNATURE_HASH,
+                ));
+            }
+            Ok(())
+        }
+        #[inline]
+        fn tokenize_body(&self) -> Self::DataToken<'_> {
+            (
+                <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize(
+                    &self.key,
+                ),
+                <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize(
+                    &self.metadata,
+                ),
+            )
+        }
+        #[inline]
+        fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+            (Self::SIGNATURE_HASH.into(),)
+        }
+        #[inline]
+        fn encode_topics_raw(
+            &self,
+            out: &mut [alloy_sol_types::abi::token::WordToken],
+        ) -> alloy_sol_types::Result<()> {
+            if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                return Err(alloy_sol_types::Error::Overrun);
+            }
+            out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+            Ok(())
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for ObjectMetadataUpdated {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            From::from(self)
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            From::from(&self)
+        }
+    }
+    #[automatically_derived]
+    impl From<&ObjectMetadataUpdated> for alloy_sol_types::private::LogData {
+        #[inline]
+        fn from(this: &ObjectMetadataUpdated) -> alloy_sol_types::private::LogData {
+            alloy_sol_types::SolEvent::encode_log_data(this)
+        }
+    }
+};
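+// Editor's note: hedged sanity-check sketch, not generated code. Each event's
+// `SIGNATURE_HASH` constant should equal the keccak-256 of its canonical
+// signature string; `keccak256` is the re-export the generated code itself
+// uses. The module name is made up.
+#[cfg(test)]
+mod signature_hash_sketch {
+    use super::*;
+    use alloy_sol_types::SolEvent;
+
+    #[test]
+    fn signature_hash_matches_keccak() {
+        let hash = alloy_sol_types::private::keccak256(
+            ObjectMetadataUpdated::SIGNATURE.as_bytes(),
+        );
+        assert_eq!(hash, ObjectMetadataUpdated::SIGNATURE_HASH);
+    }
+}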
+/**Function with signature `addObject(bytes32,string,bytes32,bytes32,uint64)` and selector `0x2d6f2550`.
+```solidity
+function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external;
+```*/
+#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+#[derive(Clone)]
+pub struct addObject_0Call {
+    #[allow(missing_docs)]
+    pub source: ::alloy_sol_types::private::FixedBytes<32>,
+    #[allow(missing_docs)]
+    pub key: ::alloy_sol_types::private::String,
+    #[allow(missing_docs)]
+    pub hash: ::alloy_sol_types::private::FixedBytes<32>,
+    #[allow(missing_docs)]
+    pub recoveryHash: ::alloy_sol_types::private::FixedBytes<32>,
+    #[allow(missing_docs)]
+    pub size: u64,
+}
+///Container type for the return parameters of the [`addObject(bytes32,string,bytes32,bytes32,uint64)`](addObject_0Call) function.
+#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+#[derive(Clone)]
+pub struct addObject_0Return {}
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style
+)]
+const _: () = {
+    use ::alloy_sol_types;
+    {
+        #[doc(hidden)]
+        type UnderlyingSolTuple<'a> = (
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::String,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::Uint<64>,
+        );
+        #[doc(hidden)]
+        type UnderlyingRustTuple<'a> = (
+            ::alloy_sol_types::private::FixedBytes<32>,
+            ::alloy_sol_types::private::String,
+            ::alloy_sol_types::private::FixedBytes<32>,
+            ::alloy_sol_types::private::FixedBytes<32>,
+            u64,
+        );
+        #[cfg(test)]
+        #[allow(dead_code, unreachable_patterns)]
+        fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+            match _t {
+                alloy_sol_types::private::AssertTypeEq::<
+                    <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                >(_) => {}
+            }
+        }
+        #[automatically_derived]
+        #[doc(hidden)]
+        impl ::core::convert::From<addObject_0Call> for UnderlyingRustTuple<'_> {
+            fn from(value: addObject_0Call) -> Self {
+                (
+                    value.source,
+                    value.key,
+                    value.hash,
+                    value.recoveryHash,
+                    value.size,
+                )
+            }
+        }
+        #[automatically_derived]
+        #[doc(hidden)]
+        impl ::core::convert::From<UnderlyingRustTuple<'_>> for addObject_0Call {
+            fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                Self {
+                    source: tuple.0,
+                    key: tuple.1,
+                    hash: tuple.2,
+                    recoveryHash: tuple.3,
+                    size: tuple.4,
+                }
+            }
+        }
+    }
+    {
+        #[doc(hidden)]
+        type UnderlyingSolTuple<'a> = ();
+        #[doc(hidden)]
+        type UnderlyingRustTuple<'a> = ();
+        #[cfg(test)]
+        #[allow(dead_code, unreachable_patterns)]
+        fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+            match _t {
+                alloy_sol_types::private::AssertTypeEq::<
+                    <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                >(_) => {}
+            }
+        }
+        #[automatically_derived]
+        #[doc(hidden)]
+        impl ::core::convert::From<addObject_0Return> for UnderlyingRustTuple<'_> {
+            fn from(value: addObject_0Return) -> Self {
+                ()
+            }
+        }
+        #[automatically_derived]
+        #[doc(hidden)]
+        impl ::core::convert::From<UnderlyingRustTuple<'_>> for addObject_0Return {
+            fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                Self {}
+            }
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::SolCall for addObject_0Call {
+        type Parameters<'a> = (
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::String,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::FixedBytes<32>,
+            ::alloy_sol_types::sol_data::Uint<64>,
+        );
+        type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+        type Return = addObject_0Return;
+        type ReturnTuple<'a> = ();
+        type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+        const SIGNATURE: &'static str =
"addObject(bytes32,string,bytes32,bytes32,uint64)"; + const SELECTOR: [u8; 4] = [45u8, 111u8, 37u8, 80u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)` and selector `0x774343fe`. + ```solidity + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size, uint64 ttl, KeyValue[] memory metadata, bool overwrite) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_1Call { + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub recoveryHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub overwrite: bool, + } + ///Container type for the return parameters of the [`addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)`](addObject_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Bool, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + bool, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_1Call) -> Self { + ( + value.source, + value.key, + value.hash, + value.recoveryHash, + value.size, + value.ttl, + value.metadata, + value.overwrite, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + source: tuple.0, + key: tuple.1, + hash: tuple.2, + recoveryHash: tuple.3, + size: tuple.4, + ttl: tuple.5, + metadata: tuple.6, + overwrite: tuple.7, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for addObject_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Bool, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = addObject_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)"; + const SELECTOR: [u8; 4] = [119u8, 67u8, 67u8, 254u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as 
alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.overwrite, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `deleteObject(string)` and selector `0x2d7cb600`. + ```solidity + function deleteObject(string memory key) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteObjectCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`deleteObject(string)`](deleteObjectCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteObjectReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteObjectCall) -> Self { + (value.key,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteObjectCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { key: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteObjectReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteObjectReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for deleteObjectCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = deleteObjectReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = 
"deleteObject(string)"; + const SELECTOR: [u8; 4] = [45u8, 124u8, 182u8, 0u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getObject(string)` and selector `0x0153ea91`. + ```solidity + function getObject(string memory key) external view returns (ObjectValue memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getObjectCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`getObject(string)`](getObjectCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getObjectReturn { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getObjectCall) -> Self { + (value.key,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getObjectCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { key: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (ObjectValue,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getObjectReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getObjectReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getObjectCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getObjectReturn; + type ReturnTuple<'a> = (ObjectValue,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getObject(string)"; + const SELECTOR: [u8; 4] = [1u8, 83u8, 234u8, 145u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ) + } 
+            #[inline]
+            fn abi_decode_returns(
+                data: &[u8],
+                validate: bool,
+            ) -> alloy_sol_types::Result<Self::Return> {
+                <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                    data, validate,
+                )
+                .map(Into::into)
+            }
+        }
+    };
+    /**Function with signature `queryObjects(string,string,string,uint64)` and selector `0x17d352c0`.
+    ```solidity
+    function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory);
+    ```*/
+    #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+    #[derive(Clone)]
+    pub struct queryObjects_0Call {
+        #[allow(missing_docs)]
+        pub prefix: ::alloy_sol_types::private::String,
+        #[allow(missing_docs)]
+        pub delimiter: ::alloy_sol_types::private::String,
+        #[allow(missing_docs)]
+        pub startKey: ::alloy_sol_types::private::String,
+        #[allow(missing_docs)]
+        pub limit: u64,
+    }
+    ///Container type for the return parameters of the [`queryObjects(string,string,string,uint64)`](queryObjects_0Call) function.
+    #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+    #[derive(Clone)]
+    pub struct queryObjects_0Return {
+        #[allow(missing_docs)]
+        pub _0: <Query as alloy_sol_types::SolType>::RustType,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        {
+            #[doc(hidden)]
+            type UnderlyingSolTuple<'a> = (
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::Uint<64>,
+            );
+            #[doc(hidden)]
+            type UnderlyingRustTuple<'a> = (
+                ::alloy_sol_types::private::String,
+                ::alloy_sol_types::private::String,
+                ::alloy_sol_types::private::String,
+                u64,
+            );
+            #[cfg(test)]
+            #[allow(dead_code, unreachable_patterns)]
+            fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                match _t {
+                    alloy_sol_types::private::AssertTypeEq::<
+                        <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                    >(_) => {}
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<queryObjects_0Call> for UnderlyingRustTuple<'_> {
+                fn from(value: queryObjects_0Call) -> Self {
+                    (value.prefix, value.delimiter, value.startKey, value.limit)
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<UnderlyingRustTuple<'_>> for queryObjects_0Call {
+                fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                    Self {
+                        prefix: tuple.0,
+                        delimiter: tuple.1,
+                        startKey: tuple.2,
+                        limit: tuple.3,
+                    }
+                }
+            }
+        }
+        {
+            #[doc(hidden)]
+            type UnderlyingSolTuple<'a> = (Query,);
+            #[doc(hidden)]
+            type UnderlyingRustTuple<'a> = (<Query as alloy_sol_types::SolType>::RustType,);
+            #[cfg(test)]
+            #[allow(dead_code, unreachable_patterns)]
+            fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                match _t {
+                    alloy_sol_types::private::AssertTypeEq::<
+                        <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                    >(_) => {}
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<queryObjects_0Return> for UnderlyingRustTuple<'_> {
+                fn from(value: queryObjects_0Return) -> Self {
+                    (value._0,)
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<UnderlyingRustTuple<'_>> for queryObjects_0Return {
+                fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                    Self { _0: tuple.0 }
+                }
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::SolCall for queryObjects_0Call {
+            type Parameters<'a> = (
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::Uint<64>,
+            );
+            type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type Return = queryObjects_0Return;
+            type ReturnTuple<'a> = (Query,);
+            type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            const SIGNATURE: &'static str = "queryObjects(string,string,string,uint64)";
+            const SELECTOR: [u8; 4] = [23u8, 211u8, 82u8, 192u8];
+            #[inline]
+            fn new<'a>(
+                tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                tuple.into()
+            }
+            #[inline]
+            fn tokenize(&self) -> Self::Token<'_> {
+                (
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.prefix,
+                    ),
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.delimiter,
+                    ),
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.startKey,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize(
+                        &self.limit,
+                    ),
+                )
+            }
+            #[inline]
+            fn abi_decode_returns(
+                data: &[u8],
+                validate: bool,
+            ) -> alloy_sol_types::Result<Self::Return> {
+                <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                    data, validate,
+                )
+                .map(Into::into)
+            }
+        }
+    };
+    /**Function with signature `queryObjects(string,string,string)` and selector `0x4c53eab5`.
+    ```solidity
+    function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory);
+    ```*/
+    #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+    #[derive(Clone)]
+    pub struct queryObjects_1Call {
+        #[allow(missing_docs)]
+        pub prefix: ::alloy_sol_types::private::String,
+        #[allow(missing_docs)]
+        pub delimiter: ::alloy_sol_types::private::String,
+        #[allow(missing_docs)]
+        pub startKey: ::alloy_sol_types::private::String,
+    }
+    ///Container type for the return parameters of the [`queryObjects(string,string,string)`](queryObjects_1Call) function.
+    #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+    #[derive(Clone)]
+    pub struct queryObjects_1Return {
+        #[allow(missing_docs)]
+        pub _0: <Query as alloy_sol_types::SolType>::RustType,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        {
+            #[doc(hidden)]
+            type UnderlyingSolTuple<'a> = (
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+            );
+            #[doc(hidden)]
+            type UnderlyingRustTuple<'a> = (
+                ::alloy_sol_types::private::String,
+                ::alloy_sol_types::private::String,
+                ::alloy_sol_types::private::String,
+            );
+            #[cfg(test)]
+            #[allow(dead_code, unreachable_patterns)]
+            fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                match _t {
+                    alloy_sol_types::private::AssertTypeEq::<
+                        <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                    >(_) => {}
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<queryObjects_1Call> for UnderlyingRustTuple<'_> {
+                fn from(value: queryObjects_1Call) -> Self {
+                    (value.prefix, value.delimiter, value.startKey)
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<UnderlyingRustTuple<'_>> for queryObjects_1Call {
+                fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                    Self {
+                        prefix: tuple.0,
+                        delimiter: tuple.1,
+                        startKey: tuple.2,
+                    }
+                }
+            }
+        }
+        {
+            #[doc(hidden)]
+            type UnderlyingSolTuple<'a> = (Query,);
+            #[doc(hidden)]
+            type UnderlyingRustTuple<'a> = (<Query as alloy_sol_types::SolType>::RustType,);
+            #[cfg(test)]
+            #[allow(dead_code, unreachable_patterns)]
+            fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                match _t {
+                    alloy_sol_types::private::AssertTypeEq::<
+                        <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                    >(_) => {}
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<queryObjects_1Return> for UnderlyingRustTuple<'_> {
+                fn from(value: queryObjects_1Return) -> Self {
+                    (value._0,)
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<UnderlyingRustTuple<'_>> for queryObjects_1Return {
+                fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                    Self { _0: tuple.0 }
+                }
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::SolCall for queryObjects_1Call {
+            type Parameters<'a> = (
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+                ::alloy_sol_types::sol_data::String,
+            );
+            type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type Return = queryObjects_1Return;
+            type ReturnTuple<'a> = (Query,);
+            type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            const SIGNATURE: &'static str = "queryObjects(string,string,string)";
+            const SELECTOR: [u8; 4] = [76u8, 83u8, 234u8, 181u8];
+            #[inline]
+            fn new<'a>(
+                tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                tuple.into()
+            }
+            #[inline]
+            fn tokenize(&self) -> Self::Token<'_> {
+                (
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.prefix,
+                    ),
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.delimiter,
+                    ),
+                    <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize(
+                        &self.startKey,
+                    ),
+                )
+            }
+            #[inline]
+            fn abi_decode_returns(
+                data: &[u8],
+                validate: bool,
+            ) -> alloy_sol_types::Result<Self::Return> {
+                <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                    data, validate,
+                )
+                .map(Into::into)
+            }
+        }
+    };
+    /**Function with signature `queryObjects(string)` and selector `0x6294e9a3`.
+    ```solidity
+    function queryObjects(string memory prefix) external view returns (Query memory);
+    ```*/
+    #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+    #[derive(Clone)]
+    pub struct queryObjects_2Call {
+        #[allow(missing_docs)]
+        pub prefix: ::alloy_sol_types::private::String,
+    }
+    ///Container type for the return parameters of the [`queryObjects(string)`](queryObjects_2Call) function.
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_2Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_2Call) -> Self { + (value.prefix,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { prefix: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_2Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_2Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_2Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string)"; + const SELECTOR: [u8; 4] = [98u8, 148u8, 233u8, 163u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects()` and selector `0xa443a83f`. + ```solidity + function queryObjects() external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_3Call {} + ///Container type for the return parameters of the [`queryObjects()`](queryObjects_3Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_3Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_3Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_3Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_3Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_3Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_3Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_3Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects()"; + const SELECTOR: [u8; 4] = [164u8, 67u8, 168u8, 63u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects(string,string)` and selector `0xc9aeef81`. + ```solidity + function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_4Call { + #[allow(missing_docs)] + pub prefix: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub delimiter: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`queryObjects(string,string)`](queryObjects_4Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_4Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_4Call) -> Self { + (value.prefix, value.delimiter) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_4Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + prefix: tuple.0, + delimiter: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_4Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_4Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_4Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_4Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string,string)"; + const SELECTOR: [u8; 4] = [201u8, 174u8, 239u8, 129u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.delimiter, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `updateObjectMetadata(string,(string,string)[])` and selector `0x6f0a4ff4`. 
+ ```solidity + function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct updateObjectMetadataCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + ///Container type for the return parameters of the [`updateObjectMetadata(string,(string,string)[])`](updateObjectMetadataCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct updateObjectMetadataReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: updateObjectMetadataCall) -> Self { + (value.key, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for updateObjectMetadataCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + metadata: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: updateObjectMetadataReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for updateObjectMetadataReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for updateObjectMetadataCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Array, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = updateObjectMetadataReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "updateObjectMetadata(string,(string,string)[])"; + const SELECTOR: [u8; 4] = [111u8, 10u8, 79u8, 244u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + 
} + } + }; + ///Container for all the [`IBucketFacade`](self) function calls. + pub enum IBucketFacadeCalls { + #[allow(missing_docs)] + addObject_0(addObject_0Call), + #[allow(missing_docs)] + addObject_1(addObject_1Call), + #[allow(missing_docs)] + deleteObject(deleteObjectCall), + #[allow(missing_docs)] + getObject(getObjectCall), + #[allow(missing_docs)] + queryObjects_0(queryObjects_0Call), + #[allow(missing_docs)] + queryObjects_1(queryObjects_1Call), + #[allow(missing_docs)] + queryObjects_2(queryObjects_2Call), + #[allow(missing_docs)] + queryObjects_3(queryObjects_3Call), + #[allow(missing_docs)] + queryObjects_4(queryObjects_4Call), + #[allow(missing_docs)] + updateObjectMetadata(updateObjectMetadataCall), + } + #[automatically_derived] + impl IBucketFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [1u8, 83u8, 234u8, 145u8], + [23u8, 211u8, 82u8, 192u8], + [45u8, 111u8, 37u8, 80u8], + [45u8, 124u8, 182u8, 0u8], + [76u8, 83u8, 234u8, 181u8], + [98u8, 148u8, 233u8, 163u8], + [111u8, 10u8, 79u8, 244u8], + [119u8, 67u8, 67u8, 254u8], + [164u8, 67u8, 168u8, 63u8], + [201u8, 174u8, 239u8, 129u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IBucketFacadeCalls { + const NAME: &'static str = "IBucketFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 10usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::addObject_0(_) => ::SELECTOR, + Self::addObject_1(_) => ::SELECTOR, + Self::deleteObject(_) => ::SELECTOR, + Self::getObject(_) => ::SELECTOR, + Self::queryObjects_0(_) => { + ::SELECTOR + } + Self::queryObjects_1(_) => { + ::SELECTOR + } + Self::queryObjects_2(_) => { + ::SELECTOR + } + Self::queryObjects_3(_) => { + ::SELECTOR + } + Self::queryObjects_4(_) => { + ::SELECTOR + } + Self::updateObjectMetadata(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn getObject( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBucketFacadeCalls::getObject) + } + getObject + }, + { + fn queryObjects_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_0) + } + queryObjects_0 + }, + { + fn addObject_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::addObject_0) + } + addObject_0 + }, + { + fn deleteObject( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::deleteObject) + } + deleteObject + }, + { + fn queryObjects_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_1) + } + queryObjects_1 + }, + { 
+ fn queryObjects_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_2) + } + queryObjects_2 + }, + { + fn updateObjectMetadata( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::updateObjectMetadata) + } + updateObjectMetadata + }, + { + fn addObject_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::addObject_1) + } + addObject_1 + }, + { + fn queryObjects_3( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_3) + } + queryObjects_3 + }, + { + fn queryObjects_4( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_4) + } + queryObjects_4 + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::addObject_0(inner) => { + ::abi_encoded_size(inner) + } + Self::addObject_1(inner) => { + ::abi_encoded_size(inner) + } + Self::deleteObject(inner) => { + ::abi_encoded_size(inner) + } + Self::getObject(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_0(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_1(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_2(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_3(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_4(inner) => { + ::abi_encoded_size(inner) + } + Self::updateObjectMetadata(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::addObject_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::addObject_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::deleteObject(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getObject(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_3(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_4(inner) => { + ::abi_encode_raw(inner, out) + } + Self::updateObjectMetadata(inner) => { + ::abi_encode_raw( + inner, out, + ) + } + } + } + } + ///Container for all the [`IBucketFacade`](self) events. + pub enum IBucketFacadeEvents { + #[allow(missing_docs)] + ObjectAdded(ObjectAdded), + #[allow(missing_docs)] + ObjectDeleted(ObjectDeleted), + #[allow(missing_docs)] + ObjectMetadataUpdated(ObjectMetadataUpdated), + } + #[automatically_derived] + impl IBucketFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+        pub const SELECTORS: &'static [[u8; 32usize]] = &[
+            [
+                60u8, 244u8, 165u8, 122u8, 108u8, 97u8, 36u8, 44u8, 9u8, 38u8, 217u8, 252u8, 9u8,
+                163u8, 130u8, 219u8, 163u8, 106u8, 110u8, 146u8, 98u8, 140u8, 119u8, 127u8, 18u8,
+                68u8, 196u8, 89u8, 184u8, 9u8, 121u8, 60u8,
+            ],
+            [
+                113u8, 40u8, 100u8, 34u8, 143u8, 54u8, 156u8, 194u8, 0u8, 69u8, 202u8, 23u8, 58u8,
+                171u8, 116u8, 85u8, 175u8, 88u8, 250u8, 159u8, 109u8, 186u8, 7u8, 73u8, 16u8,
+                146u8, 201u8, 61u8, 44u8, 247u8, 251u8, 6u8,
+            ],
+            [
+                165u8, 63u8, 104u8, 146u8, 29u8, 139u8, 166u8, 53u8, 110u8, 66u8, 48u8, 119u8,
+                167u8, 86u8, 255u8, 42u8, 40u8, 42u8, 230u8, 222u8, 93u8, 78u8, 204u8, 97u8, 125u8,
+                160u8, 155u8, 1u8, 234u8, 213u8, 214u8, 64u8,
+            ],
+        ];
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::SolEventInterface for IBucketFacadeEvents {
+        const NAME: &'static str = "IBucketFacadeEvents";
+        const COUNT: usize = 3usize;
+        fn decode_raw_log(
+            topics: &[alloy_sol_types::Word],
+            data: &[u8],
+            validate: bool,
+        ) -> alloy_sol_types::Result<Self> {
+            match topics.first().copied() {
+                Some(<ObjectAdded as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <ObjectAdded as alloy_sol_types::SolEvent>::decode_raw_log(
+                        topics, data, validate,
+                    )
+                    .map(Self::ObjectAdded)
+                }
+                Some(<ObjectDeleted as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <ObjectDeleted as alloy_sol_types::SolEvent>::decode_raw_log(
+                        topics, data, validate,
+                    )
+                    .map(Self::ObjectDeleted)
+                }
+                Some(<ObjectMetadataUpdated as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <ObjectMetadataUpdated as alloy_sol_types::SolEvent>::decode_raw_log(
+                        topics, data, validate,
+                    )
+                    .map(Self::ObjectMetadataUpdated)
+                }
+                _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog {
+                    name: <Self as alloy_sol_types::SolEventInterface>::NAME,
+                    log: alloy_sol_types::private::Box::new(
+                        alloy_sol_types::private::LogData::new_unchecked(
+                            topics.to_vec(),
+                            data.to_vec().into(),
+                        ),
+                    ),
+                }),
+            }
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for IBucketFacadeEvents {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::ObjectAdded(inner) => {
+                    alloy_sol_types::private::IntoLogData::to_log_data(inner)
+                }
+                Self::ObjectDeleted(inner) => {
+                    alloy_sol_types::private::IntoLogData::to_log_data(inner)
+                }
+                Self::ObjectMetadataUpdated(inner) => {
+                    alloy_sol_types::private::IntoLogData::to_log_data(inner)
+                }
+            }
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::ObjectAdded(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
+                }
+                Self::ObjectDeleted(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
+                }
+                Self::ObjectMetadataUpdated(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
+                }
+            }
+        }
+    }
+}
diff --git a/recall-contracts/crates/facade/src/bucket_facade/mod.rs b/recall-contracts/crates/facade/src/bucket_facade/mod.rs
new file mode 100644
index 0000000000..f770fc93b6
--- /dev/null
+++ b/recall-contracts/crates/facade/src/bucket_facade/mod.rs
@@ -0,0 +1,6 @@
+#![allow(unused_imports, clippy::all, rustdoc::all)]
+//! This module contains the sol! generated bindings for solidity contracts.
+//! This is autogenerated code.
+//! Do not manually edit these files.
+//! These files may be overwritten by the codegen system at any time.
+pub mod r#ibucketfacade;
diff --git a/recall-contracts/crates/facade/src/config_facade/iconfigfacade.rs b/recall-contracts/crates/facade/src/config_facade/iconfigfacade.rs
new file mode 100644
index 0000000000..246a8a4f00
--- /dev/null
+++ b/recall-contracts/crates/facade/src/config_facade/iconfigfacade.rs
@@ -0,0 +1,432 @@
+/**
+
+Generated by the following Solidity interface...
+```solidity +interface IConfigFacade { + event ConfigAdminSet(address admin); + event ConfigSet(uint256 blobCapacity, uint256 tokenCreditRate, uint256 blobCreditDebitInterval, uint256 blobMinTtl, uint256 blobDefaultTtl, uint256 blobDeleteBatchSize, uint256 accountDebitBatchSize); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "event", + "name": "ConfigAdminSet", + "inputs": [ + { + "name": "admin", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ConfigSet", + "inputs": [ + { + "name": "blobCapacity", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "tokenCreditRate", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobCreditDebitInterval", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobMinTtl", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobDefaultTtl", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobDeleteBatchSize", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "accountDebitBatchSize", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IConfigFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `ConfigAdminSet(address)` and selector `0x17e2ccbcd78b64c943d403837b55290b3de8fd19c8df1c0ab9cf665b934292d4`. 
+    ```solidity
+    event ConfigAdminSet(address admin);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct ConfigAdminSet {
+        #[allow(missing_docs)]
+        pub admin: ::alloy_sol_types::private::Address,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for ConfigAdminSet {
+            type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,);
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str = "ConfigAdminSet(address)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    23u8, 226u8, 204u8, 188u8, 215u8, 139u8, 100u8, 201u8, 67u8, 212u8, 3u8, 131u8,
+                    123u8, 85u8, 41u8, 11u8, 61u8, 232u8, 253u8, 25u8, 200u8, 223u8, 28u8, 10u8,
+                    185u8, 207u8, 102u8, 91u8, 147u8, 66u8, 146u8, 212u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self { admin: data.0 }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(
+                        &self.admin,
+                    ),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for ConfigAdminSet {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&ConfigAdminSet> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &ConfigAdminSet) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
+    /**Event with signature `ConfigSet(uint256,uint256,uint256,uint256,uint256,uint256,uint256)` and selector `0x3e8ad89b763b9839647a482aef0ebd06350b9fe255fd58263b81888ff1717488`.
+    ```solidity
+    event ConfigSet(uint256 blobCapacity, uint256 tokenCreditRate, uint256 blobCreditDebitInterval, uint256 blobMinTtl, uint256 blobDefaultTtl, uint256 blobDeleteBatchSize, uint256 accountDebitBatchSize);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct ConfigSet {
+        #[allow(missing_docs)]
+        pub blobCapacity: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub tokenCreditRate: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub blobCreditDebitInterval: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub blobMinTtl: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub blobDefaultTtl: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub blobDeleteBatchSize: ::alloy_sol_types::private::primitives::aliases::U256,
+        #[allow(missing_docs)]
+        pub accountDebitBatchSize: ::alloy_sol_types::private::primitives::aliases::U256,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for ConfigSet {
+            type DataTuple<'a> = (
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+                ::alloy_sol_types::sol_data::Uint<256>,
+            );
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str =
+                "ConfigSet(uint256,uint256,uint256,uint256,uint256,uint256,uint256)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    62u8, 138u8, 216u8, 155u8, 118u8, 59u8, 152u8, 57u8, 100u8, 122u8, 72u8, 42u8,
+                    239u8, 14u8, 189u8, 6u8, 53u8, 11u8, 159u8, 226u8, 85u8, 253u8, 88u8, 38u8,
+                    59u8, 129u8, 136u8, 143u8, 241u8, 113u8, 116u8, 136u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self {
+                    blobCapacity: data.0,
+                    tokenCreditRate: data.1,
+                    blobCreditDebitInterval: data.2,
+                    blobMinTtl: data.3,
+                    blobDefaultTtl: data.4,
+                    blobDeleteBatchSize: data.5,
+                    accountDebitBatchSize: data.6,
+                }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.blobCapacity,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.tokenCreditRate,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.blobCreditDebitInterval,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.blobMinTtl,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.blobDefaultTtl,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.blobDeleteBatchSize,
+                    ),
+                    <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize(
+                        &self.accountDebitBatchSize,
+                    ),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for ConfigSet {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&ConfigSet> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &ConfigSet) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
+    ///Container for all the [`IConfigFacade`](self) events.
+    pub enum IConfigFacadeEvents {
+        #[allow(missing_docs)]
+        ConfigAdminSet(ConfigAdminSet),
+        #[allow(missing_docs)]
+        ConfigSet(ConfigSet),
+    }
+    #[automatically_derived]
+    impl IConfigFacadeEvents {
+        /// All the selectors of this enum.
+        ///
+        /// Note that the selectors might not be in the same order as the variants.
+        /// No guarantees are made about the order of the selectors.
+        ///
+        /// Prefer using `SolInterface` methods instead.
+        pub const SELECTORS: &'static [[u8; 32usize]] = &[
+            [
+                23u8, 226u8, 204u8, 188u8, 215u8, 139u8, 100u8, 201u8, 67u8, 212u8, 3u8, 131u8,
+                123u8, 85u8, 41u8, 11u8, 61u8, 232u8, 253u8, 25u8, 200u8, 223u8, 28u8, 10u8, 185u8,
+                207u8, 102u8, 91u8, 147u8, 66u8, 146u8, 212u8,
+            ],
+            [
+                62u8, 138u8, 216u8, 155u8, 118u8, 59u8, 152u8, 57u8, 100u8, 122u8, 72u8, 42u8,
+                239u8, 14u8, 189u8, 6u8, 53u8, 11u8, 159u8, 226u8, 85u8, 253u8, 88u8, 38u8, 59u8,
+                129u8, 136u8, 143u8, 241u8, 113u8, 116u8, 136u8,
+            ],
+        ];
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::SolEventInterface for IConfigFacadeEvents {
+        const NAME: &'static str = "IConfigFacadeEvents";
+        const COUNT: usize = 2usize;
+        fn decode_raw_log(
+            topics: &[alloy_sol_types::Word],
+            data: &[u8],
+            validate: bool,
+        ) -> alloy_sol_types::Result<Self> {
+            match topics.first().copied() {
+                Some(<ConfigAdminSet as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <ConfigAdminSet as alloy_sol_types::SolEvent>::decode_raw_log(
+                        topics, data, validate,
+                    )
+                    .map(Self::ConfigAdminSet)
+                }
+                Some(<ConfigSet as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <ConfigSet as alloy_sol_types::SolEvent>::decode_raw_log(topics, data, validate)
+                        .map(Self::ConfigSet)
+                }
+                _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog {
+                    name: <Self as alloy_sol_types::SolEventInterface>::NAME,
+                    log: alloy_sol_types::private::Box::new(
+                        alloy_sol_types::private::LogData::new_unchecked(
+                            topics.to_vec(),
+                            data.to_vec().into(),
+                        ),
+                    ),
+                }),
+            }
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for IConfigFacadeEvents {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::ConfigAdminSet(inner) => {
+                    alloy_sol_types::private::IntoLogData::to_log_data(inner)
+                }
+                Self::ConfigSet(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner),
+            }
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::ConfigAdminSet(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
+                }
+                Self::ConfigSet(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
} + } + } + } +} diff --git a/recall-contracts/crates/facade/src/config_facade/mod.rs b/recall-contracts/crates/facade/src/config_facade/mod.rs new file mode 100644 index 0000000000..0014806afc --- /dev/null +++ b/recall-contracts/crates/facade/src/config_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iconfigfacade; diff --git a/recall-contracts/crates/facade/src/credit_facade/icreditfacade.rs b/recall-contracts/crates/facade/src/credit_facade/icreditfacade.rs new file mode 100644 index 0000000000..b59ba0660e --- /dev/null +++ b/recall-contracts/crates/facade/src/credit_facade/icreditfacade.rs @@ -0,0 +1,3761 @@ +/** + +Generated by the following Solidity interface... +```solidity +interface ICreditFacade { + type TtlStatus is uint8; + struct Account { + uint64 capacityUsed; + uint256 creditFree; + uint256 creditCommitted; + address creditSponsor; + uint64 lastDebitEpoch; + Approval[] approvalsTo; + Approval[] approvalsFrom; + uint64 maxTtl; + uint256 gasAllowance; + } + struct Approval { + address addr; + CreditApproval approval; + } + struct CreditApproval { + uint256 creditLimit; + uint256 gasFeeLimit; + uint64 expiry; + uint256 creditUsed; + uint256 gasFeeUsed; + } + + event CreditApproved(address from, address to, uint256 creditLimit, uint256 gasFeeLimit, uint256 expiry); + event CreditDebited(uint256 amount, uint256 numAccounts, bool moreAccounts); + event CreditPurchased(address from, uint256 amount); + event CreditRevoked(address from, address to); + + function approveCredit(address to) external; + function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external; + function approveCredit(address to, address[] memory caller) external; + function buyCredit() external payable; + function buyCredit(address recipient) external payable; + function getAccount(address addr) external view returns (Account memory account); + function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); + function revokeCredit(address to, address caller) external; + function revokeCredit(address to) external; + function setAccountSponsor(address sponsor) external; + function setAccountStatus(address subscriber, TtlStatus ttlStatus) external; +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address[]", + "internalType": "address[]" + }, + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address[]", + "internalType": "address[]" + } + 
], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "buyCredit", + "inputs": [], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "buyCredit", + "inputs": [ + { + "name": "recipient", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "getAccount", + "inputs": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "account", + "type": "tuple", + "internalType": "struct Account", + "components": [ + { + "name": "capacityUsed", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditFree", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditCommitted", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditSponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "lastDebitEpoch", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "approvalsTo", + "type": "tuple[]", + "internalType": "struct Approval[]", + "components": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ] + }, + { + "name": "approvalsFrom", + "type": "tuple[]", + "internalType": "struct Approval[]", + "components": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ] + }, + { + "name": "maxTtl", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasAllowance", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getCreditApproval", + "inputs": [ + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "revokeCredit", + "inputs": [ + { + 
"name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "revokeCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setAccountSponsor", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setAccountStatus", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "ttlStatus", + "type": "uint8", + "internalType": "enum TtlStatus" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "CreditApproved", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "creditLimit", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditDebited", + "inputs": [ + { + "name": "amount", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "numAccounts", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "moreAccounts", + "type": "bool", + "indexed": false, + "internalType": "bool" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditPurchased", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "amount", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditRevoked", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod ICreditFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct TtlStatus(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl TtlStatus { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for TtlStatus { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for TtlStatus { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct Account { uint64 capacityUsed; uint256 creditFree; uint256 creditCommitted; address creditSponsor; uint64 lastDebitEpoch; Approval[] approvalsTo; Approval[] approvalsFrom; uint64 maxTtl; uint256 gasAllowance; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Account { + #[allow(missing_docs)] + pub capacityUsed: u64, + #[allow(missing_docs)] + pub creditFree: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditCommitted: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditSponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub lastDebitEpoch: u64, + #[allow(missing_docs)] + pub approvalsTo: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub approvalsFrom: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub maxTtl: u64, + #[allow(missing_docs)] + pub gasAllowance: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + 
::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::Address, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ::alloy_sol_types::private::Vec<::RustType>, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Account) -> Self { + ( + value.capacityUsed, + value.creditFree, + value.creditCommitted, + value.creditSponsor, + value.lastDebitEpoch, + value.approvalsTo, + value.approvalsFrom, + value.maxTtl, + value.gasAllowance, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Account { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + capacityUsed: tuple.0, + creditFree: tuple.1, + creditCommitted: tuple.2, + creditSponsor: tuple.3, + lastDebitEpoch: tuple.4, + approvalsTo: tuple.5, + approvalsFrom: tuple.6, + maxTtl: tuple.7, + gasAllowance: tuple.8, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Account { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Account { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.capacityUsed), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditFree), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditCommitted), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.creditSponsor, + ), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.lastDebitEpoch), + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::tokenize(&self.approvalsTo), + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::tokenize(&self.approvalsFrom), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.maxTtl), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.gasAllowance), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Account { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as 
alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Account { + const NAME: &'static str = "Account"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Account(uint64 capacityUsed,uint256 creditFree,uint256 creditCommitted,address creditSponsor,uint64 lastDebitEpoch,Approval[] approvalsTo,Approval[] approvalsFrom,uint64 maxTtl,uint256 gasAllowance)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(2); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditFree) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.creditCommitted, + ) + .0, + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.creditSponsor, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.lastDebitEpoch, + ) + .0, + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::eip712_data_word(&self.approvalsTo) + .0, + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::eip712_data_word(&self.approvalsFrom) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.maxTtl) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasAllowance) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Account { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditFree, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditCommitted, + ) + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditSponsor, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.lastDebitEpoch, + ) + + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.approvalsTo, + ) + + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.approvalsFrom, + ) + + 
<::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.maxTtl, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasAllowance, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditFree, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditCommitted, + out, + ); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditSponsor, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.lastDebitEpoch, + out, + ); + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.approvalsTo, + out, + ); + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.approvalsFrom, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.maxTtl, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasAllowance, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Approval { address addr; CreditApproval approval; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Approval { + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub approval: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address, CreditApproval); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Approval) -> Self { + (value.addr, value.approval) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Approval { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + addr: tuple.0, + approval: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Approval { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Approval { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address 
as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + ::tokenize(&self.approval), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Approval { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Approval { + const NAME: &'static str = "Approval"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Approval(address addr,CreditApproval approval)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components + .extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.addr, + ) + .0, + ::eip712_data_word( + &self.approval, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Approval { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.addr, + ) + + ::topic_preimage_length( + &rust.approval, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.addr, + out, + ); + ::encode_topic_preimage( + &rust.approval, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct CreditApproval { uint256 creditLimit; uint256 gasFeeLimit; uint64 expiry; uint256 creditUsed; uint256 gasFeeUsed; } + ```*/ + 
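+     // Like the other generated structs in this module, CreditApproval gets
+     // SolValue/SolStruct impls below. A minimal usage sketch (illustrative
+     // only; assumes alloy_primitives::U256 is in scope):
+     //
+     //     use alloy_sol_types::{SolStruct, SolValue};
+     //     let approval = CreditApproval {
+     //         creditLimit: U256::from(1_000u64),
+     //         gasFeeLimit: U256::ZERO,
+     //         expiry: 0u64,
+     //         creditUsed: U256::ZERO,
+     //         gasFeeUsed: U256::ZERO,
+     //     };
+     //     let encoded = approval.abi_encode(); // standard ABI encoding of the field tuple
+     //     // Leaf struct: its EIP-712 encodeType is just the root type below.
+     //     assert_eq!(
+     //         CreditApproval::eip712_encode_type(),
+     //         "CreditApproval(uint256 creditLimit,uint256 gasFeeLimit,uint64 expiry,uint256 creditUsed,uint256 gasFeeUsed)",
+     //     );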
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct CreditApproval { + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub creditUsed: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeUsed: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: CreditApproval) -> Self { + ( + value.creditLimit, + value.gasFeeLimit, + value.expiry, + value.creditUsed, + value.gasFeeUsed, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for CreditApproval { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + creditLimit: tuple.0, + gasFeeLimit: tuple.1, + expiry: tuple.2, + creditUsed: tuple.3, + gasFeeUsed: tuple.4, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for CreditApproval { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for CreditApproval { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeLimit, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.expiry, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditUsed, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeUsed, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + 
as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for CreditApproval { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for CreditApproval { + const NAME: &'static str = "CreditApproval"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "CreditApproval(uint256 creditLimit,uint256 gasFeeLimit,uint64 expiry,uint256 creditUsed,uint256 gasFeeUsed)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditLimit) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasFeeLimit) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasFeeUsed) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for CreditApproval { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditLimit, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasFeeLimit, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasFeeUsed, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditLimit, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasFeeLimit, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as 
alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasFeeUsed, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `CreditApproved(address,address,uint256,uint256,uint256)` and selector `0xc69709e6f767dad7ccb19c605c3c602bf482ecb426059d7cdb5e5737d05b22f8`. + ```solidity + event CreditApproved(address from, address to, uint256 creditLimit, uint256 gasFeeLimit, uint256 expiry); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditApproved { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditApproved { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = + "CreditApproved(address,address,uint256,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 198u8, 151u8, 9u8, 230u8, 247u8, 103u8, 218u8, 215u8, 204u8, 177u8, 156u8, + 96u8, 92u8, 60u8, 96u8, 43u8, 244u8, 130u8, 236u8, 180u8, 38u8, 5u8, 157u8, + 124u8, 219u8, 94u8, 87u8, 55u8, 208u8, 91u8, 34u8, 248u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + to: data.1, + creditLimit: data.2, + gasFeeLimit: data.3, + expiry: data.4, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.expiry, 
+ ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditApproved { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditApproved> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditApproved) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditDebited(uint256,uint256,bool)` and selector `0x5cc1b5286143c9d1f8e1c090b5d7302388ab94fb45b1e18e63d8b08ef8c0f7c3`. + ```solidity + event CreditDebited(uint256 amount, uint256 numAccounts, bool moreAccounts); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditDebited { + #[allow(missing_docs)] + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub numAccounts: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub moreAccounts: bool, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditDebited { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bool, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditDebited(uint256,uint256,bool)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 92u8, 193u8, 181u8, 40u8, 97u8, 67u8, 201u8, 209u8, 248u8, 225u8, 192u8, 144u8, + 181u8, 215u8, 48u8, 35u8, 136u8, 171u8, 148u8, 251u8, 69u8, 177u8, 225u8, + 142u8, 99u8, 216u8, 176u8, 142u8, 248u8, 192u8, 247u8, 195u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + amount: data.0, + numAccounts: data.1, + moreAccounts: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.numAccounts, + ), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.moreAccounts, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut 
[alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditDebited { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditDebited> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditDebited) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditPurchased(address,uint256)` and selector `0xacf2bdc99696da35cbfe300e8b7d3d337ffc9918d8547c58ef8b58a20ec075df`. + ```solidity + event CreditPurchased(address from, uint256 amount); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditPurchased { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditPurchased { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditPurchased(address,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 172u8, 242u8, 189u8, 201u8, 150u8, 150u8, 218u8, 53u8, 203u8, 254u8, 48u8, + 14u8, 139u8, 125u8, 61u8, 51u8, 127u8, 252u8, 153u8, 24u8, 216u8, 84u8, 124u8, + 88u8, 239u8, 139u8, 88u8, 162u8, 14u8, 192u8, 117u8, 223u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + amount: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditPurchased { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) 
-> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditPurchased> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditPurchased) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditRevoked(address,address)` and selector `0xe63d1a905c0cbc7f25c8f71af5ecb744b771b20f954f39e1654d4d838f93b89e`. + ```solidity + event CreditRevoked(address from, address to); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditRevoked { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditRevoked { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditRevoked(address,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 230u8, 61u8, 26u8, 144u8, 92u8, 12u8, 188u8, 127u8, 37u8, 200u8, 247u8, 26u8, + 245u8, 236u8, 183u8, 68u8, 183u8, 113u8, 178u8, 15u8, 149u8, 79u8, 57u8, 225u8, + 101u8, 77u8, 77u8, 131u8, 143u8, 147u8, 184u8, 158u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + to: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditRevoked { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditRevoked> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditRevoked) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `approveCredit(address)` and selector `0x01e98bfa`. 
+ ```solidity + function approveCredit(address to) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_0Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`approveCredit(address)`](approveCredit_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_0Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_0Call) -> Self { + (value.to,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { to: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_0Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_0Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_0Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "approveCredit(address)"; + const SELECTOR: [u8; 4] = [1u8, 233u8, 139u8, 250u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `approveCredit(address,address[],uint256,uint256,uint64)` and selector `0x112b6517`. 
+ ```solidity + function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_1Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub caller: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`approveCredit(address,address[],uint256,uint256,uint64)`](approveCredit_1Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_1Call) -> Self { + ( + value.to, + value.caller, + value.creditLimit, + value.gasFeeLimit, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + caller: tuple.1, + creditLimit: tuple.2, + gasFeeLimit: tuple.3, + ttl: tuple.4, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as 
alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "approveCredit(address,address[],uint256,uint256,uint64)"; + const SELECTOR: [u8; 4] = [17u8, 43u8, 101u8, 23u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::Address, + > as alloy_sol_types::SolType>::tokenize(&self.caller), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditLimit), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.gasFeeLimit), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `approveCredit(address,address[])` and selector `0xa0aa2b65`. + ```solidity + function approveCredit(address to, address[] memory caller) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_2Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub caller: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + } + ///Container type for the return parameters of the [`approveCredit(address,address[])`](approveCredit_2Call) function. 
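+     // The three Solidity `approveCredit` overloads are disambiguated as
+     // approveCredit_0Call, approveCredit_1Call and approveCredit_2Call; each
+     // SELECTOR is the first four bytes of keccak256 over the canonical
+     // SIGNATURE string. A sketch of that invariant (illustrative only):
+     //
+     //     use alloy_sol_types::SolCall;
+     //     let hash = alloy_primitives::keccak256(approveCredit_2Call::SIGNATURE);
+     //     assert_eq!(&hash[..4], &approveCredit_2Call::SELECTOR); // 0xa0aa2b65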
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_2Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_2Call) -> Self { + (value.to, value.caller) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + caller: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_2Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_2Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_2Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "approveCredit(address,address[])"; + const SELECTOR: [u8; 4] = [160u8, 170u8, 43u8, 101u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::Address, + > as alloy_sol_types::SolType>::tokenize(&self.caller), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `buyCredit()` and selector `0x8e4e6f06`. + ```solidity + function buyCredit() external payable; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_0Call {} + ///Container type for the return parameters of the [`buyCredit()`](buyCredit_0Call) function. 
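+     // buyCredit() takes no arguments, so its calldata is just the 4-byte
+     // selector; the payment itself travels in the transaction's `value`
+     // field rather than in the calldata. Sketch (illustrative only):
+     //
+     //     use alloy_sol_types::SolCall;
+     //     let calldata = buyCredit_0Call {}.abi_encode();
+     //     assert_eq!(calldata, buyCredit_0Call::SELECTOR.to_vec());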
+     #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+     #[derive(Clone)]
+     pub struct buyCredit_0Return {}
+     #[allow(
+         non_camel_case_types,
+         non_snake_case,
+         clippy::pub_underscore_fields,
+         clippy::style
+     )]
+     const _: () = {
+         use ::alloy_sol_types;
+         {
+             #[doc(hidden)]
+             type UnderlyingSolTuple<'a> = ();
+             #[doc(hidden)]
+             type UnderlyingRustTuple<'a> = ();
+             #[cfg(test)]
+             #[allow(dead_code, unreachable_patterns)]
+             fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                 match _t {
+                     alloy_sol_types::private::AssertTypeEq::<
+                         <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                     >(_) => {}
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<buyCredit_0Call> for UnderlyingRustTuple<'_> {
+                 fn from(value: buyCredit_0Call) -> Self {
+                     ()
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<UnderlyingRustTuple<'_>> for buyCredit_0Call {
+                 fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                     Self {}
+                 }
+             }
+         }
+         {
+             #[doc(hidden)]
+             type UnderlyingSolTuple<'a> = ();
+             #[doc(hidden)]
+             type UnderlyingRustTuple<'a> = ();
+             #[cfg(test)]
+             #[allow(dead_code, unreachable_patterns)]
+             fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                 match _t {
+                     alloy_sol_types::private::AssertTypeEq::<
+                         <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                     >(_) => {}
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<buyCredit_0Return> for UnderlyingRustTuple<'_> {
+                 fn from(value: buyCredit_0Return) -> Self {
+                     ()
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<UnderlyingRustTuple<'_>> for buyCredit_0Return {
+                 fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                     Self {}
+                 }
+             }
+         }
+         #[automatically_derived]
+         impl alloy_sol_types::SolCall for buyCredit_0Call {
+             type Parameters<'a> = ();
+             type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+             type Return = buyCredit_0Return;
+             type ReturnTuple<'a> = ();
+             type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+             const SIGNATURE: &'static str = "buyCredit()";
+             const SELECTOR: [u8; 4] = [142u8, 78u8, 111u8, 6u8];
+             #[inline]
+             fn new<'a>(
+                 tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+             ) -> Self {
+                 tuple.into()
+             }
+             #[inline]
+             fn tokenize(&self) -> Self::Token<'_> {
+                 ()
+             }
+             #[inline]
+             fn abi_decode_returns(
+                 data: &[u8],
+                 validate: bool,
+             ) -> alloy_sol_types::Result<Self::Return> {
+                 <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                     data, validate,
+                 )
+                 .map(Into::into)
+             }
+         }
+     };
+     /**Function with signature `buyCredit(address)` and selector `0xa38eae9f`.
+     ```solidity
+     function buyCredit(address recipient) external payable;
+     ```*/
+     #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+     #[derive(Clone)]
+     pub struct buyCredit_1Call {
+         #[allow(missing_docs)]
+         pub recipient: ::alloy_sol_types::private::Address,
+     }
+     ///Container type for the return parameters of the [`buyCredit(address)`](buyCredit_1Call) function.
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_1Call) -> Self { + (value.recipient,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { recipient: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for buyCredit_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = buyCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "buyCredit(address)"; + const SELECTOR: [u8; 4] = [163u8, 142u8, 174u8, 159u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.recipient, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getAccount(address)` and selector `0xfbcbc0f1`. + ```solidity + function getAccount(address addr) external view returns (Account memory account); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getAccountCall { + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`getAccount(address)`](getAccountCall) function. 
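+     // getAccount(address) is a view call; its return data decodes into the
+     // Account struct defined above. Sketch (illustrative only; `owner` is a
+     // hypothetical Address and `raw` the bytes returned by eth_call):
+     //
+     //     use alloy_sol_types::SolCall;
+     //     let calldata = getAccountCall { addr: owner }.abi_encode(); // 0xfbcbc0f1 + args
+     //     let account = getAccountCall::abi_decode_returns(&raw, true)?.account;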
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getAccountReturn { + #[allow(missing_docs)] + pub account: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getAccountCall) -> Self { + (value.addr,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getAccountCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { addr: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Account,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getAccountReturn) -> Self { + (value.account,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getAccountReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { account: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getAccountCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getAccountReturn; + type ReturnTuple<'a> = (Account,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getAccount(address)"; + const SELECTOR: [u8; 4] = [251u8, 203u8, 192u8, 241u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getCreditApproval(address,address)` and selector `0xcd9be80f`. + ```solidity + function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCreditApprovalCall { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`getCreditApproval(address,address)`](getCreditApprovalCall) function. 
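+     // Unlike Account, which contains dynamic Approval[] fields (its
+     // ENCODED_SIZE is None), CreditApproval is all value types, so the
+     // returned struct has a fixed ABI footprint of five 32-byte words.
+     // Sketch of the expected invariant (illustrative only):
+     //
+     //     use alloy_sol_types::SolType;
+     //     assert_eq!(<CreditApproval as SolType>::ENCODED_SIZE, Some(160));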
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCreditApprovalReturn { + #[allow(missing_docs)] + pub approval: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Address, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCreditApprovalCall) -> Self { + (value.from, value.to) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCreditApprovalCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + from: tuple.0, + to: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (CreditApproval,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCreditApprovalReturn) -> Self { + (value.approval,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCreditApprovalReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { approval: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getCreditApprovalCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getCreditApprovalReturn; + type ReturnTuple<'a> = (CreditApproval,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getCreditApproval(address,address)"; + const SELECTOR: [u8; 4] = [205u8, 155u8, 232u8, 15u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `revokeCredit(address,address)` and selector `0xa84a1535`. 
+     ```solidity
+     function revokeCredit(address to, address caller) external;
+     ```*/
+     #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+     #[derive(Clone)]
+     pub struct revokeCredit_0Call {
+         #[allow(missing_docs)]
+         pub to: ::alloy_sol_types::private::Address,
+         #[allow(missing_docs)]
+         pub caller: ::alloy_sol_types::private::Address,
+     }
+     ///Container type for the return parameters of the [`revokeCredit(address,address)`](revokeCredit_0Call) function.
+     #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)]
+     #[derive(Clone)]
+     pub struct revokeCredit_0Return {}
+     #[allow(
+         non_camel_case_types,
+         non_snake_case,
+         clippy::pub_underscore_fields,
+         clippy::style
+     )]
+     const _: () = {
+         use ::alloy_sol_types;
+         {
+             #[doc(hidden)]
+             type UnderlyingSolTuple<'a> = (
+                 ::alloy_sol_types::sol_data::Address,
+                 ::alloy_sol_types::sol_data::Address,
+             );
+             #[doc(hidden)]
+             type UnderlyingRustTuple<'a> = (
+                 ::alloy_sol_types::private::Address,
+                 ::alloy_sol_types::private::Address,
+             );
+             #[cfg(test)]
+             #[allow(dead_code, unreachable_patterns)]
+             fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                 match _t {
+                     alloy_sol_types::private::AssertTypeEq::<
+                         <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                     >(_) => {}
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<revokeCredit_0Call> for UnderlyingRustTuple<'_> {
+                 fn from(value: revokeCredit_0Call) -> Self {
+                     (value.to, value.caller)
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<UnderlyingRustTuple<'_>> for revokeCredit_0Call {
+                 fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                     Self {
+                         to: tuple.0,
+                         caller: tuple.1,
+                     }
+                 }
+             }
+         }
+         {
+             #[doc(hidden)]
+             type UnderlyingSolTuple<'a> = ();
+             #[doc(hidden)]
+             type UnderlyingRustTuple<'a> = ();
+             #[cfg(test)]
+             #[allow(dead_code, unreachable_patterns)]
+             fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>) {
+                 match _t {
+                     alloy_sol_types::private::AssertTypeEq::<
+                         <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                     >(_) => {}
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<revokeCredit_0Return> for UnderlyingRustTuple<'_> {
+                 fn from(value: revokeCredit_0Return) -> Self {
+                     ()
+                 }
+             }
+             #[automatically_derived]
+             #[doc(hidden)]
+             impl ::core::convert::From<UnderlyingRustTuple<'_>> for revokeCredit_0Return {
+                 fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                     Self {}
+                 }
+             }
+         }
+         #[automatically_derived]
+         impl alloy_sol_types::SolCall for revokeCredit_0Call {
+             type Parameters<'a> = (
+                 ::alloy_sol_types::sol_data::Address,
+                 ::alloy_sol_types::sol_data::Address,
+             );
+             type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+             type Return = revokeCredit_0Return;
+             type ReturnTuple<'a> = ();
+             type ReturnToken<'a> = <Self::ReturnTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+             const SIGNATURE: &'static str = "revokeCredit(address,address)";
+             const SELECTOR: [u8; 4] = [168u8, 74u8, 21u8, 53u8];
+             #[inline]
+             fn new<'a>(
+                 tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+             ) -> Self {
+                 tuple.into()
+             }
+             #[inline]
+             fn tokenize(&self) -> Self::Token<'_> {
+                 (
+                     <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(&self.to),
+                     <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(&self.caller),
+                 )
+             }
+             #[inline]
+             fn abi_decode_returns(
+                 data: &[u8],
+                 validate: bool,
+             ) -> alloy_sol_types::Result<Self::Return> {
+                 <Self::ReturnTuple<'_> as alloy_sol_types::SolType>::abi_decode_sequence(
+                     data, validate,
+                 )
+                 .map(Into::into)
+             }
+         }
+     };
+     /**Function with signature `revokeCredit(address)` and selector `0xa8ef8caf`.
+ ```solidity + function revokeCredit(address to) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_1Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`revokeCredit(address)`](revokeCredit_1Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_1Call) -> Self { + (value.to,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { to: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for revokeCredit_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = revokeCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "revokeCredit(address)"; + const SELECTOR: [u8; 4] = [168u8, 239u8, 140u8, 175u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `setAccountSponsor(address)` and selector `0x8e0948b6`. + ```solidity + function setAccountSponsor(address sponsor) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountSponsorCall { + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`setAccountSponsor(address)`](setAccountSponsorCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountSponsorReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountSponsorCall) -> Self { + (value.sponsor,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountSponsorCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { sponsor: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountSponsorReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountSponsorReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for setAccountSponsorCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = setAccountSponsorReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "setAccountSponsor(address)"; + const SELECTOR: [u8; 4] = [142u8, 9u8, 72u8, 182u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `setAccountStatus(address,uint8)` and selector `0x0ad2b0a1`. + ```solidity + function setAccountStatus(address subscriber, TtlStatus ttlStatus) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountStatusCall { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub ttlStatus: ::RustType, + } + ///Container type for the return parameters of the [`setAccountStatus(address,uint8)`](setAccountStatusCall) function. 
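Decoding runs the other way: `SolCall::abi_decode` checks the selector, then detokenizes the argument tuple back into the typed struct. A hedged sketch using the `setAccountSponsorCall` binding defined above (hypothetical helper, same dependency assumptions as the previous example):

```rust
use alloy_primitives::Address;
use alloy_sol_types::SolCall;

/// Illustrative: recover the sponsor address from raw calldata.
fn sponsor_from_calldata(calldata: &[u8]) -> alloy_sol_types::Result<Address> {
    // Fails if the selector is not 0x8e0948b6 or the payload is malformed.
    let call = ICreditFacade::setAccountSponsorCall::abi_decode(calldata, true)?;
    Ok(call.sponsor)
}
```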
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountStatusReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address, TtlStatus); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountStatusCall) -> Self { + (value.subscriber, value.ttlStatus) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountStatusCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + ttlStatus: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountStatusReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountStatusReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for setAccountStatusCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address, TtlStatus); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = setAccountStatusReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "setAccountStatus(address,uint8)"; + const SELECTOR: [u8; 4] = [10u8, 210u8, 176u8, 161u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + ::tokenize(&self.ttlStatus), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`ICreditFacade`](self) function calls. 
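Each `SELECTOR` constant in these impls is the first four bytes of the Keccak-256 hash of the canonical signature string, which is easy to sanity-check against the generated constants. A sketch assuming `alloy-primitives` for `keccak256`:

```rust
use alloy_primitives::keccak256;

/// A function selector is keccak256(canonical_signature)[0..4].
fn selector(signature: &str) -> [u8; 4] {
    let hash = keccak256(signature.as_bytes());
    [hash[0], hash[1], hash[2], hash[3]]
}

#[test]
fn set_account_status_selector_matches() {
    // Matches the generated constant [10, 210, 176, 161], i.e. 0x0ad2b0a1.
    assert_eq!(selector("setAccountStatus(address,uint8)"), [0x0a, 0xd2, 0xb0, 0xa1]);
}
```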
+ pub enum ICreditFacadeCalls { + #[allow(missing_docs)] + approveCredit_0(approveCredit_0Call), + #[allow(missing_docs)] + approveCredit_1(approveCredit_1Call), + #[allow(missing_docs)] + approveCredit_2(approveCredit_2Call), + #[allow(missing_docs)] + buyCredit_0(buyCredit_0Call), + #[allow(missing_docs)] + buyCredit_1(buyCredit_1Call), + #[allow(missing_docs)] + getAccount(getAccountCall), + #[allow(missing_docs)] + getCreditApproval(getCreditApprovalCall), + #[allow(missing_docs)] + revokeCredit_0(revokeCredit_0Call), + #[allow(missing_docs)] + revokeCredit_1(revokeCredit_1Call), + #[allow(missing_docs)] + setAccountSponsor(setAccountSponsorCall), + #[allow(missing_docs)] + setAccountStatus(setAccountStatusCall), + } + #[automatically_derived] + impl ICreditFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [1u8, 233u8, 139u8, 250u8], + [10u8, 210u8, 176u8, 161u8], + [17u8, 43u8, 101u8, 23u8], + [142u8, 9u8, 72u8, 182u8], + [142u8, 78u8, 111u8, 6u8], + [160u8, 170u8, 43u8, 101u8], + [163u8, 142u8, 174u8, 159u8], + [168u8, 74u8, 21u8, 53u8], + [168u8, 239u8, 140u8, 175u8], + [205u8, 155u8, 232u8, 15u8], + [251u8, 203u8, 192u8, 241u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for ICreditFacadeCalls { + const NAME: &'static str = "ICreditFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 11usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::approveCredit_0(_) => { + ::SELECTOR + } + Self::approveCredit_1(_) => { + ::SELECTOR + } + Self::approveCredit_2(_) => { + ::SELECTOR + } + Self::buyCredit_0(_) => ::SELECTOR, + Self::buyCredit_1(_) => ::SELECTOR, + Self::getAccount(_) => ::SELECTOR, + Self::getCreditApproval(_) => { + ::SELECTOR + } + Self::revokeCredit_0(_) => { + ::SELECTOR + } + Self::revokeCredit_1(_) => { + ::SELECTOR + } + Self::setAccountSponsor(_) => { + ::SELECTOR + } + Self::setAccountStatus(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn approveCredit_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_0) + } + approveCredit_0 + }, + { + fn setAccountStatus( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::setAccountStatus) + } + setAccountStatus + }, + { + fn approveCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_1) + } + approveCredit_1 + }, + { + fn setAccountSponsor( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::setAccountSponsor) + } + setAccountSponsor + }, + { + fn buyCredit_0( + data: &[u8], + validate: 
bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::buyCredit_0) + } + buyCredit_0 + }, + { + fn approveCredit_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_2) + } + approveCredit_2 + }, + { + fn buyCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::buyCredit_1) + } + buyCredit_1 + }, + { + fn revokeCredit_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::revokeCredit_0) + } + revokeCredit_0 + }, + { + fn revokeCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::revokeCredit_1) + } + revokeCredit_1 + }, + { + fn getCreditApproval( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::getCreditApproval) + } + getCreditApproval + }, + { + fn getAccount( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ICreditFacadeCalls::getAccount) + } + getAccount + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::approveCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::approveCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::approveCredit_2(inner) => { + ::abi_encoded_size(inner) + } + Self::buyCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::buyCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::getAccount(inner) => { + ::abi_encoded_size(inner) + } + Self::getCreditApproval(inner) => { + ::abi_encoded_size(inner) + } + Self::revokeCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::revokeCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::setAccountSponsor(inner) => { + ::abi_encoded_size(inner) + } + Self::setAccountStatus(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::approveCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::approveCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::approveCredit_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::buyCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::buyCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getAccount(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getCreditApproval(inner) => { + ::abi_encode_raw(inner, out) + } + Self::revokeCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::revokeCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::setAccountSponsor(inner) => { + ::abi_encode_raw(inner, out) + } + Self::setAccountStatus(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`ICreditFacade`](self) events. 
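Because `ICreditFacadeCalls` implements `SolInterface`, callers can route arbitrary facade calldata without matching selectors by hand; the binary search over `SELECTORS` shown above happens inside the provided `abi_decode` method. A hedged dispatcher sketch (illustrative; assumes the generated `ICreditFacade` module is in scope):

```rust
use alloy_sol_types::SolInterface;

use ICreditFacade::ICreditFacadeCalls as Calls;

/// Illustrative: classify any ICreditFacade calldata by overload family.
fn classify(calldata: &[u8]) -> alloy_sol_types::Result<&'static str> {
    Ok(match Calls::abi_decode(calldata, true)? {
        Calls::buyCredit_0(_) | Calls::buyCredit_1(_) => "credit purchase",
        Calls::revokeCredit_0(_) | Calls::revokeCredit_1(_) => "credit revocation",
        Calls::getAccount(_) | Calls::getCreditApproval(_) => "read-only query",
        _ => "other credit-facade call",
    })
}
```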
+ pub enum ICreditFacadeEvents { + #[allow(missing_docs)] + CreditApproved(CreditApproved), + #[allow(missing_docs)] + CreditDebited(CreditDebited), + #[allow(missing_docs)] + CreditPurchased(CreditPurchased), + #[allow(missing_docs)] + CreditRevoked(CreditRevoked), + } + #[automatically_derived] + impl ICreditFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 92u8, 193u8, 181u8, 40u8, 97u8, 67u8, 201u8, 209u8, 248u8, 225u8, 192u8, 144u8, + 181u8, 215u8, 48u8, 35u8, 136u8, 171u8, 148u8, 251u8, 69u8, 177u8, 225u8, 142u8, + 99u8, 216u8, 176u8, 142u8, 248u8, 192u8, 247u8, 195u8, + ], + [ + 172u8, 242u8, 189u8, 201u8, 150u8, 150u8, 218u8, 53u8, 203u8, 254u8, 48u8, 14u8, + 139u8, 125u8, 61u8, 51u8, 127u8, 252u8, 153u8, 24u8, 216u8, 84u8, 124u8, 88u8, + 239u8, 139u8, 88u8, 162u8, 14u8, 192u8, 117u8, 223u8, + ], + [ + 198u8, 151u8, 9u8, 230u8, 247u8, 103u8, 218u8, 215u8, 204u8, 177u8, 156u8, 96u8, + 92u8, 60u8, 96u8, 43u8, 244u8, 130u8, 236u8, 180u8, 38u8, 5u8, 157u8, 124u8, 219u8, + 94u8, 87u8, 55u8, 208u8, 91u8, 34u8, 248u8, + ], + [ + 230u8, 61u8, 26u8, 144u8, 92u8, 12u8, 188u8, 127u8, 37u8, 200u8, 247u8, 26u8, + 245u8, 236u8, 183u8, 68u8, 183u8, 113u8, 178u8, 15u8, 149u8, 79u8, 57u8, 225u8, + 101u8, 77u8, 77u8, 131u8, 143u8, 147u8, 184u8, 158u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for ICreditFacadeEvents { + const NAME: &'static str = "ICreditFacadeEvents"; + const COUNT: usize = 4usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditApproved) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditDebited) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditPurchased) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditRevoked) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ICreditFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::CreditApproved(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditDebited(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditPurchased(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditRevoked(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::CreditApproved(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::CreditDebited(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::CreditPurchased(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + 
Self::CreditRevoked(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/credit_facade/mod.rs b/recall-contracts/crates/facade/src/credit_facade/mod.rs new file mode 100644 index 0000000000..efa4977731 --- /dev/null +++ b/recall-contracts/crates/facade/src/credit_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#icreditfacade; diff --git a/recall-contracts/crates/facade/src/gas_facade/igasfacade.rs b/recall-contracts/crates/facade/src/gas_facade/igasfacade.rs new file mode 100644 index 0000000000..7cab71e2fb --- /dev/null +++ b/recall-contracts/crates/facade/src/gas_facade/igasfacade.rs @@ -0,0 +1,339 @@ +/** + +Generated by the following Solidity interface... +```solidity +interface IGasFacade { + event GasSponsorSet(address sponsor); + event GasSponsorUnset(); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "event", + "name": "GasSponsorSet", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "GasSponsorUnset", + "inputs": [], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IGasFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `GasSponsorSet(address)` and selector `0xe9c438da6edc711056efd08e60609c24627b30c4a355a568d36d3cc0add0bfe1`. 
+    ```solidity
+    event GasSponsorSet(address sponsor);
+    ```*/
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    #[derive(Clone)]
+    pub struct GasSponsorSet {
+        #[allow(missing_docs)]
+        pub sponsor: ::alloy_sol_types::private::Address,
+    }
+    #[allow(
+        non_camel_case_types,
+        non_snake_case,
+        clippy::pub_underscore_fields,
+        clippy::style
+    )]
+    const _: () = {
+        use ::alloy_sol_types;
+        #[automatically_derived]
+        impl alloy_sol_types::SolEvent for GasSponsorSet {
+            type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,);
+            type DataToken<'a> = <Self::DataTuple<'a> as alloy_sol_types::SolType>::Token<'a>;
+            type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,);
+            const SIGNATURE: &'static str = "GasSponsorSet(address)";
+            const SIGNATURE_HASH: alloy_sol_types::private::B256 =
+                alloy_sol_types::private::B256::new([
+                    233u8, 196u8, 56u8, 218u8, 110u8, 220u8, 113u8, 16u8, 86u8, 239u8, 208u8,
+                    142u8, 96u8, 96u8, 156u8, 36u8, 98u8, 123u8, 48u8, 196u8, 163u8, 85u8, 165u8,
+                    104u8, 211u8, 109u8, 60u8, 192u8, 173u8, 208u8, 191u8, 225u8,
+                ]);
+            const ANONYMOUS: bool = false;
+            #[allow(unused_variables)]
+            #[inline]
+            fn new(
+                topics: <Self::TopicList as alloy_sol_types::SolType>::RustType,
+                data: <Self::DataTuple<'_> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                Self { sponsor: data.0 }
+            }
+            #[inline]
+            fn check_signature(
+                topics: &<Self::TopicList as alloy_sol_types::SolType>::RustType,
+            ) -> alloy_sol_types::Result<()> {
+                if topics.0 != Self::SIGNATURE_HASH {
+                    return Err(alloy_sol_types::Error::invalid_event_signature_hash(
+                        Self::SIGNATURE,
+                        topics.0,
+                        Self::SIGNATURE_HASH,
+                    ));
+                }
+                Ok(())
+            }
+            #[inline]
+            fn tokenize_body(&self) -> Self::DataToken<'_> {
+                (
+                    <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize(
+                        &self.sponsor,
+                    ),
+                )
+            }
+            #[inline]
+            fn topics(&self) -> <Self::TopicList as alloy_sol_types::SolType>::RustType {
+                (Self::SIGNATURE_HASH.into(),)
+            }
+            #[inline]
+            fn encode_topics_raw(
+                &self,
+                out: &mut [alloy_sol_types::abi::token::WordToken],
+            ) -> alloy_sol_types::Result<()> {
+                if out.len() < <Self::TopicList as alloy_sol_types::TopicList>::COUNT {
+                    return Err(alloy_sol_types::Error::Overrun);
+                }
+                out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH);
+                Ok(())
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::private::IntoLogData for GasSponsorSet {
+            fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+                From::from(self)
+            }
+            fn into_log_data(self) -> alloy_sol_types::private::LogData {
+                From::from(&self)
+            }
+        }
+        #[automatically_derived]
+        impl From<&GasSponsorSet> for alloy_sol_types::private::LogData {
+            #[inline]
+            fn from(this: &GasSponsorSet) -> alloy_sol_types::private::LogData {
+                alloy_sol_types::SolEvent::encode_log_data(this)
+            }
+        }
+    };
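A non-anonymous event's `topic0` is the Keccak-256 hash of its signature string, which is exactly the `SIGNATURE_HASH` constant above. A quick illustrative check, assuming `alloy-primitives` and the generated `IGasFacade` module are in scope:

```rust
use alloy_primitives::keccak256;
use alloy_sol_types::SolEvent;

#[test]
fn gas_sponsor_set_topic0_matches_signature() {
    assert_eq!(
        IGasFacade::GasSponsorSet::SIGNATURE_HASH,
        keccak256("GasSponsorSet(address)".as_bytes()),
    );
}
```

+    /**Event with signature `GasSponsorUnset()` and selector `0xd10f5c7821677a4b8658a83a5d5ac1c78324b2a44a9f634d5c53fbebc13674c4`.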
+ ```solidity + event GasSponsorUnset(); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct GasSponsorUnset {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for GasSponsorUnset { + type DataTuple<'a> = (); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "GasSponsorUnset()"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 209u8, 15u8, 92u8, 120u8, 33u8, 103u8, 122u8, 75u8, 134u8, 88u8, 168u8, 58u8, + 93u8, 90u8, 193u8, 199u8, 131u8, 36u8, 178u8, 164u8, 74u8, 159u8, 99u8, 77u8, + 92u8, 83u8, 251u8, 235u8, 193u8, 54u8, 116u8, 196u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self {} + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + () + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for GasSponsorUnset { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&GasSponsorUnset> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &GasSponsorUnset) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + ///Container for all the [`IGasFacade`](self) events. + pub enum IGasFacadeEvents { + #[allow(missing_docs)] + GasSponsorSet(GasSponsorSet), + #[allow(missing_docs)] + GasSponsorUnset(GasSponsorUnset), + } + #[automatically_derived] + impl IGasFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 209u8, 15u8, 92u8, 120u8, 33u8, 103u8, 122u8, 75u8, 134u8, 88u8, 168u8, 58u8, 93u8, + 90u8, 193u8, 199u8, 131u8, 36u8, 178u8, 164u8, 74u8, 159u8, 99u8, 77u8, 92u8, 83u8, + 251u8, 235u8, 193u8, 54u8, 116u8, 196u8, + ], + [ + 233u8, 196u8, 56u8, 218u8, 110u8, 220u8, 113u8, 16u8, 86u8, 239u8, 208u8, 142u8, + 96u8, 96u8, 156u8, 36u8, 98u8, 123u8, 48u8, 196u8, 163u8, 85u8, 165u8, 104u8, + 211u8, 109u8, 60u8, 192u8, 173u8, 208u8, 191u8, 225u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IGasFacadeEvents { + const NAME: &'static str = "IGasFacadeEvents"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::GasSponsorSet) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::GasSponsorUnset) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IGasFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::GasSponsorSet(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::GasSponsorUnset(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::GasSponsorSet(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::GasSponsorUnset(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/gas_facade/mod.rs b/recall-contracts/crates/facade/src/gas_facade/mod.rs new file mode 100644 index 0000000000..34f35cb62f --- /dev/null +++ b/recall-contracts/crates/facade/src/gas_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. 
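The `decode_raw_log` dispatcher above keys off `topic0`, so log consumers never hash signatures themselves. A hedged sketch of filtering gas-facade events out of raw logs (illustrative; `alloy_sol_types::Word` is `B256` from `alloy-primitives`):

```rust
use alloy_primitives::B256;
use alloy_sol_types::SolEventInterface;

/// Illustrative: did this raw log set (`Some(true)`) or unset (`Some(false)`)
/// the gas sponsor? Returns `None` for logs that are not gas-facade events.
fn sponsor_change(topics: &[B256], data: &[u8]) -> Option<bool> {
    match IGasFacade::IGasFacadeEvents::decode_raw_log(topics, data, true) {
        Ok(IGasFacade::IGasFacadeEvents::GasSponsorSet(_)) => Some(true),
        Ok(IGasFacade::IGasFacadeEvents::GasSponsorUnset(_)) => Some(false),
        Err(_) => None,
    }
}
```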
+pub mod r#igasfacade; diff --git a/recall-contracts/crates/facade/src/lib.rs b/recall-contracts/crates/facade/src/lib.rs new file mode 100644 index 0000000000..bf624837a4 --- /dev/null +++ b/recall-contracts/crates/facade/src/lib.rs @@ -0,0 +1,216 @@ +// Copyright 2025 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +#![allow(dead_code)] + +pub use alloy_primitives as primitives; + +pub mod types; + +#[cfg(feature = "blob-reader")] +mod blobreader_facade; +#[cfg(feature = "blob-reader")] +pub mod blob_reader { + pub type Events = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::IBlobReaderFacadeEvents; + pub type ReadRequestClosed = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestClosed; + pub type ReadRequestOpened = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestOpened; + pub type ReadRequestPending = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestPending; +} + +#[cfg(feature = "blobs")] +mod blobs_facade; +#[cfg(feature = "blobs")] +pub mod blobs { + pub type Events = crate::blobs_facade::iblobsfacade::IBlobsFacade::IBlobsFacadeEvents; + pub type BlobAdded = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobAdded; + pub type BlobDeleted = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobDeleted; + pub type BlobFinalized = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobFinalized; + pub type BlobPending = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobPending; + + pub type Calls = crate::blobs_facade::iblobsfacade::IBlobsFacade::IBlobsFacadeCalls; + #[allow(non_camel_case_types)] + pub type addBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::addBlobCall; + #[allow(non_camel_case_types)] + pub type deleteBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::deleteBlobCall; + #[allow(non_camel_case_types)] + pub type getBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::getBlobCall; + #[allow(non_camel_case_types)] + pub type getStatsCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::getStatsCall; + #[allow(non_camel_case_types)] + pub type overwriteBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::overwriteBlobCall; + #[allow(non_camel_case_types)] + pub type trimBlobExpiriesCall = + crate::blobs_facade::iblobsfacade::IBlobsFacade::trimBlobExpiriesCall; + + pub type Subscription = crate::blobs_facade::iblobsfacade::IBlobsFacade::Subscription; + pub type Blob = crate::blobs_facade::iblobsfacade::IBlobsFacade::Blob; + pub type SubnetStats = crate::blobs_facade::iblobsfacade::IBlobsFacade::SubnetStats; + pub type TrimBlobExpiries = crate::blobs_facade::iblobsfacade::IBlobsFacade::TrimBlobExpiries; +} + +#[cfg(feature = "bucket")] +mod bucket_facade; +#[cfg(feature = "bucket")] +pub mod bucket { + pub type Events = crate::bucket_facade::ibucketfacade::IBucketFacade::IBucketFacadeEvents; + pub type ObjectAdded = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectAdded; + pub type ObjectDeleted = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectDeleted; + pub type ObjectMetadataUpdated = + crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectMetadataUpdated; + + pub type Calls = crate::bucket_facade::ibucketfacade::IBucketFacade::IBucketFacadeCalls; + #[allow(non_camel_case_types)] + pub type addObject_0Call = crate::bucket_facade::ibucketfacade::IBucketFacade::addObject_0Call; + #[allow(non_camel_case_types)] + pub type addObject_1Call = 
crate::bucket_facade::ibucketfacade::IBucketFacade::addObject_1Call; + #[allow(non_camel_case_types)] + pub type deleteObjectCall = + crate::bucket_facade::ibucketfacade::IBucketFacade::deleteObjectCall; + #[allow(non_camel_case_types)] + pub type getObjectCall = crate::bucket_facade::ibucketfacade::IBucketFacade::getObjectCall; + #[allow(non_camel_case_types)] + pub type queryObjects_0Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_0Call; + #[allow(non_camel_case_types)] + pub type queryObjects_1Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_1Call; + #[allow(non_camel_case_types)] + pub type queryObjects_2Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_2Call; + #[allow(non_camel_case_types)] + pub type queryObjects_3Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_3Call; + #[allow(non_camel_case_types)] + pub type queryObjects_4Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_4Call; + #[allow(non_camel_case_types)] + pub type updateObjectMetadataCall = + crate::bucket_facade::ibucketfacade::IBucketFacade::updateObjectMetadataCall; + + pub type ObjectValue = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectValue; + pub type KeyValue = crate::bucket_facade::ibucketfacade::IBucketFacade::KeyValue; + pub type Query = crate::bucket_facade::ibucketfacade::IBucketFacade::Query; + pub type Object = crate::bucket_facade::ibucketfacade::IBucketFacade::Object; + pub type ObjectState = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectState; +} + +#[cfg(feature = "config")] +mod config_facade; +#[cfg(feature = "config")] +pub mod config { + pub type Events = crate::config_facade::iconfigfacade::IConfigFacade::IConfigFacadeEvents; + pub type ConfigAdminSet = crate::config_facade::iconfigfacade::IConfigFacade::ConfigAdminSet; + pub type ConfigSet = crate::config_facade::iconfigfacade::IConfigFacade::ConfigSet; +} + +#[cfg(feature = "credit")] +mod credit_facade; +#[cfg(feature = "credit")] +pub mod credit { + pub type Events = crate::credit_facade::icreditfacade::ICreditFacade::ICreditFacadeEvents; + pub type CreditApproved = crate::credit_facade::icreditfacade::ICreditFacade::CreditApproved; + pub type CreditDebited = crate::credit_facade::icreditfacade::ICreditFacade::CreditDebited; + pub type CreditPurchased = crate::credit_facade::icreditfacade::ICreditFacade::CreditPurchased; + pub type CreditRevoked = crate::credit_facade::icreditfacade::ICreditFacade::CreditRevoked; + + pub type Calls = crate::credit_facade::icreditfacade::ICreditFacade::ICreditFacadeCalls; + #[allow(non_camel_case_types)] + pub type buyCredit_0Call = crate::credit_facade::icreditfacade::ICreditFacade::buyCredit_0Call; + #[allow(non_camel_case_types)] + pub type buyCredit_1Call = crate::credit_facade::icreditfacade::ICreditFacade::buyCredit_1Call; + #[allow(non_camel_case_types)] + pub type approveCredit_0Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_0Call; + #[allow(non_camel_case_types)] + pub type approveCredit_1Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_1Call; + #[allow(non_camel_case_types)] + pub type approveCredit_2Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_2Call; + #[allow(non_camel_case_types)] + pub type revokeCredit_0Call = + crate::credit_facade::icreditfacade::ICreditFacade::revokeCredit_0Call; + #[allow(non_camel_case_types)] + pub type revokeCredit_1Call = + 
crate::credit_facade::icreditfacade::ICreditFacade::revokeCredit_1Call; + #[allow(non_camel_case_types)] + pub type setAccountSponsorCall = + crate::credit_facade::icreditfacade::ICreditFacade::setAccountSponsorCall; + #[allow(non_camel_case_types)] + pub type getAccountCall = crate::credit_facade::icreditfacade::ICreditFacade::getAccountCall; + #[allow(non_camel_case_types)] + pub type getCreditApprovalCall = + crate::credit_facade::icreditfacade::ICreditFacade::getCreditApprovalCall; + #[allow(non_camel_case_types)] + pub type setAccountStatusCall = + crate::credit_facade::icreditfacade::ICreditFacade::setAccountStatusCall; + + pub type Account = crate::credit_facade::icreditfacade::ICreditFacade::Account; + pub type Approval = crate::credit_facade::icreditfacade::ICreditFacade::Approval; + pub type CreditApproval = crate::credit_facade::icreditfacade::ICreditFacade::CreditApproval; + pub type TtlStatus = crate::credit_facade::icreditfacade::ICreditFacade::TtlStatus; +} + +#[cfg(feature = "gas")] +mod gas_facade; +#[cfg(feature = "gas")] +pub mod gas { + pub type Events = crate::gas_facade::igasfacade::IGasFacade::IGasFacadeEvents; + pub type GasSponsorSet = crate::gas_facade::igasfacade::IGasFacade::GasSponsorSet; + pub type GasSponsorUnset = crate::gas_facade::igasfacade::IGasFacade::GasSponsorUnset; +} + +#[cfg(feature = "machine")] +mod machine_facade; +#[cfg(feature = "machine")] +pub mod machine { + pub type Events = crate::machine_facade::imachinefacade::IMachineFacade::IMachineFacadeEvents; + pub type MachineCreated = crate::machine_facade::imachinefacade::IMachineFacade::MachineCreated; + pub type MachineInitialized = + crate::machine_facade::imachinefacade::IMachineFacade::MachineInitialized; + + pub type Calls = crate::machine_facade::imachinefacade::IMachineFacade::IMachineFacadeCalls; + #[allow(non_camel_case_types)] + pub type createBucket_0Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_0Call; + #[allow(non_camel_case_types)] + pub type createBucket_1Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_1Call; + #[allow(non_camel_case_types)] + pub type createBucket_2Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_2Call; + #[allow(non_camel_case_types)] + pub type listBuckets_0Call = + crate::machine_facade::imachinefacade::IMachineFacade::listBuckets_0Call; + #[allow(non_camel_case_types)] + pub type listBuckets_1Call = + crate::machine_facade::imachinefacade::IMachineFacade::listBuckets_1Call; + + pub type Machine = crate::machine_facade::imachinefacade::IMachineFacade::Machine; + pub type Kind = crate::machine_facade::imachinefacade::IMachineFacade::Kind; + pub type KeyValue = crate::machine_facade::imachinefacade::IMachineFacade::KeyValue; +} + +#[cfg(feature = "timehub")] +mod timehub_facade; +#[cfg(feature = "timehub")] +pub mod timehub { + pub type Events = crate::timehub_facade::itimehubfacade::ITimehubFacade::ITimehubFacadeEvents; + pub type EventPushed = crate::timehub_facade::itimehubfacade::ITimehubFacade::EventPushed; + + pub type Calls = crate::timehub_facade::itimehubfacade::ITimehubFacade::ITimehubFacadeCalls; + #[allow(non_camel_case_types)] + pub type pushCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::pushCall; + #[allow(non_camel_case_types)] + pub type getLeafAtCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getLeafAtCall; + #[allow(non_camel_case_types)] + pub type getRootCall = 
crate::timehub_facade::itimehubfacade::ITimehubFacade::getRootCall; + #[allow(non_camel_case_types)] + pub type getPeaksCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getPeaksCall; + #[allow(non_camel_case_types)] + pub type getCountCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getCountCall; +} diff --git a/recall-contracts/crates/facade/src/machine_facade/imachinefacade.rs b/recall-contracts/crates/facade/src/machine_facade/imachinefacade.rs new file mode 100644 index 0000000000..107a9b6e69 --- /dev/null +++ b/recall-contracts/crates/facade/src/machine_facade/imachinefacade.rs @@ -0,0 +1,1869 @@ +/** + +Generated by the following Solidity interface... +```solidity +interface IMachineFacade { + type Kind is uint8; + struct KeyValue { + string key; + string value; + } + struct Machine { + Kind kind; + address addr; + KeyValue[] metadata; + } + + event MachineCreated(uint8 indexed kind, address indexed owner, bytes metadata); + event MachineInitialized(uint8 indexed kind, address machineAddress); + + function createBucket() external returns (address); + function createBucket(address owner, KeyValue[] memory metadata) external returns (address); + function createBucket(address owner) external returns (address); + function listBuckets() external view returns (Machine[] memory); + function listBuckets(address owner) external view returns (Machine[] memory); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "createBucket", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "createBucket", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "createBucket", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "listBuckets", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "tuple[]", + "internalType": "struct Machine[]", + "components": [ + { + "name": "kind", + "type": "uint8", + "internalType": "enum Kind" + }, + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "listBuckets", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple[]", + "internalType": "struct Machine[]", + "components": [ + { + "name": "kind", + "type": "uint8", + "internalType": "enum Kind" + }, + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": 
"metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "event", + "name": "MachineCreated", + "inputs": [ + { + "name": "kind", + "type": "uint8", + "indexed": true, + "internalType": "uint8" + }, + { + "name": "owner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "MachineInitialized", + "inputs": [ + { + "name": "kind", + "type": "uint8", + "indexed": true, + "internalType": "uint8" + }, + { + "name": "machineAddress", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IMachineFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Kind(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl Kind { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Kind { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Kind { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct KeyValue { string key; string value; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct KeyValue { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: KeyValue) -> Self { + (value.key, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for KeyValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + value: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for KeyValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for KeyValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + 
&self.value, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for KeyValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for KeyValue { + const NAME: &'static str = "KeyValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("KeyValue(string key,string value)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.value, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for KeyValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.value, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.value, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Machine { Kind kind; address addr; KeyValue[] metadata; } + ```*/ + 
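`KeyValue` above and `Machine` declared next both implement `SolValue` with themselves as the sol type, so composite values ABI-encode directly as tuples. A sketch under the same assumptions as the earlier examples (note that `Kind` surfaces as a plain `u8` on the Rust side, and the metadata entry here is hypothetical):

```rust
use alloy_primitives::Address;
use alloy_sol_types::SolValue;

/// Illustrative: ABI-encode a Machine record as its tuple form.
fn encode_machine() -> Vec<u8> {
    let machine = IMachineFacade::Machine {
        kind: 0u8, // `Kind` is uint8-backed, so its Rust type is u8
        addr: Address::ZERO,
        metadata: vec![IMachineFacade::KeyValue {
            key: "alias".into(),
            value: "example-bucket".into(), // hypothetical metadata entry
        }],
    };
    machine.abi_encode()
}
```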
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Machine { + #[allow(missing_docs)] + pub kind: ::RustType, + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + Kind, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::RustType, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Machine) -> Self { + (value.kind, value.addr, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Machine { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + kind: tuple.0, + addr: tuple.1, + metadata: tuple.2, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Machine { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Machine { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + ::tokenize(&self.kind), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Machine { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Machine { + const NAME: &'static str = "Machine"; + #[inline] + fn eip712_root_type() -> 
alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Machine(uint8 kind,address addr,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + ::eip712_data_word(&self.kind).0, + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.addr, + ) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Machine { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + ::topic_preimage_length( + &rust.kind, + ) + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.addr, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + ::encode_topic_preimage(&rust.kind, out); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.addr, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `MachineCreated(uint8,address,bytes)` and selector `0x78344973573899e5da988496ab97476b3702ecfca371c6b25a61460f989d40d1`. 
+ ```solidity + event MachineCreated(uint8 indexed kind, address indexed owner, bytes metadata); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct MachineCreated { + #[allow(missing_docs)] + pub kind: u8, + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for MachineCreated { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<8>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "MachineCreated(uint8,address,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 120u8, 52u8, 73u8, 115u8, 87u8, 56u8, 153u8, 229u8, 218u8, 152u8, 132u8, 150u8, + 171u8, 151u8, 71u8, 107u8, 55u8, 2u8, 236u8, 252u8, 163u8, 113u8, 198u8, 178u8, + 90u8, 97u8, 70u8, 15u8, 152u8, 157u8, 64u8, 209u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + kind: topics.1, + owner: topics.2, + metadata: data.0, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.metadata, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + ( + Self::SIGNATURE_HASH.into(), + self.kind.clone(), + self.owner.clone(), + ) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic(&self.kind); + out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.owner, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MachineCreated { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&MachineCreated> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &MachineCreated) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `MachineInitialized(uint8,address)` and selector `0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e`. 
+ ```solidity + event MachineInitialized(uint8 indexed kind, address machineAddress); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct MachineInitialized { + #[allow(missing_docs)] + pub kind: u8, + #[allow(missing_docs)] + pub machineAddress: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for MachineInitialized { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<8>, + ); + const SIGNATURE: &'static str = "MachineInitialized(uint8,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 143u8, 114u8, 82u8, 100u8, 35u8, 115u8, 213u8, 240u8, 184u8, 154u8, 12u8, 92u8, + 217u8, 205u8, 36u8, 46u8, 92u8, 213u8, 187u8, 26u8, 54u8, 174u8, 198u8, 35u8, + 117u8, 110u8, 79u8, 82u8, 168u8, 193u8, 234u8, 110u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + kind: topics.1, + machineAddress: data.0, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.machineAddress, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.kind.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic(&self.kind); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MachineInitialized { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&MachineInitialized> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &MachineInitialized) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `createBucket()` and selector `0x4aa82ff5`. + ```solidity + function createBucket() external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_0Call {} + ///Container type for the return parameters of the [`createBucket()`](createBucket_0Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_0Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_0Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_0Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket()"; + const SELECTOR: [u8; 4] = [74u8, 168u8, 47u8, 245u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `createBucket(address,(string,string)[])` and selector `0xe129ed90`. + ```solidity + function createBucket(address owner, KeyValue[] memory metadata) external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_1Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + ///Container type for the return parameters of the [`createBucket(address,(string,string)[])`](createBucket_1Call) function. 
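+ ///
+ /// A hypothetical decoding sketch (variable names assumed, not from the source):
+ /// ```ignore
+ /// // `return_data` is the raw ABI-encoded return payload of the call.
+ /// let ret = createBucket_1Call::abi_decode_returns(&return_data, true)?;
+ /// let new_bucket = ret._0; // address of the created bucket
+ /// ```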
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_1Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_1Call) -> Self { + (value.owner, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + owner: tuple.0, + metadata: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_1Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket(address,(string,string)[])"; + const SELECTOR: [u8; 4] = [225u8, 41u8, 237u8, 144u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `createBucket(address)` and selector `0xf6d6c420`. 
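+ 
+ Note that a function selector is only the first four bytes of the keccak256 hash of the canonical signature (unlike the full 32-byte event topics above): the first four bytes of keccak256("createBucket(address)") are 0xf6, 0xd6, 0xc4, 0x20, i.e. the [246, 214, 196, 32] in the SELECTOR constant below.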
+ ```solidity + function createBucket(address owner) external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_2Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`createBucket(address)`](createBucket_2Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_2Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_2Call) -> Self { + (value.owner,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { owner: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_2Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_2Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_2Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket(address)"; + const SELECTOR: [u8; 4] = [246u8, 214u8, 196u8, 32u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `listBuckets()` and selector `0x63c244c2`. 
+ ```solidity + function listBuckets() external view returns (Machine[] memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_0Call {} + ///Container type for the return parameters of the [`listBuckets()`](listBuckets_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_0Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Array,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for listBuckets_0Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = listBuckets_0Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Array,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "listBuckets()"; + const SELECTOR: [u8; 4] = [99u8, 194u8, 68u8, 194u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `listBuckets(address)` and selector `0xd120303f`. + ```solidity + function listBuckets(address owner) external view returns (Machine[] memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_1Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`listBuckets(address)`](listBuckets_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_1Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_1Call) -> Self { + (value.owner,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { owner: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Array,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for listBuckets_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = listBuckets_1Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Array,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "listBuckets(address)"; + const SELECTOR: [u8; 4] = [209u8, 32u8, 48u8, 63u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`IMachineFacade`](self) function calls. + pub enum IMachineFacadeCalls { + #[allow(missing_docs)] + createBucket_0(createBucket_0Call), + #[allow(missing_docs)] + createBucket_1(createBucket_1Call), + #[allow(missing_docs)] + createBucket_2(createBucket_2Call), + #[allow(missing_docs)] + listBuckets_0(listBuckets_0Call), + #[allow(missing_docs)] + listBuckets_1(listBuckets_1Call), + } + #[automatically_derived] + impl IMachineFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. 
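+ ///
+ /// The selectors are listed in ascending byte order, which is what lets
+ /// `valid_selector` and `abi_decode_raw` below locate a selector with a
+ /// binary search; the decode-shim table is laid out in the same sorted order.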
+ /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [74u8, 168u8, 47u8, 245u8], + [99u8, 194u8, 68u8, 194u8], + [209u8, 32u8, 48u8, 63u8], + [225u8, 41u8, 237u8, 144u8], + [246u8, 214u8, 196u8, 32u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IMachineFacadeCalls { + const NAME: &'static str = "IMachineFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 5usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::createBucket_0(_) => { + ::SELECTOR + } + Self::createBucket_1(_) => { + ::SELECTOR + } + Self::createBucket_2(_) => { + ::SELECTOR + } + Self::listBuckets_0(_) => ::SELECTOR, + Self::listBuckets_1(_) => ::SELECTOR, + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn createBucket_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_0) + } + createBucket_0 + }, + { + fn listBuckets_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::listBuckets_0) + } + listBuckets_0 + }, + { + fn listBuckets_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::listBuckets_1) + } + listBuckets_1 + }, + { + fn createBucket_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_1) + } + createBucket_1 + }, + { + fn createBucket_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_2) + } + createBucket_2 + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::createBucket_0(inner) => { + ::abi_encoded_size(inner) + } + Self::createBucket_1(inner) => { + ::abi_encoded_size(inner) + } + Self::createBucket_2(inner) => { + ::abi_encoded_size(inner) + } + Self::listBuckets_0(inner) => { + ::abi_encoded_size(inner) + } + Self::listBuckets_1(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::createBucket_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::createBucket_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::createBucket_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::listBuckets_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::listBuckets_1(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`IMachineFacade`](self) events. 
+ pub enum IMachineFacadeEvents { + #[allow(missing_docs)] + MachineCreated(MachineCreated), + #[allow(missing_docs)] + MachineInitialized(MachineInitialized), + } + #[automatically_derived] + impl IMachineFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 120u8, 52u8, 73u8, 115u8, 87u8, 56u8, 153u8, 229u8, 218u8, 152u8, 132u8, 150u8, + 171u8, 151u8, 71u8, 107u8, 55u8, 2u8, 236u8, 252u8, 163u8, 113u8, 198u8, 178u8, + 90u8, 97u8, 70u8, 15u8, 152u8, 157u8, 64u8, 209u8, + ], + [ + 143u8, 114u8, 82u8, 100u8, 35u8, 115u8, 213u8, 240u8, 184u8, 154u8, 12u8, 92u8, + 217u8, 205u8, 36u8, 46u8, 92u8, 213u8, 187u8, 26u8, 54u8, 174u8, 198u8, 35u8, + 117u8, 110u8, 79u8, 82u8, 168u8, 193u8, 234u8, 110u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IMachineFacadeEvents { + const NAME: &'static str = "IMachineFacadeEvents"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::MachineCreated) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::MachineInitialized) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IMachineFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::MachineCreated(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::MachineInitialized(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::MachineCreated(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::MachineInitialized(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/machine_facade/mod.rs b/recall-contracts/crates/facade/src/machine_facade/mod.rs new file mode 100644 index 0000000000..8cac9630b5 --- /dev/null +++ b/recall-contracts/crates/facade/src/machine_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#imachinefacade; diff --git a/recall-contracts/crates/facade/src/timehub_facade/itimehubfacade.rs b/recall-contracts/crates/facade/src/timehub_facade/itimehubfacade.rs new file mode 100644 index 0000000000..f1f9e6aa1e --- /dev/null +++ b/recall-contracts/crates/facade/src/timehub_facade/itimehubfacade.rs @@ -0,0 +1,1101 @@ +/** + +Generated by the following Solidity interface... 
+```solidity +interface ITimehubFacade { + event EventPushed(uint256 index, uint256 timestamp, bytes cid); + + function getCount() external view returns (uint64); + function getLeafAt(uint64 index) external view returns (uint64 timestamp, bytes memory witnessed); + function getPeaks() external view returns (bytes[] memory cids); + function getRoot() external view returns (bytes memory cid); + function push(bytes memory cid) external returns (bytes memory root, uint64 index); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "getCount", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getLeafAt", + "inputs": [ + { + "name": "index", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [ + { + "name": "timestamp", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "witnessed", + "type": "bytes", + "internalType": "bytes" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getPeaks", + "inputs": [], + "outputs": [ + { + "name": "cids", + "type": "bytes[]", + "internalType": "bytes[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getRoot", + "inputs": [], + "outputs": [ + { + "name": "cid", + "type": "bytes", + "internalType": "bytes" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "push", + "inputs": [ + { + "name": "cid", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "root", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "index", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "EventPushed", + "inputs": [ + { + "name": "index", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "timestamp", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "cid", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod ITimehubFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `EventPushed(uint256,uint256,bytes)` and selector `0x9f2453a8c6b2912a42d606880c3eeaadcc940925c2af1349422a17b816155415`. 
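+ 
+ None of this event's parameters is `indexed`, so the only log topic is the signature hash itself and all three values (`index`, `timestamp`, `cid`) travel ABI-encoded in the log data, as the single-element `TopicList` below reflects. A hypothetical decoding sketch (the `log` shape is assumed):
+ 
+ ```rust,ignore
+ // `log` is an alloy-style log carrying `topics` and `data`.
+ let event = EventPushed::decode_raw_log(log.topics(), &log.data, true)?;
+ println!("leaf {} witnessed at {}", event.index, event.timestamp);
+ ```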
+ ```solidity + event EventPushed(uint256 index, uint256 timestamp, bytes cid); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct EventPushed { + #[allow(missing_docs)] + pub index: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub timestamp: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for EventPushed { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bytes, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "EventPushed(uint256,uint256,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 159u8, 36u8, 83u8, 168u8, 198u8, 178u8, 145u8, 42u8, 66u8, 214u8, 6u8, 136u8, + 12u8, 62u8, 234u8, 173u8, 204u8, 148u8, 9u8, 37u8, 194u8, 175u8, 19u8, 73u8, + 66u8, 42u8, 23u8, 184u8, 22u8, 21u8, 84u8, 21u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + index: data.0, + timestamp: data.1, + cid: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.index, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.timestamp, + ), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.cid, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for EventPushed { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&EventPushed> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &EventPushed) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `getCount()` and selector `0xa87d942c`. 
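+ 
+ Since `getCount()` takes no arguments, its calldata is exactly the four selector bytes; the empty parameter tuple encodes to nothing. An illustrative check (not from the source):
+ 
+ ```rust,ignore
+ use alloy_sol_types::SolCall;
+ 
+ assert_eq!(getCountCall {}.abi_encode(), getCountCall::SELECTOR.to_vec());
+ ```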
+ ```solidity + function getCount() external view returns (uint64); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCountCall {} + ///Container type for the return parameters of the [`getCount()`](getCountCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCountReturn { + #[allow(missing_docs)] + pub _0: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCountCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCountCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCountReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCountReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getCountCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getCountReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getCount()"; + const SELECTOR: [u8; 4] = [168u8, 125u8, 148u8, 44u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getLeafAt(uint64)` and selector `0x19fa4966`. + ```solidity + function getLeafAt(uint64 index) external view returns (uint64 timestamp, bytes memory witnessed); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getLeafAtCall { + #[allow(missing_docs)] + pub index: u64, + } + ///Container type for the return parameters of the [`getLeafAt(uint64)`](getLeafAtCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getLeafAtReturn { + #[allow(missing_docs)] + pub timestamp: u64, + #[allow(missing_docs)] + pub witnessed: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getLeafAtCall) -> Self { + (value.index,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getLeafAtCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { index: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Bytes, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64, ::alloy_sol_types::private::Bytes); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getLeafAtReturn) -> Self { + (value.timestamp, value.witnessed) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getLeafAtReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + timestamp: tuple.0, + witnessed: tuple.1, + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getLeafAtCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getLeafAtReturn; + type ReturnTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Bytes, + ); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getLeafAt(uint64)"; + const SELECTOR: [u8; 4] = [25u8, 250u8, 73u8, 102u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.index, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getPeaks()` and selector `0x0ae06fba`. + ```solidity + function getPeaks() external view returns (bytes[] memory cids); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getPeaksCall {} + ///Container type for the return parameters of the [`getPeaks()`](getPeaksCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getPeaksReturn { + #[allow(missing_docs)] + pub cids: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Bytes>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getPeaksCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getPeaksCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = + (::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Bytes>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::alloy_sol_types::private::Vec<::alloy_sol_types::private::Bytes>,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getPeaksReturn) -> Self { + (value.cids,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getPeaksReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cids: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getPeaksCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getPeaksReturn; + type ReturnTuple<'a> = + (::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Bytes>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getPeaks()"; + const SELECTOR: [u8; 4] = [10u8, 224u8, 111u8, 186u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getRoot()` and selector `0x5ca1e165`. + ```solidity + function getRoot() external view returns (bytes memory cid); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getRootCall {} + ///Container type for the return parameters of the [`getRoot()`](getRootCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getRootReturn { + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getRootCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getRootCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getRootReturn) -> Self { + (value.cid,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getRootReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cid: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getRootCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getRootReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getRoot()"; + const SELECTOR: [u8; 4] = [92u8, 161u8, 225u8, 101u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `push(bytes)` and selector `0x7dacda03`. + ```solidity + function push(bytes memory cid) external returns (bytes memory root, uint64 index); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct pushCall { + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + ///Container type for the return parameters of the [`push(bytes)`](pushCall) function. 
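+ ///
+ /// The returned `root`, like the `cid` values elsewhere in this facade, is an
+ /// opaque byte string (presumably a serialized IPLD CID from the timehub
+ /// accumulator); the bindings surface it as `Bytes` without interpretation.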
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct pushReturn { + #[allow(missing_docs)] + pub root: ::alloy_sol_types::private::Bytes, + #[allow(missing_docs)] + pub index: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: pushCall) -> Self { + (value.cid,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for pushCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cid: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes, u64); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: pushReturn) -> Self { + (value.root, value.index) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for pushReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + root: tuple.0, + index: tuple.1, + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for pushCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = pushReturn; + type ReturnTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "push(bytes)"; + const SELECTOR: [u8; 4] = [125u8, 172u8, 218u8, 3u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.cid, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`ITimehubFacade`](self) function calls. + pub enum ITimehubFacadeCalls { + #[allow(missing_docs)] + getCount(getCountCall), + #[allow(missing_docs)] + getLeafAt(getLeafAtCall), + #[allow(missing_docs)] + getPeaks(getPeaksCall), + #[allow(missing_docs)] + getRoot(getRootCall), + #[allow(missing_docs)] + push(pushCall), + } + #[automatically_derived] + impl ITimehubFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. 
+ /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [10u8, 224u8, 111u8, 186u8], + [25u8, 250u8, 73u8, 102u8], + [92u8, 161u8, 225u8, 101u8], + [125u8, 172u8, 218u8, 3u8], + [168u8, 125u8, 148u8, 44u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for ITimehubFacadeCalls { + const NAME: &'static str = "ITimehubFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 5usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::getCount(_) => ::SELECTOR, + Self::getLeafAt(_) => ::SELECTOR, + Self::getPeaks(_) => ::SELECTOR, + Self::getRoot(_) => ::SELECTOR, + Self::push(_) => ::SELECTOR, + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn getPeaks( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getPeaks) + } + getPeaks + }, + { + fn getLeafAt( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getLeafAt) + } + getLeafAt + }, + { + fn getRoot( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getRoot) + } + getRoot + }, + { + fn push( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::push) + } + push + }, + { + fn getCount( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getCount) + } + getCount + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::getCount(inner) => { + ::abi_encoded_size(inner) + } + Self::getLeafAt(inner) => { + ::abi_encoded_size(inner) + } + Self::getPeaks(inner) => { + ::abi_encoded_size(inner) + } + Self::getRoot(inner) => { + ::abi_encoded_size(inner) + } + Self::push(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::getCount(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getLeafAt(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getPeaks(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getRoot(inner) => { + ::abi_encode_raw(inner, out) + } + Self::push(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`ITimehubFacade`](self) events. + pub enum ITimehubFacadeEvents { + #[allow(missing_docs)] + EventPushed(EventPushed), + } + #[automatically_derived] + impl ITimehubFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
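+ ///
+ /// Unlike the 4-byte function selectors above, event "selectors" are the full
+ /// 32-byte keccak256 signature hashes that appear as log topic 0.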
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[[ + 159u8, 36u8, 83u8, 168u8, 198u8, 178u8, 145u8, 42u8, 66u8, 214u8, 6u8, 136u8, 12u8, + 62u8, 234u8, 173u8, 204u8, 148u8, 9u8, 37u8, 194u8, 175u8, 19u8, 73u8, 66u8, 42u8, + 23u8, 184u8, 22u8, 21u8, 84u8, 21u8, + ]]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for ITimehubFacadeEvents { + const NAME: &'static str = "ITimehubFacadeEvents"; + const COUNT: usize = 1usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::EventPushed) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ITimehubFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::EventPushed(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::EventPushed(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/recall-contracts/crates/facade/src/timehub_facade/mod.rs b/recall-contracts/crates/facade/src/timehub_facade/mod.rs new file mode 100644 index 0000000000..924d28bfee --- /dev/null +++ b/recall-contracts/crates/facade/src/timehub_facade/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#itimehubfacade; diff --git a/recall-contracts/crates/facade/src/types.rs b/recall-contracts/crates/facade/src/types.rs new file mode 100644 index 0000000000..ac16d20816 --- /dev/null +++ b/recall-contracts/crates/facade/src/types.rs @@ -0,0 +1,169 @@ +// Copyright 2025 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt; + +use alloy_primitives::{Sign, I256, U256}; +use anyhow::anyhow; +use fvm_shared::{ + address::{Address as FvmAddress, Payload}, + bigint::{BigInt, BigUint, Sign as BigSign}, + econ::TokenAmount, + ActorID, +}; + +pub use alloy_primitives::Address; +pub use alloy_sol_types::SolCall; +pub use alloy_sol_types::SolInterface; + +const EAM_ACTOR_ID: ActorID = 10; + +/// Fixed-size uninterpreted hash type with 20 bytes (160 bits) size. +#[derive(Default)] +pub struct H160([u8; 20]); + +impl H160 { + pub fn from_slice(slice: &[u8]) -> Self { + if slice.len() != 20 { + panic!("slice length must be exactly 20 bytes"); + } + let mut buf = [0u8; 20]; + buf.copy_from_slice(slice); + H160(buf) + } + + pub fn from_actor_id(id: ActorID) -> Self { + let mut buf = [0u8; 20]; + buf[0] = 0xff; + buf[12..].copy_from_slice(&id.to_be_bytes()); + H160(buf) + } + + pub fn to_fixed_bytes(&self) -> [u8; 20] { + self.0 + } + + /// Return true if it is a "0x00" address. 
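+ ///
+ /// Illustrative doc example (not from the source):
+ /// ```ignore
+ /// assert!(H160::default().is_null());
+ /// // Masked ID addresses start with 0xff, so they are never null.
+ /// assert!(!H160::from_actor_id(10).is_null());
+ /// ```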
+    pub fn is_null(&self) -> bool {
+        self.0 == [0; 20]
+    }
+
+    pub fn as_option(&self) -> Option<H160> {
+        if self.is_null() {
+            None
+        } else {
+            Some(H160(self.0))
+        }
+    }
+}
+
+impl TryFrom<&[u8]> for H160 {
+    type Error = anyhow::Error;
+    fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
+        if slice.len() != 20 {
+            return Err(anyhow!("slice length must be exactly 20 bytes"));
+        }
+        let mut buf = [0u8; 20];
+        buf.copy_from_slice(slice);
+        Ok(H160(buf))
+    }
+}
+
+impl fmt::Debug for H160 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "H160({:?})", &self.0)
+    }
+}
+
+impl TryFrom<FvmAddress> for H160 {
+    type Error = anyhow::Error;
+
+    fn try_from(value: FvmAddress) -> Result<Self, Self::Error> {
+        match value.payload() {
+            Payload::Delegated(d)
+                if d.namespace() == EAM_ACTOR_ID && d.subaddress().len() == 20 =>
+            {
+                Ok(H160::from_slice(d.subaddress()))
+            }
+            Payload::ID(id) => Ok(H160::from_actor_id(*id)),
+            _ => Err(anyhow!("not an evm address: {}", value)),
+        }
+    }
+}
+
+impl From<H160> for FvmAddress {
+    fn from(value: H160) -> Self {
+        // Copied from fil_actors_evm_shared
+        let bytes = value.to_fixed_bytes();
+        if bytes[0] == 0xff && bytes[1..12].iter().all(|&b| b == 0x00) {
+            let id = u64::from_be_bytes(bytes[12..].try_into().unwrap());
+            FvmAddress::new_id(id)
+        } else {
+            FvmAddress::new_delegated(EAM_ACTOR_ID, bytes.as_slice()).unwrap()
+        }
+    }
+}
+
+impl From<Address> for H160 {
+    fn from(address: Address) -> Self {
+        H160::from_slice(address.as_ref())
+    }
+}
+
+impl From<H160> for Address {
+    fn from(value: H160) -> Self {
+        Address::from(value.to_fixed_bytes())
+    }
+}
+
+#[derive(Default)]
+pub struct BigUintWrapper(pub BigUint);
+
+impl From<TokenAmount> for BigUintWrapper {
+    fn from(value: TokenAmount) -> Self {
+        let signed: BigInt = value.atto().clone();
+        let unsigned = signed.to_biguint().unwrap_or_default();
+        BigUintWrapper(unsigned)
+    }
+}
+
+impl From<U256> for BigUintWrapper {
+    fn from(value: U256) -> Self {
+        BigUintWrapper(BigUint::from_bytes_be(
+            &value.to_be_bytes::<{ U256::BYTES }>(),
+        ))
+    }
+}
+
+impl From<BigUintWrapper> for TokenAmount {
+    fn from(value: BigUintWrapper) -> Self {
+        TokenAmount::from_atto(value.0)
+    }
+}
+
+impl From<BigUintWrapper> for U256 {
+    fn from(value: BigUintWrapper) -> Self {
+        let digits = value.0.to_u64_digits();
+        match U256::overflowing_from_limbs_slice(&digits) {
+            (n, false) => n,
+            (_, true) => U256::MAX,
+        }
+    }
+}
+
+pub struct BigIntWrapper(pub BigInt);
+
+impl From<BigIntWrapper> for I256 {
+    fn from(value: BigIntWrapper) -> Self {
+        let (sign, digits) = value.0.to_u64_digits();
+        let sign = match sign {
+            BigSign::Minus => Sign::Negative,
+            BigSign::NoSign | BigSign::Plus => Sign::Positive,
+        };
+        let uint = U256::saturating_from_limbs_slice(&digits);
+        match I256::overflowing_from_sign_and_abs(sign, uint) {
+            (n, false) => n,
+            (_, true) => I256::MAX,
+        }
+    }
+}
diff --git a/recall/Makefile b/recall/Makefile
new file mode 100644
index 0000000000..8c9a62f53f
--- /dev/null
+++ b/recall/Makefile
@@ -0,0 +1,28 @@
+.PHONY: all build test clean lint check-fmt check-clippy
+
+CRATE := recall_actor_sdk recall_kernel recall_kernel_ops recall_syscalls iroh_manager
+PACKAGE := $(patsubst %, --package %, $(CRATE))
+
+all: test build
+
+build:
+	cargo build --locked --release
+
+test:
+	cargo test --locked --release $(PACKAGE)
+
+clean:
+	cargo clean
+
+lint: \
+	check-fmt \
+	check-clippy
+
+check-fmt:
+	@# `nightly` is required to support ignore list in rustfmt.toml
+	rustup install nightly-2024-09-20
+	rustup component add --toolchain nightly-2024-09-20 rustfmt
+	cargo +nightly-2024-09-20 fmt $(PACKAGE) --check
+
+check-clippy:
+	cargo clippy $(PACKAGE) --tests --no-deps -- -D clippy::all
diff --git a/recall/actor_sdk/Cargo.toml b/recall/actor_sdk/Cargo.toml
new file mode 100644
index 0000000000..d14bf619e5
--- /dev/null
+++ b/recall/actor_sdk/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "recall_actor_sdk"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[lib]
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+fvm_shared = { workspace = true }
+fvm_sdk = { workspace = true }
+num-traits = { workspace = true }
+fil_actors_runtime = { workspace = true }
+fil_actor_adm = { workspace = true }
+recall_sol_facade = { workspace = true, features = [] }
+anyhow = { workspace = true }
+fvm_ipld_encoding = { workspace = true }
+serde = { workspace = true }
+cid = { workspace = true }
diff --git a/recall/actor_sdk/src/caller.rs b/recall/actor_sdk/src/caller.rs
new file mode 100644
index 0000000000..45c06703d1
--- /dev/null
+++ b/recall/actor_sdk/src/caller.rs
@@ -0,0 +1,162 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fil_actors_runtime::{extract_send_result, runtime::Runtime, ActorError};
+use fvm_shared::{address::Address, bigint::Zero, econ::TokenAmount, error::ExitCode, METHOD_SEND};
+
+use crate::util::{to_id_address, to_id_and_delegated_address};
+
+/// Helper struct for managing actor message caller and sponsor addresses.
+#[derive(Debug)]
+pub struct Caller {
+    /// Caller ID-address.
+    id_addr: Address,
+    /// Caller delegated address.
+    delegated_addr: Option<Address>,
+    /// Caller's sponsor ID-address.
+    sponsor_id_addr: Option<Address>,
+    /// Caller's sponsor delegated address.
+    sponsor_delegated_addr: Option<Address>,
+    /// Whether the caller actor was created.
+    created: bool,
+}
+
+/// Caller option (authenticate or create).
+#[derive(Debug, Default)]
+pub enum CallerOption {
+    #[default]
+    None,
+    /// The target address must be the runtime's message origin or caller.
+    Auth,
+    /// Create the target address if it's not found.
+    Create,
+}
+
+impl Caller {
+    /// Returns a new caller.
+    /// TODO: Remove origin authentication after the solidity facades are complete.
+    pub fn new(
+        rt: &impl Runtime,
+        address: Address,
+        sponsor: Option<Address>,
+        option: CallerOption,
+    ) -> Result<Self, ActorError> {
+        let mut created = false;
+        let id_addr = match to_id_address(rt, address, false) {
+            Ok(addr) => Ok(addr),
+            Err(e)
+                if matches!(option, CallerOption::Create)
+                    && e.exit_code() == ExitCode::USR_NOT_FOUND =>
+            {
+                create_actor(rt, address)?;
+                created = true;
+                to_id_address(rt, address, false)
+            }
+            Err(e) => Err(e),
+        }?;
+
+        let caller = match sponsor {
+            Some(sponsor) => {
+                let sponsor_id_addr = to_id_address(rt, sponsor, false)?;
+                Self {
+                    id_addr,
+                    delegated_addr: None,
+                    sponsor_id_addr: Some(sponsor_id_addr),
+                    sponsor_delegated_addr: None,
+                    created,
+                }
+            }
+            None => Self {
+                id_addr,
+                delegated_addr: None,
+                sponsor_id_addr: None,
+                sponsor_delegated_addr: None,
+                created,
+            },
+        };
+        Ok(caller)
+    }
+
+    /// Returns a new caller.
+    /// Caller and sponsor must have a delegated address.
+    /// TODO: Remove origin authentication after the solidity facades are complete.
+    pub fn new_delegated(
+        rt: &impl Runtime,
+        address: Address,
+        sponsor: Option<Address>,
+        option: CallerOption,
+    ) -> Result<Self, ActorError> {
+        let mut created = false;
+        let (id_addr, delegated_addr) = match to_id_and_delegated_address(rt, address) {
+            Ok(addrs) => Ok(addrs),
+            Err(e)
+                if matches!(option, CallerOption::Create)
+                    && e.exit_code() == ExitCode::USR_NOT_FOUND =>
+            {
+                create_actor(rt, address)?;
+                created = true;
+                to_id_and_delegated_address(rt, address)
+            }
+            Err(e) => Err(e),
+        }?;
+
+        let caller = match sponsor {
+            Some(sponsor) => {
+                let (sponsor_id_addr, sponsor_delegated_addr) =
+                    to_id_and_delegated_address(rt, sponsor)?;
+                Self {
+                    id_addr,
+                    delegated_addr: Some(delegated_addr),
+                    sponsor_id_addr: Some(sponsor_id_addr),
+                    sponsor_delegated_addr: Some(sponsor_delegated_addr),
+                    created,
+                }
+            }
+            None => Self {
+                id_addr,
+                delegated_addr: Some(delegated_addr),
+                sponsor_id_addr: None,
+                sponsor_delegated_addr: None,
+                created,
+            },
+        };
+        Ok(caller)
+    }
+
+    /// Returns the caller delegated address.
+    pub fn address(&self) -> Address {
+        self.delegated_addr.unwrap_or(self.id_addr)
+    }
+
+    /// Returns the caller address that should be used with actor state methods.
+    pub fn state_address(&self) -> Address {
+        self.id_addr
+    }
+
+    /// Returns the sponsor address that should be used with actor state methods.
+    pub fn sponsor_state_address(&self) -> Option<Address> {
+        self.sponsor_id_addr
+    }
+
+    /// Returns the sponsor delegated address.
+    pub fn sponsor_address(&self) -> Option<Address> {
+        self.sponsor_delegated_addr
+    }
+
+    /// Returns the address that should be used with events.
+    pub fn event_address(&self) -> Address {
+        self.sponsor_delegated_addr.unwrap_or(self.address())
+    }
+
+    /// Returns whether the caller actor was created.
+    pub fn created(&self) -> bool {
+        self.created
+    }
+}
+
+/// Creates a new placeholder actor by sending zero tokens to the address.
+fn create_actor(rt: &impl Runtime, address: Address) -> Result<(), ActorError> {
+    extract_send_result(rt.send_simple(&address, METHOD_SEND, None, TokenAmount::zero()))?;
+    Ok(())
+}
diff --git a/recall/actor_sdk/src/constants.rs b/recall/actor_sdk/src/constants.rs
new file mode 100644
index 0000000000..16c063133b
--- /dev/null
+++ b/recall/actor_sdk/src/constants.rs
@@ -0,0 +1,11 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+//! Constants for Recall actors
+
+use fvm_shared::address::Address;
+
+/// ADM (Autonomous Data Management) actor address
+/// Actor ID 17 is reserved for ADM in Recall networks
+pub const ADM_ACTOR_ADDR: Address = Address::new_id(17);
diff --git a/recall/actor_sdk/src/evm.rs b/recall/actor_sdk/src/evm.rs
new file mode 100644
index 0000000000..61e05d2391
--- /dev/null
+++ b/recall/actor_sdk/src/evm.rs
@@ -0,0 +1,152 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError};
+use fvm_ipld_encoding::{strict_bytes, tuple::*};
+use fvm_shared::event::{ActorEvent, Entry, Flags};
+use fvm_shared::IPLD_RAW;
+use recall_sol_facade::primitives::IntoLogData;
+
+/// The event key prefix for the Ethereum log topics.
+const EVENT_TOPIC_KEY_PREFIX: &str = "t";
+
+/// The event key for the Ethereum log data.
+const EVENT_DATA_KEY: &str = "d";
+
+pub trait TryIntoEVMEvent {
+    type Target: IntoLogData;
+    fn try_into_evm_event(self) -> Result<Self::Target, anyhow::Error>;
+}
+
+/// Returns an [`ActorEvent`] from an EVM event.
+pub fn to_actor_event<T: TryIntoEVMEvent>(event: T) -> Result<ActorEvent, ActorError> {
+    let event = event
+        .try_into_evm_event()
+        .map_err(|e| actor_error!(illegal_argument; "failed to build evm event: {}", e))?;
+    let log = event.to_log_data();
+    let num_entries = log.topics().len() + 1; // +1 for log data
+
+    let mut entries: Vec<Entry> = Vec::with_capacity(num_entries);
+    for (i, topic) in log.topics().iter().enumerate() {
+        let key = format!("{}{}", EVENT_TOPIC_KEY_PREFIX, i + 1);
+        entries.push(Entry {
+            flags: Flags::FLAG_INDEXED_ALL,
+            key,
+            codec: IPLD_RAW,
+            value: topic.to_vec(),
+        });
+    }
+    entries.push(Entry {
+        flags: Flags::FLAG_INDEXED_ALL,
+        key: EVENT_DATA_KEY.to_owned(),
+        codec: IPLD_RAW,
+        value: log.data.to_vec(),
+    });
+
+    Ok(entries.into())
+}
+
+/// Emits an [`ActorEvent`] from an EVM event.
+pub fn emit_evm_event<T: TryIntoEVMEvent>(rt: &impl Runtime, event: T) -> Result<(), ActorError> {
+    let actor_event = to_actor_event(event)?;
+    rt.emit_event(&actor_event)
+}
+
+/// Params for invoking a contract.
+#[derive(Default, Serialize_tuple, Deserialize_tuple)]
+#[serde(transparent)]
+pub struct InvokeContractParams {
+    #[serde(with = "strict_bytes")]
+    pub input_data: Vec<u8>,
+}
+
+/// EVM call with selector (first 4 bytes) and calldata (remaining bytes).
+pub struct InputData(Vec<u8>);
+
+impl InputData {
+    /// Returns the selector bytes.
+    pub fn selector(&self) -> [u8; 4] {
+        let mut selector = [0u8; 4];
+        selector.copy_from_slice(&self.0[0..4]);
+        selector
+    }
+
+    /// Returns the calldata bytes.
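+    /// For example, for the input bytes `[0xa8, 0x7d, 0x94, 0x2c, 0x01, 0x02]`,
+    /// `selector()` returns `[0xa8, 0x7d, 0x94, 0x2c]` and `calldata()` returns
+    /// `[0x01, 0x02]` (an illustrative split, not a real call).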
+    pub fn calldata(&self) -> &[u8] {
+        &self.0[4..]
+    }
+}
+
+impl TryFrom<InvokeContractParams> for InputData {
+    type Error = ActorError;
+
+    fn try_from(value: InvokeContractParams) -> Result<Self, Self::Error> {
+        if value.input_data.len() < 4 {
+            return Err(ActorError::illegal_argument("input too short".to_string()));
+        }
+        Ok(InputData(value.input_data))
+    }
+}
+
+#[macro_export]
+macro_rules! declare_abi_call {
+    () => {
+        pub trait AbiCall {
+            type Params;
+            type Returns;
+            type Output;
+            fn params(&self) -> Self::Params;
+            fn returns(&self, returns: Self::Returns) -> Self::Output;
+        }
+
+        pub trait AbiCallRuntime {
+            type Params;
+            type Returns;
+            type Output;
+            fn params(&self, rt: &impl fil_actors_runtime::runtime::Runtime) -> Self::Params;
+            fn returns(&self, returns: Self::Returns) -> Self::Output;
+        }
+
+        #[derive(Debug, Clone)]
+        pub struct AbiEncodeError {
+            message: String,
+        }
+
+        impl From<anyhow::Error> for AbiEncodeError {
+            fn from(error: anyhow::Error) -> Self {
+                Self {
+                    message: format!("failed to abi encode {}", error),
+                }
+            }
+        }
+
+        impl From<String> for AbiEncodeError {
+            fn from(message: String) -> Self {
+                Self { message }
+            }
+        }
+
+        impl From<fil_actors_runtime::ActorError> for AbiEncodeError {
+            fn from(error: fil_actors_runtime::ActorError) -> Self {
+                Self {
+                    message: format!("{}", error),
+                }
+            }
+        }
+
+        impl From<AbiEncodeError> for fil_actors_runtime::ActorError {
+            fn from(error: AbiEncodeError) -> Self {
+                fil_actors_runtime::actor_error!(serialization, error.message)
+            }
+        }
+    };
+}
+
+/// Returned when invoking a contract.
+#[derive(Serialize_tuple, Deserialize_tuple)]
+#[serde(transparent)]
+pub struct InvokeContractReturn {
+    #[serde(with = "strict_bytes")]
+    pub output_data: Vec<u8>,
+}
diff --git a/recall/actor_sdk/src/lib.rs b/recall/actor_sdk/src/lib.rs
new file mode 100644
index 0000000000..67d3ab6cb2
--- /dev/null
+++ b/recall/actor_sdk/src/lib.rs
@@ -0,0 +1,9 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+pub mod caller;
+pub mod constants;
+pub mod evm;
+pub mod storage;
+pub mod util;
diff --git a/recall/actor_sdk/src/storage.rs b/recall/actor_sdk/src/storage.rs
new file mode 100644
index 0000000000..41c13a2eaf
--- /dev/null
+++ b/recall/actor_sdk/src/storage.rs
@@ -0,0 +1,21 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm_shared::error::ErrorNumber;
+
+/// Deletes a blob by hash from backing storage.
+pub fn delete_blob(hash: [u8; 32]) -> Result<(), ErrorNumber> {
+    unsafe { sys::delete_blob(hash.as_ptr()) }
+}
+
+mod sys {
+    use fvm_sdk::sys::fvm_syscalls;
+
+    fvm_syscalls! {
+        module = "recall";
+
+        /// Deletes a blob by hash from backing storage.
+        pub fn delete_blob(hash_ptr: *const u8) -> Result<()>;
+    }
+}
diff --git a/recall/actor_sdk/src/util.rs b/recall/actor_sdk/src/util.rs
new file mode 100644
index 0000000000..c8acabe036
--- /dev/null
+++ b/recall/actor_sdk/src/util.rs
@@ -0,0 +1,105 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use cid::Cid;
+use fil_actors_runtime::{
+    deserialize_block, extract_send_result,
+    runtime::{builtins::Type, Runtime},
+    ActorError,
+};
+use fvm_ipld_encoding::ipld_block::IpldBlock;
+use fvm_shared::sys::SendFlags;
+use fvm_shared::{address::Address, bigint::BigUint, econ::TokenAmount, MethodNum};
+use num_traits::Zero;
+
+use crate::constants::ADM_ACTOR_ADDR;
+pub use fil_actor_adm::Kind;
+
+/// Resolves ID address of an actor.
+/// If `require_delegated` is `true`, the address must be of type
+/// EVM (a Solidity contract), EthAccount (an Ethereum-style EOA), or Placeholder (a yet to be
+/// determined EOA or Solidity contract).
+pub fn to_id_address(
+    rt: &impl Runtime,
+    address: Address,
+    require_delegated: bool,
+) -> Result<Address, ActorError> {
+    let actor_id = rt
+        .resolve_address(&address)
+        .ok_or(ActorError::not_found(format!(
+            "actor {} not found",
+            address
+        )))?;
+    if require_delegated {
+        let code_cid = rt.get_actor_code_cid(&actor_id).ok_or_else(|| {
+            ActorError::not_found(format!("actor {} code cid not found", address))
+        })?;
+        if !matches!(
+            rt.resolve_builtin_actor_type(&code_cid),
+            Some(Type::Placeholder | Type::EVM | Type::EthAccount)
+        ) {
+            return Err(ActorError::forbidden(format!(
+                "invalid address: address {} is not delegated",
+                address,
+            )));
+        }
+    }
+    Ok(Address::new_id(actor_id))
+}
+
+/// Resolves an address to its external delegated address.
+pub fn to_delegated_address(rt: &impl Runtime, address: Address) -> Result<Address, ActorError> {
+    Ok(to_id_and_delegated_address(rt, address)?.1)
+}
+
+/// Resolves an address to its ID address and external delegated address.
+pub fn to_id_and_delegated_address(
+    rt: &impl Runtime,
+    address: Address,
+) -> Result<(Address, Address), ActorError> {
+    let actor_id = rt
+        .resolve_address(&address)
+        .ok_or(ActorError::not_found(format!(
+            "actor {} not found",
+            address
+        )))?;
+    let delegated = rt
+        .lookup_delegated_address(actor_id)
+        .ok_or(ActorError::forbidden(format!(
+            "invalid address: actor {} is not delegated",
+            address
+        )))?;
+    Ok((Address::new_id(actor_id), delegated))
+}
+
+/// Returns the [`TokenAmount`] as a [`BigUint`].
+/// If the given amount is negative, the value returned will be zero.
+pub fn token_to_biguint(amount: Option<TokenAmount>) -> BigUint {
+    amount
+        .unwrap_or_default()
+        .atto()
+        .to_biguint()
+        .unwrap_or_default()
+}
+
+/// Checks if an address is a bucket actor by comparing its code CID
+/// with the bucket code CID registered in the ADM actor.
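+/// The hardcoded method number sent below (2892692559) is assumed here to be the ADM
+/// actor's read-only method that returns the code [`Cid`] registered for a machine
+/// [`Kind`]; the caller's code CID is then compared against it.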
+pub fn is_bucket_address(rt: &impl Runtime, address: Address) -> Result { + let caller_code_cid = rt + .resolve_address(&address) + .and_then(|actor_id| rt.get_actor_code_cid(&actor_id)); + if let Some(caller_code_cid) = caller_code_cid { + let bucket_code_cid = deserialize_block::(extract_send_result(rt.send( + &ADM_ACTOR_ADDR, + 2892692559 as MethodNum, + IpldBlock::serialize_cbor(&Kind::Bucket)?, + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + ))?)?; + Ok(caller_code_cid.eq(&bucket_code_cid)) + } else { + Ok(false) + } +} diff --git a/recall/executor/Cargo.toml b/recall/executor/Cargo.toml new file mode 100644 index 0000000000..ce07282d0a --- /dev/null +++ b/recall/executor/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "recall_executor" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true } +fvm = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-traits = { workspace = true } +replace_with = { workspace = true } +tracing = { workspace = true } + +fendermint_actor_blobs_shared = { path = "../../fendermint/actors/blobs/shared" } +fendermint_vm_actor_interface = { path = "../../fendermint/vm/actor_interface" } + +[features] +testing = [] diff --git a/recall/executor/src/lib.rs b/recall/executor/src/lib.rs new file mode 100644 index 0000000000..8047497fc7 --- /dev/null +++ b/recall/executor/src/lib.rs @@ -0,0 +1,807 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::ops::{Deref, DerefMut}; +use std::result::Result as StdResult; + +use anyhow::{anyhow, bail, Context, Result}; +use cid::Cid; +use fendermint_actor_blobs_shared::{ + credit::{GasAllowance, GetGasAllowanceParams, UpdateGasAllowanceParams}, + method::Method::{GetGasAllowance, UpdateGasAllowance}, + BLOBS_ACTOR_ADDR, BLOBS_ACTOR_ID, +}; +use fendermint_vm_actor_interface::{eam::EAM_ACTOR_ID, system::SYSTEM_ACTOR_ADDR}; +use fvm::call_manager::{backtrace, Backtrace, CallManager, Entrypoint, InvocationResult}; +use fvm::engine::EnginePool; +use fvm::executor::{ApplyFailure, ApplyKind, ApplyRet, Executor}; +use fvm::gas::{Gas, GasCharge}; +use fvm::kernel::{Block, ClassifyResult, Context as _, ExecutionError, Kernel}; +use fvm::machine::{Machine, BURNT_FUNDS_ACTOR_ID, REWARD_ACTOR_ID}; +use fvm::trace::ExecutionTrace; +use fvm_ipld_encoding::{RawBytes, CBOR}; +use fvm_shared::{ + address::{Address, Payload}, + econ::TokenAmount, + error::{ErrorNumber, ExitCode}, + event::StampedEvent, + message::Message, + receipt::Receipt, + ActorID, IPLD_RAW, METHOD_SEND, +}; +use num_traits::Zero; +use tracing::debug; + +mod outputs; + +use crate::outputs::{GasAmounts, GasOutputs}; + +/// The default [`Executor`]. +/// +/// # Warning +/// +/// Message execution might run out of stack and crash (the entire process) if it doesn't have at +/// least 64MiB of stack space. If you can't guarantee 64MiB of stack space, wrap this executor in +/// a [`ThreadedExecutor`][super::ThreadedExecutor]. +pub struct RecallExecutor { + engine_pool: EnginePool, + // If the inner value is `None,` it means the machine got poisoned and is unusable. 
+ machine: Option<::Machine>, +} + +impl Deref for RecallExecutor { + type Target = ::Machine; + + fn deref(&self) -> &Self::Target { + self.machine.as_ref().expect("machine poisoned") + } +} + +impl DerefMut for RecallExecutor { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut *self.machine.as_mut().expect("machine poisoned") + } +} + +impl Executor for RecallExecutor +where + K: Kernel, +{ + type Kernel = K; + + /// This is the entrypoint to execute a message. + fn execute_message( + &mut self, + msg: Message, + apply_kind: ApplyKind, + raw_length: usize, + ) -> Result { + self.execute_message_with_revert(msg, apply_kind, raw_length, false) + } + + /// Flush the state-tree to the underlying blockstore. + fn flush(&mut self) -> Result { + let k = (**self).flush()?; + Ok(k) + } +} + +impl RecallExecutor +where + K: Kernel, +{ + /// Create a new [`RecallExecutor`] for executing messages on the [`Machine`]. + pub fn new( + engine_pool: EnginePool, + machine: ::Machine, + ) -> Result { + // Skip preloading all builtin actors when testing. + #[cfg(not(any(test, feature = "testing")))] + { + // Preload any uncached modules. + // This interface works for now because we know all actor CIDs + // ahead of time, but with user-supplied code, we won't have that + // guarantee. + engine_pool.acquire().preload_all( + machine.blockstore(), + machine.builtin_actors().builtin_actor_codes(), + )?; + } + Ok(Self { + engine_pool, + machine: Some(machine), + }) + } + + /// Consume consumes the executor and returns the Machine. If the Machine had + /// been poisoned during execution, the Option will be None. + pub fn into_machine(self) -> Option<::Machine> { + self.machine + } + + /// This is the entrypoint to execute a message that allows caller to revert the execution. + /// The revert is generally useful for read-only transactions. + pub fn execute_message_with_revert( + &mut self, + msg: Message, + apply_kind: ApplyKind, + raw_length: usize, + always_revert: bool, + ) -> Result { + self.execute_message_internal(msg, apply_kind, raw_length, always_revert) + } + + fn execute_message_internal( + &mut self, + msg: Message, + mut apply_kind: ApplyKind, + raw_length: usize, + always_revert: bool, + ) -> Result { + if always_revert { + // The apply kind is always hard coded to implicit if the call is expected to revert. + // This will bypass some checks and gas deduction in `preflight_messages`. + apply_kind = ApplyKind::Implicit; + } + // Validate if the message was correct, charge for it, and extract some preliminary data. + let (sender_id, sponsor_id, gas_costs, inclusion_cost) = + match self.preflight_message(&msg, apply_kind, raw_length)? { + Ok(res) => res, + Err(apply_ret) => return Ok(apply_ret), + }; + + struct MachineExecRet { + result: fvm::kernel::Result, + gas_used: u64, + backtrace: Backtrace, + exec_trace: ExecutionTrace, + events_root: Option, + events: Vec, // TODO consider removing if nothing in the client ends up using it. + } + + // Pre-resolve the message receiver's address, if known. + let receiver_id = self + .state_tree() + .lookup_id(&msg.to) + .context("failure when looking up message receiver")?; + + // Filecoin caps the premium plus the base-fee at the fee-cap. + // We expose the _effective_ premium to the user. + let effective_premium = msg + .gas_premium + .clone() + .min(&msg.gas_fee_cap - &self.context().base_fee) + .max(TokenAmount::zero()); + + // Acquire an engine from the pool. 
This may block if there are concurrently executing + // messages inside other executors sharing the same pool. + let engine = self.engine_pool.acquire(); + + // Apply the message. + let ret = self.map_machine(|machine| { + // We're processing a chain message, so the sender is the origin of the call stack. + let mut cm = K::CallManager::new( + machine, + engine, + msg.gas_limit, + sender_id, + msg.from, + receiver_id, + msg.to, + msg.sequence, + effective_premium, + ); + // This error is fatal because it should have already been accounted for inside + // preflight_message. + if let Err(e) = cm.charge_gas(inclusion_cost) { + let (_, machine) = cm.finish(); + return (Err(e), machine); + } + + let params = (!msg.params.is_empty()).then(|| { + Block::new( + if msg.method_num == METHOD_SEND { + // Method zero params are "arbitrary bytes", so we'll just count them as + // raw. + // + // This won't actually affect anything (because no code will see these + // parameters), but it's more correct and makes me happier. + // + // NOTE: this _may_ start to matter once we start _validating_ ipld (m2.2). + IPLD_RAW + } else { + // This is CBOR, not DAG_CBOR, because links sent from off-chain aren't + // reachable. + CBOR + }, + msg.params.bytes(), + // not DAG-CBOR, so we don't have to parse for links. + Vec::new(), + ) + }); + + let result = cm.with_transaction( + |cm| { + // Invoke the message. We charge for the return value internally if the call-stack depth + // is 1. + cm.call_actor::( + sender_id, + msg.to, + Entrypoint::Invoke(msg.method_num), + params, + &msg.value, + None, + false, + ) + }, + always_revert, + ); // FVM 4.7: with_transaction now requires read_only bool parameter + + let (res, machine) = match cm.finish() { + (Ok(res), machine) => (res, machine), + (Err(err), machine) => return (Err(err), machine), + }; + + ( + Ok(MachineExecRet { + result, + gas_used: res.gas_used, + backtrace: res.backtrace, + exec_trace: res.exec_trace, + events_root: res.events_root, + events: res.events, + }), + machine, + ) + })?; + + let MachineExecRet { + result: res, + gas_used, + mut backtrace, + exec_trace, + events_root, + events, + } = ret; + + // Extract the exit code and build the result of the message application. + let receipt = match res { + Ok(InvocationResult { exit_code, value }) => { + // Convert back into a top-level return "value". We throw away the codec here, + // unfortunately. + let return_data = value + .map(|blk| RawBytes::from(blk.data().to_vec())) + .unwrap_or_default(); + + if exit_code.is_success() { + backtrace.clear(); + } + Receipt { + exit_code, + return_data, + gas_used, + events_root, + } + } + Err(ExecutionError::OutOfGas) => Receipt { + exit_code: ExitCode::SYS_OUT_OF_GAS, + return_data: Default::default(), + gas_used, + events_root, + }, + Err(ExecutionError::Syscall(err)) => { + // Errors indicate the message couldn't be dispatched at all + // (as opposed to failing during execution of the receiving actor). + // These errors are mapped to exit codes that persist on chain. 
+ let exit_code = match err.1 { + ErrorNumber::InsufficientFunds => ExitCode::SYS_INSUFFICIENT_FUNDS, + ErrorNumber::NotFound => ExitCode::SYS_INVALID_RECEIVER, + _ => ExitCode::SYS_ASSERTION_FAILED, + }; + + backtrace.begin(backtrace::Cause::from_syscall("send", "send", err)); + Receipt { + exit_code, + return_data: Default::default(), + gas_used, + events_root, + } + } + Err(ExecutionError::Fatal(err)) => { + // We produce a receipt with SYS_ASSERTION_FAILED exit code, and + // we consume the full gas amount so that, in case of a network- + // wide fatal errors, all nodes behave deterministically. + // + // We set the backtrace from the fatal error to aid diagnosis. + // Note that we use backtrace#set_cause instead of backtrace#begin + // because we want to retain the propagation chain that we've + // accumulated on the way out. + let err = err.context(format!( + "[from={}, to={}, seq={}, m={}, h={}]", + msg.from, + msg.to, + msg.sequence, + msg.method_num, + self.context().epoch, + )); + backtrace.set_cause(backtrace::Cause::from_fatal(err)); + Receipt { + exit_code: ExitCode::SYS_ASSERTION_FAILED, + return_data: Default::default(), + gas_used: msg.gas_limit, + events_root, + } + } + }; + + let failure_info = if backtrace.is_empty() || receipt.exit_code.is_success() { + None + } else { + Some(ApplyFailure::MessageBacktrace(backtrace)) + }; + + match apply_kind { + ApplyKind::Explicit => self.finish_message( + sender_id, + sponsor_id, + msg, + receipt, + failure_info, + gas_costs, + exec_trace, + events, + ), + ApplyKind::Implicit => Ok(ApplyRet { + msg_receipt: receipt, + penalty: TokenAmount::zero(), + miner_tip: TokenAmount::zero(), + base_fee_burn: TokenAmount::zero(), + over_estimation_burn: TokenAmount::zero(), + refund: TokenAmount::zero(), + gas_refund: 0, + gas_burned: 0, + failure_info, + exec_trace, + events, + }), + } + } + + // TODO: The return type here is very strange because we have three cases: + // 1. Continue: Return sender ID, & gas. + // 2. Short-circuit: Return ApplyRet. + // 3. Fail: Return an error. + // We could use custom types, but that would be even more annoying. + fn preflight_message( + &mut self, + msg: &Message, + apply_kind: ApplyKind, + raw_length: usize, + ) -> Result, GasAmounts, GasCharge), ApplyRet>> { + msg.check().or_fatal()?; + + // TODO We don't like having price lists _inside_ the FVM, but passing + // these across the boundary is also a no-go. + let pl = &self.context().price_list; + + let (inclusion_cost, miner_penalty_amount) = match apply_kind { + ApplyKind::Implicit => ( + GasCharge::new("none", Gas::zero(), Gas::zero()), + Default::default(), + ), + ApplyKind::Explicit => { + let inclusion_cost = pl.on_chain_message(raw_length); + let inclusion_total = inclusion_cost.total().round_up(); + + // Verify the cost of the message is not over the message gas limit. + if inclusion_total > msg.gas_limit { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_OUT_OF_GAS, + format!("Out of gas ({} > {})", inclusion_total, msg.gas_limit), + &self.context().base_fee * inclusion_total, + ))); + } + + let miner_penalty_amount = &self.context().base_fee * msg.gas_limit; + (inclusion_cost, miner_penalty_amount) + } + }; + + // Load sender actor state. + let sender_id = match self + .state_tree() + .lookup_id(&msg.from) + .with_context(|| format!("failed to lookup actor {}", &msg.from))? 
+ { + Some(id) => id, + None => { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_INVALID, + "Sender invalid", + miner_penalty_amount, + ))); + } + }; + + if apply_kind == ApplyKind::Implicit { + return Ok(Ok((sender_id, None, GasAmounts::default(), inclusion_cost))); + } + + let mut sender_state = match self + .state_tree() + .get_actor(sender_id) + .with_context(|| format!("failed to lookup actor {}", &msg.from))? + { + Some(act) => act, + None => { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_INVALID, + "Sender invalid", + miner_penalty_amount, + ))); + } + }; + + // Sender is valid if it is: + // - an account actor + // - an Ethereum Externally Owned Address + // - a placeholder actor that has an f4 address in the EAM's namespace + + let mut sender_is_valid = self.builtin_actors().is_account_actor(&sender_state.code) + || self + .builtin_actors() + .is_ethaccount_actor(&sender_state.code); + + if self.builtin_actors().is_placeholder_actor(&sender_state.code) && + sender_state.sequence == 0 && + sender_state + .delegated_address + .map(|a| matches!(a.payload(), Payload::Delegated(da) if da.namespace() == EAM_ACTOR_ID)) + .unwrap_or(false) { + sender_is_valid = true; + sender_state.code = *self.builtin_actors().get_ethaccount_code(); + } + + if !sender_is_valid { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_INVALID, + "Send not from valid sender", + miner_penalty_amount, + ))); + }; + + // Check sequence is correct + if msg.sequence != sender_state.sequence { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_STATE_INVALID, + format!( + "Actor sequence invalid: {} != {}", + msg.sequence, sender_state.sequence + ), + miner_penalty_amount, + ))); + }; + + sender_state.sequence += 1; + + // Get sender's gas allowance for gas fees. + let gas_allowance = self.get_gas_allowance(msg.from)?; + + // Pre-resolve the message sponsor's address, if known. + let sponsor_id = if let Some(sponsor) = gas_allowance.sponsor { + self.state_tree() + .lookup_id(&sponsor) + .context("failure when looking up message sponsor")? + } else { + None + }; + + // Ensure from actor has enough balance to cover the gas cost of the message. + let total_gas_allowance = gas_allowance.total(); + let total_gas_cost: TokenAmount = msg.gas_fee_cap.clone() * msg.gas_limit; + let sender_balance = sender_state.balance.clone(); + if &total_gas_allowance + &sender_balance < total_gas_cost { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_STATE_INVALID, + format!( + "Actor allowance plus balance less than needed: {} + {} < {}", + total_gas_allowance, sender_state.balance, total_gas_cost + ), + miner_penalty_amount, + ))); + } + let gas_costs = if total_gas_allowance.is_zero() { + // The sender is responsible for the entire gas cost + sender_state.deduct_funds(&total_gas_cost)?; + GasAmounts::new(total_gas_cost, TokenAmount::zero(), TokenAmount::zero()) + } else { + // Use the sender's gas allowance from the source actor + let mut source_state = + match self + .state_tree() + .get_actor(BLOBS_ACTOR_ID) + .with_context(|| { + format!( + "failed to lookup gas source actor with id {}", + BLOBS_ACTOR_ID + ) + })? 
{ + Some(act) => act, + None => { + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_INVALID, + "Sender invalid", + miner_penalty_amount, + ))); + } + }; + + // Check the source balance + if source_state.balance < total_gas_allowance { + // This should not happen + return Ok(Err(ApplyRet::prevalidation_fail( + ExitCode::SYS_SENDER_STATE_INVALID, + format!( + "Gas allowance source actor balance less than needed: {} < {}", + source_state.balance, total_gas_allowance + ), + miner_penalty_amount, + ))); + } + + let gas_costs = if total_gas_allowance < total_gas_cost { + // Deduct the entire allowance + source_state.deduct_funds(&total_gas_allowance)?; + // Deduct the remainder from sender + let sender_gas_cost = &total_gas_cost - &total_gas_allowance; + sender_state.deduct_funds(&sender_gas_cost)?; + // Consume entire allowance + GasAmounts::new( + sender_gas_cost, + gas_allowance.amount, + gas_allowance.sponsored_amount, + ) + } else { + // Deduct entire gas cost from source + source_state.deduct_funds(&total_gas_cost)?; + // Consume allowances + let (gas_cost, sponsored_gas_cost) = if gas_allowance.sponsored_amount.is_zero() { + // Consume from own allowance + (total_gas_cost, TokenAmount::zero()) + } else { + // Prioritize sponsor allowance when consuming + if gas_allowance.sponsored_amount > total_gas_cost { + // Consume from sponsored allowance + (TokenAmount::zero(), total_gas_cost) + } else { + // Consume entire sponsored allowance + ( + &total_gas_cost - &gas_allowance.sponsored_amount, + gas_allowance.sponsored_amount, + ) + } + }; + GasAmounts::new(TokenAmount::zero(), gas_cost, sponsored_gas_cost) + }; + + // Update the source actor in the state tree + self.state_tree_mut() + .set_actor(BLOBS_ACTOR_ID, source_state); + gas_costs + }; + + // Update the sender actor in the state tree + self.state_tree_mut().set_actor(sender_id, sender_state); + + // Debit gas costs (the unused amount will get refunded) + self.update_gas_allowance(msg.from, None, -gas_costs.from_allowance.clone())?; + self.update_gas_allowance( + msg.from, + gas_allowance.sponsor, + -gas_costs.from_sponsor_allowance.clone(), + )?; + + debug!( + from_balance = ?gas_costs.from_balance, + from_allowance = ?gas_costs.from_allowance, + from_sponsor_allowance = ?gas_costs.from_sponsor_allowance, + "calculated gas costs for tx from {} to {}", + msg.from, + msg.to + ); + + Ok(Ok((sender_id, sponsor_id, gas_costs, inclusion_cost))) + } + + #[allow(clippy::too_many_arguments)] + fn finish_message( + &mut self, + sender_id: ActorID, + sponsor_id: Option, + msg: Message, + receipt: Receipt, + failure_info: Option, + gas_costs: GasAmounts, + exec_trace: ExecutionTrace, + events: Vec, + ) -> Result { + // NOTE: we don't support old network versions in the FVM, so we always burn. 
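+        // A sketch of the split computed below (see outputs.rs): with the effective
+        // base fee b = min(base_fee, fee_cap), base_fee_burn = b * gas_used, the miner
+        // tip is paid on the full gas_limit, part of the unused gas is burned as
+        // over_estimation_burn, and refund = fee_cap * gas_limit - base_fee_burn
+        // - miner_tip - over_estimation_burn.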
+ let gas_outputs = GasOutputs::compute( + receipt.gas_used, + msg.gas_limit, + &self.context().base_fee, + &msg.gas_fee_cap, + &msg.gas_premium, + ); + + debug!( + "gas outputs for tx from {} to {}: {:#?}", + msg.from, msg.to, gas_outputs + ); + + let GasOutputs { + base_fee_burn, + over_estimation_burn, + miner_penalty, + miner_tip, + refund, + gas_refund, + gas_burned, + } = gas_outputs; + + let mut transfer_to_actor = |addr: ActorID, amt: &TokenAmount| -> Result<()> { + if amt.is_negative() { + return Err(anyhow!("attempted to transfer negative value into actor")); + } + if amt.is_zero() { + return Ok(()); + } + + self.state_tree_mut() + .mutate_actor(addr, |act| act.deposit_funds(amt).or_fatal()) + .context("failed to lookup actor for transfer")?; + Ok(()) + }; + + transfer_to_actor(BURNT_FUNDS_ACTOR_ID, &base_fee_burn)?; + + transfer_to_actor(REWARD_ACTOR_ID, &miner_tip)?; + + transfer_to_actor(BURNT_FUNDS_ACTOR_ID, &over_estimation_burn)?; + + let gas_refunds = gas_costs.refund(&refund); + transfer_to_actor(sender_id, &gas_refunds.from_balance)?; + transfer_to_actor( + BLOBS_ACTOR_ID, + &(&gas_refunds.from_allowance + &gas_refunds.from_sponsor_allowance), + )?; + + debug!( + balance_refund = ?gas_refunds.from_balance, + gas_refund = ?gas_refunds.from_allowance, + sponsor_gas_refund = ?gas_refunds.from_sponsor_allowance, + "calculated gas refunds for tx from {} to {}", + msg.from, + msg.to + ); + + if (&base_fee_burn + &over_estimation_burn + &refund + &miner_tip) != gas_costs.total() { + // Sanity check. This could be a fatal error. + return Err(anyhow!("Gas handling math is wrong")); + } + + // Refund gas difference + self.update_gas_allowance(msg.from, None, gas_refunds.from_allowance)?; + self.update_gas_allowance( + msg.from, + sponsor_id.map(Address::new_id), + gas_refunds.from_sponsor_allowance, + )?; + + Ok(ApplyRet { + msg_receipt: receipt, + penalty: miner_penalty, + miner_tip, + base_fee_burn, + over_estimation_burn, + refund, + gas_refund, + gas_burned, + failure_info, + exec_trace, + events, + }) + } + + fn map_machine(&mut self, f: F) -> T + where + F: FnOnce( + ::Machine, + ) -> (T, ::Machine), + { + replace_with::replace_with_and_return( + &mut self.machine, + || None, + |m| { + let (ret, machine) = f(m.unwrap()); + (ret, Some(machine)) + }, + ) + } + + /// Returns the gas allowance for the sender. + fn get_gas_allowance(&mut self, from: Address) -> Result { + let params = RawBytes::serialize(GetGasAllowanceParams(from))?; + + let msg = Message { + from: SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, // irrelevant for implicit executions + gas_limit: i64::MAX as u64, + method_num: GetGasAllowance as u64, + params, + value: Default::default(), + version: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + let apply_ret = self.execute_message(msg, ApplyKind::Implicit, 0)?; + if let Some(err) = apply_ret.failure_info { + bail!("failed to get gas allowance for {}: {}", from, err); + } + + fvm_ipld_encoding::from_slice::(&apply_ret.msg_receipt.return_data) + .context("failed to parse gas allowance") + } + + /// Updates gas allowance from the sender. + fn update_gas_allowance( + &mut self, + from: Address, + sponsor: Option
, + add_amount: TokenAmount, + ) -> Result<()> { + if add_amount.is_zero() { + return Ok(()); + } + + let params = RawBytes::serialize(UpdateGasAllowanceParams { + from, + sponsor, + add_amount: add_amount.clone(), + })?; + + let msg = Message { + from: SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, // irrelevant for implicit executions + gas_limit: i64::MAX as u64, + method_num: UpdateGasAllowance as u64, + params, + value: Default::default(), + version: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + let apply_ret = self.execute_message(msg, ApplyKind::Implicit, 0)?; + if let Some(err) = apply_ret.failure_info { + bail!( + "failed to update gas allowance for {} (amount: {}; sponsor: {:?}): {}", + from, + add_amount, + sponsor, + err + ); + } + + debug!( + "updated gas allowance for {} (amount: {}; sponsor: {:?})", + from, add_amount, sponsor + ); + + Ok(()) + } +} diff --git a/recall/executor/src/outputs.rs b/recall/executor/src/outputs.rs new file mode 100644 index 0000000000..a37cb47df2 --- /dev/null +++ b/recall/executor/src/outputs.rs @@ -0,0 +1,213 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::econ::TokenAmount; +use num_traits::Zero; + +#[derive(Clone, Debug, Default)] +pub(crate) struct GasAmounts { + pub from_balance: TokenAmount, + pub from_allowance: TokenAmount, + pub from_sponsor_allowance: TokenAmount, +} + +impl GasAmounts { + pub fn new( + from_balance: TokenAmount, + from_allowance: TokenAmount, + from_sponsor_allowance: TokenAmount, + ) -> Self { + Self { + from_balance, + from_allowance, + from_sponsor_allowance, + } + } + + pub fn total(&self) -> TokenAmount { + &self.from_balance + &self.from_allowance + &self.from_sponsor_allowance + } + + // Calculate refunds, prioritizing the sender + pub fn refund(&self, refund: &TokenAmount) -> GasAmounts { + if refund < &self.from_balance { + // The entire refund goes to the sender balance + GasAmounts::new(refund.clone(), TokenAmount::zero(), TokenAmount::zero()) + } else if refund < &(&self.from_balance + &self.from_allowance) { + // Cap the sender balance refund to its cost + // The remainder goes to the sender's gas credit + let remainder = refund - &self.from_balance; + GasAmounts::new(self.from_balance.clone(), remainder, TokenAmount::zero()) + } else { + // Cap the sender balance refund to its cost + // Cap the sender gas credit refund to its cost + // The remainder goes to the sponsor's gas credit + let remainder = refund - &self.from_balance - &self.from_allowance; + GasAmounts::new( + self.from_balance.clone(), + self.from_allowance.clone(), + remainder, + ) + } + } +} + +#[derive(Clone, Debug, Default)] +pub(crate) struct GasOutputs { + pub base_fee_burn: TokenAmount, + pub over_estimation_burn: TokenAmount, + pub miner_penalty: TokenAmount, + pub miner_tip: TokenAmount, + pub refund: TokenAmount, + + // In whole gas units. + pub gas_refund: u64, + pub gas_burned: u64, +} + +impl GasOutputs { + pub fn compute( + // In whole gas units. 
+ gas_used: u64, + gas_limit: u64, + base_fee: &TokenAmount, + fee_cap: &TokenAmount, + gas_premium: &TokenAmount, + ) -> Self { + let mut base_fee_to_pay = base_fee; + + let mut out = GasOutputs::default(); + + if base_fee > fee_cap { + base_fee_to_pay = fee_cap; + out.miner_penalty = (base_fee - fee_cap) * gas_used + } + + out.base_fee_burn = base_fee_to_pay * gas_used; + + let mut miner_tip = gas_premium.clone(); + if &(base_fee_to_pay + &miner_tip) > fee_cap { + miner_tip = fee_cap - base_fee_to_pay; + } + out.miner_tip = &miner_tip * gas_limit; + + let (out_gas_refund, out_gas_burned) = compute_gas_overestimation_burn(gas_used, gas_limit); + out.gas_refund = out_gas_refund; + out.gas_burned = out_gas_burned; + + if out.gas_burned != 0 { + out.over_estimation_burn = base_fee_to_pay * out.gas_burned; + out.miner_penalty += (base_fee - base_fee_to_pay) * out.gas_burned; + } + let required_funds = fee_cap * gas_limit; + let refund = + required_funds - &out.base_fee_burn - &out.miner_tip - &out.over_estimation_burn; + out.refund = refund; + + out + } +} + +fn compute_gas_overestimation_burn(gas_used: u64, gas_limit: u64) -> (u64, u64) { + const GAS_OVERUSE_NUM: u128 = 11; + const GAS_OVERUSE_DENOM: u128 = 10; + + if gas_used == 0 { + return (0, gas_limit); + } + + // Convert to u128 to prevent overflow on multiply. + let gas_used = gas_used as u128; + let gas_limit = gas_limit as u128; + + // This burns (N-10)% (clamped at 0% and 100%) of the remaining gas where N is the + // overestimation percentage. + let over = gas_limit + .saturating_sub((GAS_OVERUSE_NUM * gas_used) / GAS_OVERUSE_DENOM) + .min(gas_used); + + // We handle the case where the gas used exceeds the gas limit, just in case. + let gas_remaining = gas_limit.saturating_sub(gas_used); + + // This computes the fraction of the "remaining" gas to burn and will never be greater than 100% + // of the remaining gas. + let gas_to_burn = (gas_remaining * over) / gas_used; + + // But... we use saturating sub, just in case. + let refund = gas_remaining.saturating_sub(gas_to_burn); + + (refund as u64, gas_to_burn as u64) +} + +// Adapted from lotus. 
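+// Worked example, matching the first test case below: used = 100, limit = 200 gives
+// over = min(200 - (11 * 100) / 10, 100) = 90, gas_remaining = 200 - 100 = 100,
+// gas_to_burn = (100 * 90) / 100 = 90, refund = 100 - 90 = 10, i.e. (10, 90).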
+#[test] +fn overestimation_burn_test() { + fn do_test(used: u64, limit: u64, refund: u64, toburn: u64) { + let (computed_refund, computed_toburn) = compute_gas_overestimation_burn(used, limit); + assert_eq!(refund, computed_refund, "refund"); + assert_eq!(toburn, computed_toburn, "burned"); + } + + do_test(100, 200, 10, 90); + do_test(100, 150, 30, 20); + do_test(1_000, 1_300, 240, 60); + do_test(500, 700, 140, 60); + do_test(200, 200, 0, 0); + do_test(20_000, 21_000, 1_000, 0); + do_test(0, 2_000, 0, 2_000); + do_test(500, 651, 121, 30); + do_test(500, 5_000, 0, 4_500); + do_test(7_499_000_000, 7_500_000_000, 1_000_000, 0); + do_test(7_500_000_000 / 2, 7_500_000_000, 375_000_000, 3_375_000_000); + do_test(1, 7_500_000_000, 0, 7_499_999_999); +} + +#[test] +fn gas_outputs_test() { + #[allow(clippy::too_many_arguments)] + fn do_test( + used: u64, + limit: u64, + fee_cap: u64, + premium: u64, + base_fee_burn: u64, + over_estimation_burn: u64, + miner_penalty: u64, + miner_tip: u64, + refund: u64, + ) { + let base_fee = TokenAmount::from_atto(10); + let output = GasOutputs::compute( + used, + limit, + &base_fee, + &TokenAmount::from_atto(fee_cap), + &TokenAmount::from_atto(premium), + ); + assert_eq!( + TokenAmount::from_atto(base_fee_burn), + output.base_fee_burn, + "base_fee_burn" + ); + assert_eq!( + TokenAmount::from_atto(over_estimation_burn), + output.over_estimation_burn, + "over_estimation_burn" + ); + assert_eq!( + TokenAmount::from_atto(miner_penalty), + output.miner_penalty, + "miner_penalty" + ); + assert_eq!( + TokenAmount::from_atto(miner_tip), + output.miner_tip, + "miner_tip" + ); + assert_eq!(TokenAmount::from_atto(refund), output.refund, "refund"); + } + do_test(100, 110, 11, 1, 1_000, 0, 0, 110, 100); + do_test(100, 130, 11, 1, 1_000, 60, 0, 130, 240); + do_test(100, 110, 10, 1, 1_000, 0, 0, 0, 100); + do_test(100, 110, 6, 1, 600, 0, 400, 0, 60); +} diff --git a/recall/ipld/Cargo.toml b/recall/ipld/Cargo.toml new file mode 100644 index 0000000000..9d06cb9c47 --- /dev/null +++ b/recall/ipld/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "recall_ipld" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_amt = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_ipld_hamt = { workspace = true } +fvm_shared = { workspace = true } +fvm_sdk = { workspace = true } +integer-encoding = { workspace = true } +serde = { workspace = true, features = ["derive"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] \ No newline at end of file diff --git a/recall/ipld/src/amt.rs b/recall/ipld/src/amt.rs new file mode 100644 index 0000000000..f3116c91ef --- /dev/null +++ b/recall/ipld/src/amt.rs @@ -0,0 +1,9 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod core; +pub mod vec; + +pub use vec::Root; diff --git a/recall/ipld/src/amt/core.rs b/recall/ipld/src/amt/core.rs new file mode 100644 index 0000000000..2048d7ee39 --- /dev/null +++ b/recall/ipld/src/amt/core.rs @@ -0,0 +1,162 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Debug; + +use anyhow::anyhow; 
+use cid::Cid; +use fil_actors_runtime::{ActorError, AsActorError}; +use fvm_ipld_amt as amt; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::error::ExitCode; +use serde::de::DeserializeOwned; +use serde::Serialize; + +/// Wraps a HAMT to provide a convenient map API. +/// Any errors are returned with exit code indicating illegal state. +/// The name is not persisted in state, but adorns any error messages. +pub struct Vec +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + amt: amt::Amt, +} + +/// Configuration options for an AMT instance. +#[derive(Debug, Clone)] +pub struct Config { + /// The `bit_width` drives how wide and high the tree is going to be. + /// Each node in the tree will have `2^bit_width` number of slots for child nodes, + /// and consume `bit_width` number of bits from the hashed keys at each level. + pub bit_width: u32, +} + +impl Default for Config { + fn default() -> Self { + Self { + bit_width: AMT_BIT_WIDTH, + } + } +} + +pub const AMT_BIT_WIDTH: u32 = 5; + +pub const DEFAULT_AMT_CONFIG: Config = Config { + bit_width: AMT_BIT_WIDTH, +}; + +impl Vec +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + /// Creates a new, empty vec. + pub fn empty(store: BS, config: Config) -> Self { + Self { + amt: amt::Amt::new_with_bit_width(store, config.bit_width), + } + } + + /// Creates a new empty vec and flushes it to the store. + /// Returns the CID of the empty vec root. + pub fn flush_empty(store: BS, config: Config) -> Result { + Self::empty(store, config).flush() + } + + /// Loads a vec from the store. + pub fn load(store: BS, root: &Cid) -> Result { + Ok(Self { + amt: amt::Amt::load(root, store) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load AMT with root '{}'", root) + })?, + }) + } + + /// Flushes the vec's contents to the store. + /// Returns the root node CID. + pub fn flush(&mut self) -> Result { + self.amt + .flush() + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || "failed to flush AMT") + } + + /// Returns a reference to the value at the given index, if present. + pub fn get(&self, index: u64) -> Result, ActorError> { + self.amt + .get(index) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get from AMT at index {}", index) + }) + } + + /// Inserts a value into the vec at the given index. + pub fn set(&mut self, index: u64, value: V) -> Result<(), ActorError> + where + V: PartialEq, + { + self.amt + .set(index, value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set AMT at index {}", index) + }) + } + + /// Deletes a value from the vec at the given index. + pub fn delete(&mut self, index: u64) -> Result, ActorError> { + self.amt + .delete(index) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete from AMT at index {}", index) + }) + } + + /// Returns the height of the vec. + pub fn height(&self) -> u32 { + self.amt.height() + } + + /// Returns count of elements in the vec. + pub fn count(&self) -> u64 { + self.amt.count() + } + + /// Iterates and runs a function over values in the vec starting at an index up to a limit. + /// Returns the index if there are more items. 
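+    /// For example, `vec.for_each_while_ranged(Some(0), Some(10), |i, v| Ok(true))?`
+    /// visits at most 10 values starting at index 0; per `fvm_ipld_amt`'s
+    /// while-semantics, returning `Ok(false)` from the callback stops the traversal.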
+ pub fn for_each_while_ranged( + &self, + start_at: Option, + limit: Option, + mut f: F, + ) -> Result<(u64, Option), ActorError> + where + F: FnMut(u64, &V) -> Result, + { + match self + .amt + .for_each_while_ranged(start_at, limit, |i, v| f(i, v).map_err(|e| anyhow!(e))) + { + Ok((traversed, next)) => Ok((traversed, next)), + Err(amt_err) => self.map_amt_error(amt_err), + } + } + + fn map_amt_error(&self, amt_err: amt::Error) -> Result { + match amt_err { + amt::Error::Dynamic(e) => match e.downcast::() { + Ok(actor_error) => Err(actor_error), + Err(e) => Err(ActorError::illegal_state(format!( + "error in callback traversing AMT: {}", + e + ))), + }, + e => Err(ActorError::illegal_state(format!( + "error traversing AMT: {}", + e + ))), + } + } +} diff --git a/recall/ipld/src/amt/vec.rs b/recall/ipld/src/amt/vec.rs new file mode 100644 index 0000000000..5d0030c242 --- /dev/null +++ b/recall/ipld/src/amt/vec.rs @@ -0,0 +1,155 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::marker::PhantomData; + +use super::core::{Vec, DEFAULT_AMT_CONFIG}; + +#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Root +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + cid: Cid, + #[serde(skip)] + value_type: PhantomData, +} + +impl Root +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub fn new(store: BS) -> Result { + Amt::::flush_empty(store) + } + + pub fn from_cid(cid: Cid) -> Self { + Self { + cid, + value_type: Default::default(), + } + } + + pub fn amt<'a, BS: Blockstore>(&self, store: BS) -> Result, ActorError> { + Amt::load(store, &self.cid) + } + + pub fn cid(&self) -> &Cid { + &self.cid + } +} + +pub struct Amt<'a, BS, V> +where + BS: Blockstore, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + vec: Vec, + _marker: PhantomData<&'a BS>, +} + +#[derive(Debug, Clone)] +pub struct TrackedFlushResult +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub root: Root, +} + +impl Amt<'_, BS, V> +where + BS: Blockstore, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + fn load(store: BS, root: &Cid) -> Result { + let vec = Vec::::load(store, root)?; + Ok(Self { + vec, + _marker: Default::default(), + }) + } + + pub fn get(&self, index: u64) -> Result, ActorError> { + self.vec.get(index).map(|value| value.cloned()) + } + + pub fn get_or_err(&self, index: u64) -> Result { + self.get(index)? 
+ .ok_or_else(|| ActorError::not_found(format!("value at index {} not found", index))) + } + + pub fn set(&mut self, index: u64, value: V) -> Result<(), ActorError> { + self.vec.set(index, value) + } + + pub fn set_and_flush(&mut self, index: u64, value: V) -> Result, ActorError> { + self.set(index, value)?; + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn set_and_flush_tracked( + &mut self, + index: u64, + value: V, + ) -> Result, ActorError> { + let root = self.set_and_flush(index, value)?; + Ok(TrackedFlushResult { root }) + } + + pub fn delete(&mut self, index: u64) -> Result, ActorError> { + self.vec.delete(index) + } + + pub fn delete_and_flush(&mut self, index: u64) -> Result, ActorError> { + self.delete(index)?; + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn delete_and_flush_tracked( + &mut self, + index: u64, + ) -> Result, ActorError> { + let root = self.delete_and_flush(index)?; + Ok(TrackedFlushResult { root }) + } + + pub fn flush(&mut self) -> Result, ActorError> { + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn flush_empty(store: BS) -> Result, ActorError> { + let cid = Vec::::flush_empty(store, DEFAULT_AMT_CONFIG)?; + Ok(Root::from_cid(cid)) + } + + pub fn height(&self) -> u32 { + self.vec.height() + } + + pub fn count(&self) -> u64 { + self.vec.count() + } + + pub fn for_each_while_ranged( + &self, + start_at: Option, + limit: Option, + mut f: F, + ) -> Result<(u64, Option), ActorError> + where + F: FnMut(u64, &V) -> Result, + { + self.vec.for_each_while_ranged(start_at, limit, &mut f) + } +} diff --git a/recall/ipld/src/hamt.rs b/recall/ipld/src/hamt.rs new file mode 100644 index 0000000000..1cb241d348 --- /dev/null +++ b/recall/ipld/src/hamt.rs @@ -0,0 +1,13 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod core; +pub mod map; + +pub use core::Map; +pub use core::MapKey; +pub use core::DEFAULT_HAMT_CONFIG; +pub use fvm_ipld_hamt::{BytesKey, Error}; +pub use map::Root; diff --git a/recall/ipld/src/hamt/core.rs b/recall/ipld/src/hamt/core.rs new file mode 100644 index 0000000000..c09029fa2f --- /dev/null +++ b/recall/ipld/src/hamt/core.rs @@ -0,0 +1,416 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Debug; +use std::marker::PhantomData; + +use crate::hamt::BytesKey; +use crate::Hasher; +use anyhow::anyhow; +use cid::Cid; +use fil_actors_runtime::{ActorError, AsActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt as hamt; +use fvm_ipld_hamt::Error; +use fvm_shared::address::Address; +use fvm_shared::error::ExitCode; +use integer_encoding::VarInt; +use serde::de::DeserializeOwned; +use serde::Serialize; + +/// Wraps a HAMT to provide a convenient map API. +/// Any errors are returned with exit code indicating illegal state. +/// The name is not persisted in state, but adorns any error messages. 
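+/// A minimal usage sketch (assuming a key type that implements [`MapKey`], e.g.
+/// `Address` as the imports suggest):
+///
+/// let mut map = Map::<_, Address, u64>::empty(store, DEFAULT_HAMT_CONFIG, "balances".into());
+/// map.set(&addr, 1)?;
+/// let root = map.flush()?;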
diff --git a/recall/ipld/src/hamt.rs b/recall/ipld/src/hamt.rs
new file mode 100644
index 0000000000..1cb241d348
--- /dev/null
+++ b/recall/ipld/src/hamt.rs
@@ -0,0 +1,13 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+mod core;
+pub mod map;
+
+pub use core::Map;
+pub use core::MapKey;
+pub use core::DEFAULT_HAMT_CONFIG;
+pub use fvm_ipld_hamt::{BytesKey, Error};
+pub use map::Root;
diff --git a/recall/ipld/src/hamt/core.rs b/recall/ipld/src/hamt/core.rs
new file mode 100644
index 0000000000..c09029fa2f
--- /dev/null
+++ b/recall/ipld/src/hamt/core.rs
@@ -0,0 +1,416 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::fmt::Debug;
+use std::marker::PhantomData;
+
+use crate::hamt::BytesKey;
+use crate::Hasher;
+use anyhow::anyhow;
+use cid::Cid;
+use fil_actors_runtime::{ActorError, AsActorError};
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_hamt as hamt;
+use fvm_ipld_hamt::Error;
+use fvm_shared::address::Address;
+use fvm_shared::error::ExitCode;
+use integer_encoding::VarInt;
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+
+/// Wraps a HAMT to provide a convenient map API.
+/// Any errors are returned with exit code indicating illegal state.
+/// The name is not persisted in state, but adorns any error messages.
+pub struct Map<BS, K, V>
+where
+    BS: Blockstore,
+    K: MapKey,
+    V: DeserializeOwned + Serialize,
+{
+    hamt: hamt::Hamt<BS, V, BytesKey, Hasher>,
+    name: String,
+    key_type: PhantomData<K>,
+}
+
+pub trait MapKey: Sized + Debug {
+    fn from_bytes(b: &[u8]) -> Result<Self, String>;
+    fn to_bytes(&self) -> Result<Vec<u8>, String>;
+}
+
+pub type Config = hamt::Config;
+
+pub const DEFAULT_HAMT_CONFIG: Config = Config {
+    bit_width: 5,
+    min_data_depth: 2,
+    max_array_width: 1,
+};
+
+impl<BS, K, V> Map<BS, K, V>
+where
+    BS: Blockstore,
+    K: MapKey,
+    V: DeserializeOwned + Serialize,
+{
+    pub fn name(&self) -> String {
+        self.name.clone()
+    }
+
+    /// Creates a new, empty map.
+    pub fn empty(store: BS, config: Config, name: String) -> Self {
+        Self {
+            hamt: hamt::Hamt::new_with_config(store, config),
+            name,
+            key_type: Default::default(),
+        }
+    }
+
+    /// Creates a new empty map and flushes it to the store.
+    /// Returns the CID of the empty map root.
+    pub fn flush_empty(store: BS, config: Config) -> Result<Cid, ActorError> {
+        // This CID is constant regardless of the HAMT's configuration, so as an optimization,
+        // we could hard-code it and merely check it is already stored.
+        Self::empty(store, config, "empty".into()).flush()
+    }
+
+    /// Loads a map from the store.
+    // There is no version of this method that doesn't take an explicit config parameter.
+    // The caller must know the configuration to interpret the HAMT correctly.
+    // Forcing them to provide it makes it harder to accidentally use an incorrect default.
+    pub fn load(store: BS, root: &Cid, config: Config, name: String) -> Result<Self, ActorError> {
+        Ok(Self {
+            hamt: hamt::Hamt::load_with_config(root, store, config)
+                .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                    format!("failed to load HAMT '{}'", name)
+                })?,
+            name,
+            key_type: Default::default(),
+        })
+    }
+
+    /// Flushes the map's contents to the store.
+    /// Returns the root node CID.
+    pub fn flush(&mut self) -> Result<Cid, ActorError> {
+        self.hamt
+            .flush()
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to flush HAMT '{}'", self.name)
+            })
+    }
+
+    /// Returns a reference to the value associated with a key, if present.
+    pub fn get(&self, key: &K) -> Result<Option<&V>, ActorError> {
+        let k = key
+            .to_bytes()
+            .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?;
+        self.hamt
+            .get(&k)
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to get key {key:?} from HAMT '{}'", self.name)
+            })
+    }
+
+    pub fn contains_key(&self, key: &K) -> Result<bool, ActorError> {
+        let k = key
+            .to_bytes()
+            .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?;
+        self.hamt
+            .contains_key(&k)
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to check key {key:?} in HAMT '{}'", self.name)
+            })
+    }
+
+    /// Inserts a key-value pair into the map.
+    /// Returns any value previously associated with the key.
+    pub fn set(&mut self, key: &K, value: V) -> Result<Option<V>, ActorError>
+    where
+        V: PartialEq,
+    {
+        let k = key
+            .to_bytes()
+            .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?;
+        self.hamt
+            .set(k.into(), value)
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to set key {key:?} in HAMT '{}'", self.name)
+            })
+    }
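+
+    // Sketch: `load` deliberately has no config-free variant, so a root
+    // flushed with one config must be reloaded with the same one.
+    // Illustrative only; `store` is assumed to exist:
+    //
+    //     let cid = Map::<_, u64, String>::flush_empty(&store, DEFAULT_HAMT_CONFIG)?;
+    //     let map = Map::<_, u64, String>::load(&store, &cid, DEFAULT_HAMT_CONFIG, "m".into())?;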
+
+    /// Inserts a key-value pair only if the key does not already exist.
+    /// Returns whether the map was modified (i.e. key was absent).
+    pub fn set_if_absent(&mut self, key: &K, value: V) -> Result<bool, ActorError>
+    where
+        V: PartialEq,
+    {
+        let k = key
+            .to_bytes()
+            .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?;
+        self.hamt
+            .set_if_absent(k.into(), value)
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to set key {key:?} in HAMT '{}'", self.name)
+            })
+    }
+
+    pub fn delete(&mut self, key: &K) -> Result<Option<V>, ActorError> {
+        let k = key
+            .to_bytes()
+            .with_context_code(ExitCode::USR_ASSERTION_FAILED, || {
+                format!("invalid key {key:?}")
+            })?;
+        self.hamt
+            .delete(&k)
+            .map(|delete_result| delete_result.map(|(_k, v)| v))
+            .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                format!("failed to delete key {key:?} from HAMT '{}'", self.name)
+            })
+    }
+
+    /// Iterates over all key-value pairs in the map.
+    #[allow(clippy::blocks_in_conditions)]
+    pub fn for_each<F>(&self, mut f: F) -> Result<(), ActorError>
+    where
+        // Note the result type of F uses ActorError.
+        // The implementation will extract and propagate any ActorError
+        // wrapped in a hamt::Error::Dynamic.
+        F: FnMut(K, &V) -> Result<(), ActorError>,
+    {
+        match self.hamt.for_each(|k, v| {
+            let key = K::from_bytes(k).context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?;
+            f(key, v).map_err(|e| anyhow!(e))
+        }) {
+            Ok(_) => Ok(()),
+            Err(hamt_err) => self.map_hamt_error(hamt_err),
+        }
+    }
+
+    /// Iterates over key-value pairs in the map starting at a key up to a max.
+    /// Returns the next key if there are more items in the map.
+    #[allow(clippy::blocks_in_conditions)]
+    pub fn for_each_ranged<F>(
+        &self,
+        starting_key: Option<&hamt::BytesKey>,
+        max: Option<usize>,
+        mut f: F,
+    ) -> Result<(usize, Option<K>), ActorError>
+    where
+        // Note the result type of F uses ActorError.
+        // The implementation will extract and propagate any ActorError
+        // wrapped in a hamt::Error::Dynamic.
+        F: FnMut(K, &V) -> Result<bool, ActorError>,
+    {
+        match self.inner_for_each_ranged(starting_key, max, |k, v| {
+            let key = K::from_bytes(k).context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?;
+            f(key, v).map_err(|e| anyhow!(e))
+        }) {
+            Ok((traversed, next)) => {
+                let next = if let Some(next) = next {
+                    Some(
+                        K::from_bytes(&next)
+                            .context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?,
+                    )
+                } else {
+                    None
+                };
+                Ok((traversed, next))
+            }
+            Err(hamt_err) => self.map_hamt_error(hamt_err),
+        }
+    }
+
+    fn inner_for_each_ranged<F>(
+        &self,
+        starting_key: Option<&hamt::BytesKey>,
+        max: Option<usize>,
+        mut f: F,
+    ) -> Result<(usize, Option<BytesKey>), Error>
+    where
+        F: FnMut(&hamt::BytesKey, &V) -> anyhow::Result<bool>,
+    {
+        let mut iter = match starting_key {
+            Some(key) => self.hamt.iter_from(key)?,
+            None => self.hamt.iter(),
+        }
+        .fuse();
+
+        let mut traversed = 0usize;
+        let limit = max.unwrap_or(usize::MAX);
+        loop {
+            if traversed >= limit {
+                break;
+            }
+
+            match iter.next() {
+                Some(res) => {
+                    let (k, v) = res?;
+                    if !(f)(k, v)? {
+                        continue;
+                    }
+                    traversed += 1;
+                }
+                None => break,
+            }
+        }
+        let next = iter.next().transpose()?.map(|kv| kv.0).cloned();
+        Ok((traversed, next))
+    }
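+
+    // Sketch: paging through the map 100 entries at a time. The returned
+    // `next` key is re-encoded to seed the following call. Illustrative
+    // only; `map` and error plumbing are assumed:
+    //
+    //     let mut start: Option<BytesKey> = None;
+    //     loop {
+    //         let (_, next) = map.for_each_ranged(start.as_ref(), Some(100), |_k, _v| Ok(true))?;
+    //         match next {
+    //             Some(k) => start = Some(k.to_bytes().unwrap().into()),
+    //             None => break,
+    //         }
+    //     }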
+
+    /// Iterates over key-value pairs in the map, starting at a key, up to an ending key (inclusive).
+    #[allow(clippy::blocks_in_conditions)]
+    pub fn for_each_until<F>(
+        &self,
+        starting_key: Option<&hamt::BytesKey>,
+        ending_key: &hamt::BytesKey,
+        mut f: F,
+    ) -> Result<(), ActorError>
+    where
+        F: FnMut(K, &V) -> Result<(), ActorError>,
+    {
+        let iter = match starting_key {
+            Some(key) => self.hamt.iter_from(key).map_err(|error| {
+                ActorError::illegal_state(format!("error traversing HAMT {}: {}", self.name, error))
+            })?,
+            None => self.hamt.iter(),
+        };
+        for res in iter.fuse().by_ref() {
+            match res {
+                Ok((k, v)) => {
+                    if k.le(ending_key) {
+                        let k = K::from_bytes(k)
+                            .context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?;
+                        f(k, v)?;
+                    }
+                }
+                Err(hamt_err) => {
+                    return self.map_hamt_error(hamt_err);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub fn iter(&self) -> hamt::Iter<'_, BS, V, BytesKey, Hasher> {
+        self.hamt.iter()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.hamt.is_empty()
+    }
+
+    fn map_hamt_error<T>(&self, hamt_err: hamt::Error) -> Result<T, ActorError> {
+        match hamt_err {
+            hamt::Error::Dynamic(e) => match e.downcast::<ActorError>() {
+                Ok(actor_error) => Err(actor_error),
+                Err(e) => Err(ActorError::illegal_state(format!(
+                    "error in callback traversing HAMT {}: {}",
+                    self.name, e
+                ))),
+            },
+            e => Err(ActorError::illegal_state(format!(
+                "error traversing HAMT {}: {}",
+                self.name, e
+            ))),
+        }
+    }
+}
+
+impl MapKey for Vec<u8> {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        Ok(b.to_vec())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.clone())
+    }
+}
+
+impl MapKey for String {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        String::from_utf8(b.to_vec()).map_err(|e| e.to_string())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.as_bytes().to_vec())
+    }
+}
+
+impl MapKey for u64 {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        if let Some((result, size)) = VarInt::decode_var(b) {
+            if size != b.len() {
+                return Err(format!("trailing bytes after varint in {:?}", b));
+            }
+            Ok(result)
+        } else {
+            Err(format!("failed to decode varint in {:?}", b))
+        }
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.encode_var_vec())
+    }
+}
+
+impl MapKey for i64 {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        if let Some((result, size)) = VarInt::decode_var(b) {
+            if size != b.len() {
+                return Err(format!("trailing bytes after varint in {:?}", b));
+            }
+            Ok(result)
+        } else {
+            Err(format!("failed to decode varint in {:?}", b))
+        }
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.encode_var_vec())
+    }
+}
+
+impl MapKey for Address {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        Address::from_bytes(b).map_err(|e| e.to_string())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(Address::to_bytes(*self))
+    }
+}
+
+impl MapKey for Cid {
+    fn from_bytes(b: &[u8]) -> Result<Self, String> {
+        Cid::try_from(b).map_err(|e| e.to_string())
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, String> {
+        Ok(self.to_bytes())
+    }
+}
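+
+// Sketch: integer keys are varint-encoded, so `to_bytes`/`from_bytes`
+// round-trip and trailing bytes are rejected. Values are illustrative:
+//
+//     let bytes = 300u64.to_bytes().unwrap();        // LEB128 varint: [0xac, 0x02]
+//     assert_eq!(u64::from_bytes(&bytes).unwrap(), 300);
+//     assert!(u64::from_bytes(&[0xac, 0x02, 0x00]).is_err()); // trailing byte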
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use fvm_ipld_blockstore::MemoryBlockstore;
+
+    #[test]
+    fn basic_put_get() {
+        let bs = MemoryBlockstore::new();
+        let mut m = Map::<_, u64, String>::empty(bs, DEFAULT_HAMT_CONFIG, "empty".into());
+        m.set(&1234, "1234".to_string()).unwrap();
+        assert!(m.get(&2222).unwrap().is_none());
+        assert_eq!(&"1234".to_string(), m.get(&1234).unwrap().unwrap());
+    }
+
+    #[test]
+    fn for_each_callback_exitcode_propagates() {
+        let bs = MemoryBlockstore::new();
+        let mut m = Map::<_, u64, String>::empty(bs, DEFAULT_HAMT_CONFIG, "empty".into());
+        m.set(&1234, "1234".to_string()).unwrap();
+        let res = m.for_each(|_, _| Err(ActorError::forbidden("test".to_string())));
+        assert!(res.is_err());
+        assert_eq!(res.unwrap_err(), ActorError::forbidden("test".to_string()));
+    }
+}
diff --git a/recall/ipld/src/hamt/map.rs b/recall/ipld/src/hamt/map.rs
new file mode 100644
index 0000000000..10ecb3608a
--- /dev/null
+++ b/recall/ipld/src/hamt/map.rs
@@ -0,0 +1,248 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::fmt::Display;
+use std::marker::PhantomData;
+
+use cid::Cid;
+use fil_actors_runtime::ActorError;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_ipld_encoding::tuple::*;
+use fvm_ipld_hamt::{BytesKey, Iter};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+
+use super::core::{Map, MapKey, DEFAULT_HAMT_CONFIG};
+use crate::Hasher;
+
+#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)]
+pub struct Root<K, V>
+where
+    K: MapKey + Display,
+    V: DeserializeOwned + Serialize + PartialEq + Clone,
+{
+    cid: Cid,
+    name: String,
+    #[serde(skip)]
+    key_type: PhantomData<K>,
+    #[serde(skip)]
+    value_type: PhantomData<V>,
+}
+
+impl<K, V> Root<K, V>
+where
+    K: MapKey + Display,
+    V: DeserializeOwned + Serialize + PartialEq + Clone,
+{
+    pub fn new<BS: Blockstore>(store: BS, name: &str) -> Result<Self, ActorError> {
+        Hamt::<BS, K, V>::flush_empty(store, name.to_owned())
+    }
+
+    pub fn from_cid(cid: Cid, name: String) -> Self {
+        Self {
+            cid,
+            name,
+            key_type: Default::default(),
+            value_type: Default::default(),
+        }
+    }
+
+    pub fn hamt<'a, BS: Blockstore>(
+        &self,
+        store: BS,
+        size: u64,
+    ) -> Result<Hamt<'a, BS, K, V>, ActorError> {
+        Hamt::load(store, &self.cid, self.name.clone(), size)
+    }
+
+    pub fn cid(&self) -> &Cid {
+        &self.cid
+    }
+
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+}
+
+pub struct Hamt<'a, BS, K, V>
+where
+    BS: Blockstore,
+    K: MapKey + Display,
+    V: DeserializeOwned + Serialize + PartialEq + Clone,
+{
+    map: Map<BS, K, V>,
+    size: u64,
+    _marker: PhantomData<&'a BS>,
+}
+
+#[derive(Debug, Clone)]
+pub struct TrackedFlushResult<K, V>
+where
+    K: MapKey + Display,
+    V: DeserializeOwned + Serialize + PartialEq + Clone,
+{
+    pub root: Root<K, V>,
+    pub size: u64,
+}
+
+impl<BS, K, V> Hamt<'_, BS, K, V>
+where
+    BS: Blockstore,
+    K: MapKey + Display,
+    V: DeserializeOwned + Serialize + PartialEq + Clone,
+{
+    fn load(store: BS, root: &Cid, name: String, size: u64) -> Result<Self, ActorError> {
+        let map = Map::<BS, K, V>::load(store, root, DEFAULT_HAMT_CONFIG, name)?;
+        Ok(Self {
+            map,
+            size,
+            _marker: Default::default(),
+        })
+    }
+
+    pub fn get(&self, key: &K) -> Result<Option<V>, ActorError> {
+        self.map.get(key).map(|value| value.cloned())
+    }
+
+    pub fn set(&mut self, key: &K, value: V) -> Result<Option<V>, ActorError> {
+        let previous = self.map.set(key, value)?;
+        if previous.is_none() {
+            self.size = self.size.saturating_add(1);
+        }
+        Ok(previous)
+    }
+
+    pub fn set_if_absent(&mut self, key: &K, value: V) -> Result<bool, ActorError> {
+        let was_absent = self.map.set_if_absent(key, value.clone())?;
+        if was_absent {
+            self.size = self.size.saturating_add(1);
+        }
+        Ok(was_absent)
+    }
+
+    pub fn set_and_flush(&mut self, key: &K, value: V) -> Result<Root<K, V>, ActorError> {
+        self.set(key, value)?;
+        let cid = self.map.flush()?;
+        Ok(Root::from_cid(cid, self.map.name()))
+    }
+
+    pub fn set_and_flush_tracked(
+        &mut self,
+        key: &K,
+        value: V,
+    ) -> Result<TrackedFlushResult<K, V>, ActorError> {
+        let root = self.set_and_flush(key, value)?;
+        Ok(TrackedFlushResult {
+            root,
+            size: self.size,
+        })
+    }
+
+    pub fn get_or_err(&self, key: &K) -> Result<V, ActorError> {
+        self.get(key)?.ok_or_else(|| {
+            ActorError::not_found(format!("{} not found in {}", key, self.map.name()))
+        })
+    }
+
+    pub fn get_or_create<F>(&self, key: &K, create_fn: F) -> Result<V, ActorError>
+    where
+        F: FnOnce() -> Result<V, ActorError>,
+    {
+        if let Some(value) = self.map.get(key)? {
+            Ok(value.clone())
+        } else {
+            Ok(create_fn()?)
+        }
+    }
+
+    pub fn contains_key(&self, key: &K) -> Result<bool, ActorError> {
+        self.map.contains_key(key)
+    }
+
+    pub fn delete(&mut self, key: &K) -> Result<Option<V>, ActorError> {
+        let deleted = self.map.delete(key)?;
+        if deleted.is_some() {
+            self.size = self.size.saturating_sub(1);
+        }
+        Ok(deleted)
+    }
+
+    pub fn delete_and_flush(&mut self, key: &K) -> Result<(Root<K, V>, Option<V>), ActorError> {
+        let deleted = self.delete(key)?;
+        let cid = self.map.flush()?;
+        Ok((Root::from_cid(cid, self.map.name()), deleted))
+    }
+
+    pub fn delete_and_flush_tracked(
+        &mut self,
+        key: &K,
+    ) -> Result<(TrackedFlushResult<K, V>, Option<V>), ActorError> {
+        let (root, deleted) = self.delete_and_flush(key)?;
+        Ok((
+            TrackedFlushResult {
+                root,
+                size: self.size,
+            },
+            deleted,
+        ))
+    }
+
+    pub fn flush(&mut self) -> Result<Root<K, V>, ActorError> {
+        let cid = self.map.flush()?;
+        Ok(Root::from_cid(cid, self.map.name()))
+    }
+
+    pub fn flush_empty(store: BS, name: String) -> Result<Root<K, V>, ActorError> {
+        let cid = Map::<BS, K, V>::flush_empty(store, DEFAULT_HAMT_CONFIG)?;
+        Ok(Root::from_cid(cid, name))
+    }
+
+    pub fn flush_tracked(&mut self) -> Result<TrackedFlushResult<K, V>, ActorError> {
+        let root = self.flush()?;
+        Ok(TrackedFlushResult {
+            root,
+            size: self.size,
+        })
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.map.is_empty()
+    }
+
+    pub fn for_each<F>(&self, mut f: F) -> Result<(), ActorError>
+    where
+        F: FnMut(K, &V) -> Result<(), ActorError>,
+    {
+        self.map.for_each(&mut f)
+    }
+
+    pub fn for_each_ranged<F>(
+        &self,
+        starting_key: Option<&BytesKey>,
+        max: Option<usize>,
+        mut f: F,
+    ) -> Result<(usize, Option<K>), ActorError>
+    where
+        F: FnMut(K, &V) -> Result<bool, ActorError>,
+    {
+        self.map.for_each_ranged(starting_key, max, &mut f)
+    }
+
+    pub fn for_each_until<F>(
+        &self,
+        starting_key: Option<&BytesKey>,
+        ending_key: &BytesKey,
+        mut f: F,
+    ) -> Result<(), ActorError>
+    where
+        F: FnMut(K, &V) -> Result<(), ActorError>,
+    {
+        self.map.for_each_until(starting_key, ending_key, &mut f)
+    }
+
+    pub fn iter(&self) -> Iter<'_, BS, V, BytesKey, Hasher> {
+        self.map.iter()
+    }
+}
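+
+// Sketch: the size-tracking wrapper in action. A tracked flush returns the
+// new root and the updated entry count so callers can persist both in one
+// step. Names (`state`, `key`, `value`) are illustrative:
+//
+//     let mut hamt = state.root.hamt(&store, state.size)?;
+//     let tracked = hamt.set_and_flush_tracked(&key, value)?;
+//     state.root = tracked.root;
+//     state.size = tracked.size;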
diff --git a/recall/ipld/src/hash_algorithm.rs b/recall/ipld/src/hash_algorithm.rs
new file mode 100644
index 0000000000..a72e58166d
--- /dev/null
+++ b/recall/ipld/src/hash_algorithm.rs
@@ -0,0 +1,44 @@
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+// use fvm_ipld_hamt::{Hash, HashAlgorithm, HashedKey};
+use fvm_ipld_hamt::{Hash, HashAlgorithm};
+use fvm_sdk as fvm;
+use fvm_shared::crypto::hash::SupportedHashes;
+use std::hash::Hasher;
+
+pub type HashedKey = [u8; 32];
+
+#[derive(Default)]
+struct RuntimeHasherWrapper(pub Vec<u8>);
+
+/// This `Hasher` impl only intercepts the key bytes; it is used only together
+/// with `FvmHashSha256` below.
+impl Hasher for RuntimeHasherWrapper {
+    fn finish(&self) -> u64 {
+        // The u64 hash is not used by the HAMT.
+        0
+    }
+
+    fn write(&mut self, bytes: &[u8]) {
+        self.0.extend_from_slice(bytes);
+    }
+}
+
+#[derive(Default, Debug)]
+pub struct FvmHashSha256;
+
+impl HashAlgorithm for FvmHashSha256 {
+    fn hash<X>(key: &X) -> HashedKey
+    where
+        X: Hash + ?Sized,
+    {
+        let mut rval_digest: HashedKey = Default::default();
+        let mut hasher = RuntimeHasherWrapper::default();
+        key.hash(&mut hasher);
+
+        fvm::crypto::hash_into(SupportedHashes::Sha2_256, &hasher.0, &mut rval_digest);
+
+        rval_digest
+    }
+}
diff --git a/recall/ipld/src/lib.rs b/recall/ipld/src/lib.rs
new file mode 100644
index 0000000000..b6aef499aa
--- /dev/null
+++ b/recall/ipld/src/lib.rs
@@ -0,0 +1,19 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2019-2022 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+#[cfg(feature = "fil-actor")]
+use crate::hash_algorithm::FvmHashSha256;
+#[cfg(not(feature = "fil-actor"))]
+use fvm_ipld_hamt::Sha256;
+
+pub mod amt;
+pub mod hamt;
+mod hash_algorithm;
+
+#[cfg(feature = "fil-actor")]
+type Hasher = FvmHashSha256;
+
+#[cfg(not(feature = "fil-actor"))]
+type Hasher = Sha256;
diff --git a/recall/iroh_manager/Cargo.toml b/recall/iroh_manager/Cargo.toml
new file mode 100644
index 0000000000..623d4ed6ed
--- /dev/null
+++ b/recall/iroh_manager/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "iroh_manager"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+anyhow = { workspace = true }
+iroh = { workspace = true }
+iroh-blobs = { workspace = true }
+iroh-quinn = { workspace = true }
+iroh-relay = { workspace = true }
+n0-future = { workspace = true }
+num-traits = { workspace = true }
+quic-rpc = { workspace = true, features = ["quinn-transport", "test-utils"] }
+tokio = { workspace = true }
+tracing = { workspace = true }
+url = { workspace = true }
+
+[dev-dependencies]
+tempfile = { workspace = true }
+tracing-subscriber = { workspace = true }
diff --git a/recall/iroh_manager/src/lib.rs b/recall/iroh_manager/src/lib.rs
new file mode 100644
index 0000000000..10becf887c
--- /dev/null
+++ b/recall/iroh_manager/src/lib.rs
@@ -0,0 +1,70 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use anyhow::{anyhow, Result};
+use iroh_blobs::hashseq::HashSeq;
+use iroh_blobs::rpc::client::blobs::BlobStatus;
+use iroh_blobs::Hash;
+use num_traits::Zero;
+
+mod manager;
+mod node;
+
+pub use self::manager::{connect as connect_rpc, BlobsRpcClient, IrohManager};
+pub use self::node::IrohNode;
+pub use quic_rpc::Connector;
+
+pub type BlobsClient = iroh_blobs::rpc::client::blobs::Client;
+
+/// Returns the user blob hash and size from the hash sequence.
+/// The user blob hash is the first hash in the sequence.
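+///
+/// # Example (sketch)
+///
+/// `blobs` is assumed to be a connected [`BlobsClient`]:
+///
+/// ```ignore
+/// let (user_hash, size) = get_blob_hash_and_size(&blobs, seq_hash).await?;
+/// println!("user blob {user_hash} is {size} bytes");
+/// ```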
+pub async fn get_blob_hash_and_size(
+    iroh: &BlobsClient,
+    seq_hash: Hash,
+) -> Result<(Hash, u64), anyhow::Error> {
+    // Get the hash sequence status (it needs to be available)
+    let status = iroh.status(seq_hash).await.map_err(|e| {
+        anyhow!(
+            "failed to get status for hash sequence object: {} {}",
+            seq_hash,
+            e
+        )
+    })?;
+    let BlobStatus::Complete { size } = status else {
+        return Err(anyhow!(
+            "hash sequence object {} is not available",
+            seq_hash
+        ));
+    };
+    if size.is_zero() {
+        return Err(anyhow!("hash sequence object {} has zero size", seq_hash));
+    }
+
+    // Read the bytes and create a hash sequence
+    let res = iroh
+        .read_to_bytes(seq_hash)
+        .await
+        .map_err(|e| anyhow!("failed to read hash sequence object: {} {}", seq_hash, e))?;
+    let hash_seq = HashSeq::try_from(res)
+        .map_err(|e| anyhow!("failed to parse hash sequence object: {} {}", seq_hash, e))?;
+
+    // Get the user blob status at index 0 (it needs to be available)
+    let blob_hash = hash_seq.get(0).ok_or_else(|| {
+        anyhow!(
+            "failed to get hash with index 0 from hash sequence object: {}",
+            seq_hash
+        )
+    })?;
+    let status = iroh
+        .status(blob_hash)
+        .await
+        .map_err(|e| anyhow!("failed to read object: {} {}", blob_hash, e))?;
+
+    // Finally, get the size from the status
+    let BlobStatus::Complete { size } = status else {
+        return Err(anyhow!("object {} is not available", blob_hash));
+    };
+
+    Ok((blob_hash, size))
+}
diff --git a/recall/iroh_manager/src/manager.rs b/recall/iroh_manager/src/manager.rs
new file mode 100644
index 0000000000..af206e3be1
--- /dev/null
+++ b/recall/iroh_manager/src/manager.rs
@@ -0,0 +1,140 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
+use std::path::Path;
+
+use anyhow::Result;
+use iroh_blobs::rpc::proto::RpcService;
+use n0_future::task::AbortOnDropHandle;
+use quic_rpc::client::QuinnConnector;
+use tracing::info;
+
+use crate::{BlobsClient, IrohNode};
+
+#[derive(Debug)]
+pub struct IrohManager {
+    client: IrohNode,
+    server_key: Vec<u8>,
+    rpc_addr: SocketAddr,
+    _rpc_task: AbortOnDropHandle<()>,
+}
+
+impl IrohManager {
+    pub async fn new(
+        v4_addr: Option<SocketAddrV4>,
+        v6_addr: Option<SocketAddrV6>,
+        path: impl AsRef<Path>,
+        rpc_addr: Option<SocketAddr>,
+    ) -> Result<Self> {
+        let storage_path = path.as_ref().to_path_buf();
+        let client = IrohNode::persistent(v4_addr, v6_addr, &storage_path).await?;
+
+        // setup an RPC listener
+        let rpc_addr = rpc_addr.unwrap_or_else(|| "127.0.0.1:0".parse().unwrap());
+
+        let (config, server_key) = quic_rpc::transport::quinn::configure_server()?;
+        let endpoint = iroh_quinn::Endpoint::server(config, rpc_addr)?;
+        let local_addr = endpoint.local_addr()?;
+
+        info!("Iroh RPC listening on {} ({})", local_addr, rpc_addr);
+        let rpc_server = quic_rpc::transport::quinn::QuinnListener::new(endpoint)?;
+        let rpc_server = quic_rpc::RpcServer::<RpcService, _>::new(rpc_server);
+        let blobs = client.blobs.clone();
+        let rpc_task = rpc_server
+            .spawn_accept_loop(move |msg, chan| blobs.clone().handle_rpc_request(msg, chan));
+
+        Ok(Self {
+            client,
+            server_key,
+            rpc_addr: local_addr,
+            _rpc_task: rpc_task,
+        })
+    }
+
+    /// Retrieves a blobs client; the underlying node is started when the
+    /// manager is created.
+    pub fn blobs_client(&self) -> BlobsClient {
+        self.client.blobs_client().boxed()
+    }
+
+    /// Returns the key for the RPC client.
+    pub fn rpc_key(&self) -> &[u8] {
+        &self.server_key
+    }
+
+    pub fn rpc_addr(&self) -> SocketAddr {
+        self.rpc_addr
+    }
+}
+
+pub type BlobsRpcClient =
+    iroh_blobs::rpc::client::blobs::Client<QuinnConnector<RpcService>>;
+
+/// Connects to the RPC server listening on the given address.
+pub async fn connect(remote_addr: SocketAddr) -> Result<BlobsClient> {
+    info!("iroh RPC connecting to {}", remote_addr);
+    let bind_addr: SocketAddr = "0.0.0.0:0".parse()?;
+    let client = quic_rpc::transport::quinn::make_insecure_client_endpoint(bind_addr)?;
+    let client = QuinnConnector::<RpcService>::new(client, remote_addr, "localhost".to_string());
+    let client = quic_rpc::RpcClient::<RpcService>::new(client);
+    let client = iroh_blobs::rpc::client::blobs::Client::new(client);
+    Ok(client.boxed())
+}
+
+#[cfg(test)]
+mod tests {
+    use n0_future::StreamExt;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_append_delete() -> Result<()> {
+        tracing_subscriber::fmt().init();
+        let dir = tempfile::tempdir()?;
+
+        let iroh = IrohManager::new(None, None, dir.path(), None).await?;
+
+        let tags: Vec<_> = (0..10).map(|i| format!("tag-{i}")).collect();
+
+        for tag in &tags {
+            iroh.blobs_client()
+                .add_bytes_named(format!("content-for-{tag}"), tag.as_bytes())
+                .await?;
+        }
+
+        let existing_tags: Vec<_> = iroh
+            .blobs_client()
+            .tags()
+            .list()
+            .await?
+            .try_collect()
+            .await?;
+        assert_eq!(existing_tags.len(), 10);
+
+        let t = tags.clone();
+        let rpc_addr = iroh.rpc_addr();
+        let task = tokio::task::spawn(async move {
+            let client = connect(rpc_addr).await?;
+
+            for tag in t {
+                client.tags().delete(tag).await?;
+            }
+
+            anyhow::Ok(())
+        });
+
+        task.await??;
+
+        let existing_tags: Vec<_> = iroh
+            .blobs_client()
+            .tags()
+            .list()
+            .await?
+            .try_collect()
+            .await?;
+        dbg!(&existing_tags);
+        assert_eq!(existing_tags.len(), 0);
+
+        Ok(())
+    }
+}
diff --git a/recall/iroh_manager/src/node.rs b/recall/iroh_manager/src/node.rs
new file mode 100644
index 0000000000..56775a757a
--- /dev/null
+++ b/recall/iroh_manager/src/node.rs
@@ -0,0 +1,208 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
+use std::path::Path;
+use std::time::Duration;
+
+use anyhow::Result;
+use iroh::{
+    defaults::DEFAULT_STUN_PORT, protocol::Router, Endpoint, RelayMap, RelayMode, RelayNode,
+};
+use iroh_blobs::{
+    net_protocol::Blobs, rpc::proto::RpcService, store::GcConfig, util::fs::load_secret_key,
+};
+use iroh_relay::RelayQuicConfig;
+use quic_rpc::server::{ChannelTypes, RpcChannel, RpcServerError};
+use tracing::info;
+use url::Url;
+
+use crate::BlobsClient;
+
+/// Wrapper around an iroh `Endpoint` and the functionality
+/// to handle blobs.
+#[derive(Debug, Clone)]
+pub struct IrohNode {
+    router: Router,
+    pub(crate) blobs: BlobsWrapper,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) enum BlobsWrapper {
+    Mem {
+        blobs: Blobs<iroh_blobs::store::mem::Store>,
+        client: BlobsClient,
+    },
+    Fs {
+        blobs: Blobs<iroh_blobs::store::fs::Store>,
+        client: BlobsClient,
+    },
+}
+
+impl BlobsWrapper {
+    fn client(&self) -> &BlobsClient {
+        match self {
+            BlobsWrapper::Mem { ref client, .. } => client,
+            BlobsWrapper::Fs { ref client, .. } => client,
+        }
+    }
+
+    pub(crate) async fn handle_rpc_request<C>(
+        self,
+        msg: iroh_blobs::rpc::proto::Request,
+        chan: RpcChannel<RpcService, C>,
+    ) -> std::result::Result<(), RpcServerError<C>>
+    where
+        C: ChannelTypes<RpcService>,
+    {
+        match self {
+            BlobsWrapper::Mem { blobs, .. } => blobs.handle_rpc_request(msg, chan).await,
+            BlobsWrapper::Fs { blobs, .. } => blobs.handle_rpc_request(msg, chan).await,
+        }
+    }
+}
+
+/// GC interval duration.
+const GC_DURATION: Duration = Duration::from_secs(300);
+
+const DEFAULT_PORT_V4: u16 = 11204;
+const DEFAULT_PORT_V6: u16 = 11205;
+
+/// Hostname of the default USE relay.
+pub const USE_RELAY_HOSTNAME: &str = "use1-1.relay.recallnet.recall.iroh.link.";
+/// Hostname of the default USW relay.
+pub const USW_RELAY_HOSTNAME: &str = "usw1-1.relay.recallnet.recall.iroh.link.";
+/// Hostname of the default EUC relay.
+pub const EUC_RELAY_HOSTNAME: &str = "euc1-1.relay.recallnet.recall.iroh.link.";
+
+/// Get the default [`RelayMap`].
+pub fn default_relay_map() -> RelayMap {
+    RelayMap::from_iter([
+        default_use_relay_node(),
+        default_usw_relay_node(),
+        default_euc_relay_node(),
+    ])
+}
+
+/// Get the default [`RelayNode`] for USE.
+pub fn default_use_relay_node() -> RelayNode {
+    let url: Url = format!("https://{USE_RELAY_HOSTNAME}")
+        .parse()
+        .expect("default url");
+    RelayNode {
+        url: url.into(),
+        stun_only: false,
+        stun_port: DEFAULT_STUN_PORT,
+        quic: Some(RelayQuicConfig::default()),
+    }
+}
+
+/// Get the default [`RelayNode`] for USW.
+pub fn default_usw_relay_node() -> RelayNode {
+    let url: Url = format!("https://{USW_RELAY_HOSTNAME}")
+        .parse()
+        .expect("default url");
+    RelayNode {
+        url: url.into(),
+        stun_only: false,
+        stun_port: DEFAULT_STUN_PORT,
+        quic: Some(RelayQuicConfig::default()),
+    }
+}
+
+/// Get the default [`RelayNode`] for EUC.
+pub fn default_euc_relay_node() -> RelayNode {
+    let url: Url = format!("https://{EUC_RELAY_HOSTNAME}")
+        .parse()
+        .expect("default url");
+    RelayNode {
+        url: url.into(),
+        stun_only: false,
+        stun_port: DEFAULT_STUN_PORT,
+        quic: Some(RelayQuicConfig::default()),
+    }
+}
+
+impl IrohNode {
+    /// Creates a new persistent iroh node in the specified location.
+    ///
+    /// If the addrs are set to `None`, the node binds to the unspecified
+    /// address on the default ports (`11204` for IPv4, `11205` for IPv6).
+    pub async fn persistent(
+        v4_addr: Option<SocketAddrV4>,
+        v6_addr: Option<SocketAddrV6>,
+        path: impl AsRef<Path>,
+    ) -> Result<Self> {
+        // TODO: enable metrics
+
+        let root = path.as_ref();
+        info!("creating persistent iroh node in {}", root.display());
+
+        let blobs_path = root.join("blobs");
+        let secret_key_path = root.join("iroh_key");
+
+        tokio::fs::create_dir_all(&blobs_path).await?;
+        let secret_key = load_secret_key(secret_key_path).await?;
+
+        let v4 =
+            v4_addr.unwrap_or_else(|| SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_PORT_V4));
+        let v6 = v6_addr
+            .unwrap_or_else(|| SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_PORT_V6, 0, 0));
+
+        let endpoint = Endpoint::builder()
+            .discovery_n0()
+            .relay_mode(RelayMode::Custom(default_relay_map()))
+            .secret_key(secret_key)
+            .bind_addr_v4(v4)
+            .bind_addr_v6(v6)
+            .bind()
+            .await?;
+        let blobs = Blobs::persistent(&blobs_path).await?.build(&endpoint);
+        blobs.start_gc(GcConfig {
+            period: GC_DURATION,
+            done_callback: None,
+        })?;
+
+        let router = Router::builder(endpoint)
+            .accept(iroh_blobs::ALPN, blobs.clone())
+            .spawn();
+
+        let client = blobs.client().boxed();
+        Ok(Self {
+            router,
+            blobs: BlobsWrapper::Fs { blobs, client },
+        })
+    }
+
+    /// Creates a new in-memory iroh node.
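+    ///
+    /// # Example (sketch; requires a tokio runtime)
+    ///
+    /// ```ignore
+    /// let node = IrohNode::memory().await?;
+    /// let outcome = node.blobs_client().add_bytes(&b"hello"[..]).await?;
+    /// println!("stored blob {}", outcome.hash);
+    /// ```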
+    pub async fn memory() -> Result<Self> {
+        info!("creating in-memory iroh node");
+        let endpoint = Endpoint::builder().discovery_n0().bind().await?;
+        let blobs = Blobs::memory().build(&endpoint);
+        blobs.start_gc(GcConfig {
+            period: GC_DURATION,
+            done_callback: None,
+        })?;
+
+        let router = Router::builder(endpoint)
+            .accept(iroh_blobs::ALPN, blobs.clone())
+            .spawn();
+        let client = blobs.client().boxed();
+        Ok(Self {
+            router,
+            blobs: BlobsWrapper::Mem { blobs, client },
+        })
+    }
+
+    /// Returns the [`Endpoint`] for this node.
+    pub fn endpoint(&self) -> &Endpoint {
+        self.router.endpoint()
+    }
+
+    /// Returns the blobs client, necessary to interact with the blobs API.
+    pub fn blobs_client(&self) -> &BlobsClient {
+        self.blobs.client()
+    }
+}
diff --git a/recall/kernel/Cargo.toml b/recall/kernel/Cargo.toml
new file mode 100644
index 0000000000..386962a67c
--- /dev/null
+++ b/recall/kernel/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "recall_kernel"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[lib]
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+ambassador = { workspace = true }
+anyhow = { workspace = true }
+fvm = { workspace = true }
+fvm_ipld_blockstore = { workspace = true }
+fvm_shared = { workspace = true }
+
+recall_kernel_ops = { path = "./ops" }
+recall_syscalls = { path = "../syscalls" }
diff --git a/recall/kernel/ops/Cargo.toml b/recall/kernel/ops/Cargo.toml
new file mode 100644
index 0000000000..cb097829f5
--- /dev/null
+++ b/recall/kernel/ops/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "recall_kernel_ops"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[lib]
+crate-type = ["cdylib", "lib"]
+
+[dependencies]
+fvm = { workspace = true }
diff --git a/recall/kernel/ops/src/lib.rs b/recall/kernel/ops/src/lib.rs
new file mode 100644
index 0000000000..ee9be59b54
--- /dev/null
+++ b/recall/kernel/ops/src/lib.rs
@@ -0,0 +1,10 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use fvm::kernel::prelude::Cid;
+use fvm::kernel::Result;
+
+pub trait RecallOps {
+    fn block_add(&mut self, cid: Cid, data: &[u8]) -> Result<()>;
+}
diff --git a/recall/kernel/src/lib.rs b/recall/kernel/src/lib.rs
new file mode 100644
index 0000000000..dd05c61255
--- /dev/null
+++ b/recall/kernel/src/lib.rs
@@ -0,0 +1,132 @@
+// Copyright 2025 Recall Contributors
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use ambassador::Delegate;
+use fvm::call_manager::CallManager;
+use fvm::gas::Gas;
+use fvm::kernel::prelude::*;
+use fvm::kernel::{
+    ActorOps, CryptoOps, DebugOps, EventOps, IpldBlockOps, MessageOps, NetworkOps, RandomnessOps,
+    SelfOps, SendOps, SyscallHandler, UpgradeOps,
+};
+use fvm::kernel::{ClassifyResult, Result};
+use fvm::syscalls::Linker;
+use fvm::DefaultKernel;
+use fvm_ipld_blockstore::Blockstore;
+use fvm_shared::clock::ChainEpoch;
+use fvm_shared::randomness::RANDOMNESS_LENGTH;
+use fvm_shared::sys::out::network::NetworkContext;
+use fvm_shared::sys::out::vm::MessageContext;
+use fvm_shared::{address::Address, econ::TokenAmount, ActorID, MethodNum};
+use recall_kernel_ops::RecallOps;
+
+#[allow(clippy::duplicated_attributes)]
+#[derive(Delegate)]
+#[delegate(ActorOps, where = "C: CallManager")]
+#[delegate(SendOps<K>, generics = "K", where = "K: Kernel")]
+#[delegate(UpgradeOps<K>, generics = "K", where = "K: Kernel")]
CallManager")] +#[delegate(CryptoOps, where = "C: CallManager")] +#[delegate(DebugOps, where = "C: CallManager")] +#[delegate(EventOps, where = "C: CallManager")] +#[delegate(MessageOps, where = "C: CallManager")] +#[delegate(NetworkOps, where = "C: CallManager")] +#[delegate(RandomnessOps, where = "C: CallManager")] +#[delegate(SelfOps, where = "C: CallManager")] +pub struct RecallKernel(pub DefaultKernel); + +impl RecallOps for RecallKernel +where + C: CallManager, +{ + /// Directly add a block, skipping gas and reachability checks. + fn block_add(&mut self, cid: Cid, data: &[u8]) -> Result<()> { + self.0 + .call_manager + .blockstore() + .put_keyed(&cid, data) + .or_fatal()?; + self.0.blocks.mark_reachable(&cid); + Ok(()) + } +} + +impl SyscallHandler for RecallKernel +where + K: Kernel + + ActorOps + + SendOps + + UpgradeOps + + IpldBlockOps + + CryptoOps + + DebugOps + + EventOps + + MessageOps + + NetworkOps + + RandomnessOps + + SelfOps + + RecallOps, +{ + fn link_syscalls(linker: &mut Linker) -> anyhow::Result<()> { + DefaultKernel::::link_syscalls(linker)?; + linker.link_syscall( + recall_syscalls::MODULE_NAME, + recall_syscalls::DELETE_BLOB_SYSCALL_FUNCTION_NAME, + recall_syscalls::delete_blob, + )?; + + Ok(()) + } +} + +impl Kernel for RecallKernel +where + C: CallManager, +{ + type CallManager = C; + type Limiter = as Kernel>::Limiter; + + fn into_inner(self) -> (Self::CallManager, BlockRegistry) + where + Self: Sized, + { + self.0.into_inner() + } + + fn new( + mgr: C, + blocks: BlockRegistry, + caller: ActorID, + actor_id: ActorID, + method: MethodNum, + value_received: TokenAmount, + read_only: bool, + ) -> Self { + RecallKernel(DefaultKernel::new( + mgr, + blocks, + caller, + actor_id, + method, + value_received, + read_only, + )) + } + + fn machine(&self) -> &::Machine { + self.0.machine() + } + + fn limiter_mut(&mut self) -> &mut Self::Limiter { + self.0.limiter_mut() + } + + fn gas_available(&self) -> Gas { + self.0.gas_available() + } + + fn charge_gas(&self, name: &str, compute: Gas) -> Result { + self.0.charge_gas(name, compute) + } +} diff --git a/recall/syscalls/Cargo.toml b/recall/syscalls/Cargo.toml new file mode 100644 index 0000000000..49d6ce5335 --- /dev/null +++ b/recall/syscalls/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "recall_syscalls" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +fvm = { workspace = true } +fvm_shared = { workspace = true } +iroh-blobs = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } + +recall_kernel_ops = { path = "../kernel/ops" } +iroh_manager = { path = "../iroh_manager" } diff --git a/recall/syscalls/src/lib.rs b/recall/syscalls/src/lib.rs new file mode 100644 index 0000000000..82065321a8 --- /dev/null +++ b/recall/syscalls/src/lib.rs @@ -0,0 +1,60 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::net::SocketAddr; + +use fvm::kernel::{ExecutionError, Result, SyscallError}; +use fvm::syscalls::Context; +use fvm_shared::error::ErrorNumber; +use iroh_blobs::Hash; +use iroh_manager::BlobsClient; +use recall_kernel_ops::RecallOps; +use tokio::sync::Mutex; + +pub const MODULE_NAME: &str = "recall"; +pub const DELETE_BLOB_SYSCALL_FUNCTION_NAME: &str = "delete_blob"; + +const ENV_IROH_RPC_ADDR: &str = "IROH_SYSCALL_RPC_ADDR"; + +async fn connect_rpc() -> Option { + let bind_addr: SocketAddr = 
+
+async fn connect_rpc() -> Option<BlobsClient> {
+    let bind_addr: SocketAddr = std::env::var(ENV_IROH_RPC_ADDR).ok()?.parse().ok()?;
+    let addr: SocketAddr = format!("127.0.0.1:{}", bind_addr.port()).parse().ok()?;
+    iroh_manager::connect_rpc(addr).await.ok()
+}
+
+static IROH_RPC_CLIENT: Mutex<Option<BlobsClient>> = Mutex::const_new(None);
+
+fn hash_source(bytes: &[u8]) -> Result<[u8; 32]> {
+    bytes
+        .try_into()
+        .map_err(|e| ExecutionError::Syscall(SyscallError::new(ErrorNumber::IllegalArgument, e)))
+}
+
+/// Deletes a blob by hash from backing storage.
+pub fn delete_blob(context: Context<'_, impl RecallOps>, hash_offset: u32) -> Result<()> {
+    let hash_bytes = context.memory.try_slice(hash_offset, 32)?;
+    let seq_hash = Hash::from_bytes(hash_source(hash_bytes)?);
+
+    tracing::debug!("queueing blob {} for deletion", seq_hash);
+
+    // No blocking
+    tokio::task::spawn(async move {
+        let mut client_lock = IROH_RPC_CLIENT.lock().await;
+        if client_lock.is_none() {
+            let client = connect_rpc().await;
+            if client.is_none() {
+                tracing::error!("unable to establish connection to iroh");
+                return;
+            }
+            *client_lock = client;
+        }
+        let Some(client) = &*client_lock else {
+            return;
+        };
+        if let Err(err) = client.tags().delete(seq_hash).await {
+            tracing::warn!(hash = %seq_hash, error = err.to_string(), "deleting tag from iroh failed");
+        }
+    });
+
+    Ok(())
+}
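+
+// Sketch: the node side is expected to export the manager's RPC address so
+// this syscall can reach it. Illustrative wiring only:
+//
+//     let manager = IrohManager::new(None, None, path, None).await?;
+//     std::env::set_var("IROH_SYSCALL_RPC_ADDR", manager.rpc_addr().to_string());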