diff --git a/.gitmodules b/.gitmodules
index 1d5d56841834..2cc82ba95cb4 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,9 +1,6 @@
 [submodule "etc/system-contracts"]
 	path = etc/system-contracts
 	url = https://github.com/matter-labs/era-system-contracts.git
-[submodule "etc/openzeppelin-contracts"]
-	path = etc/openzeppelin-contracts
-	url = https://github.com/matter-labs/era-openzeppelin.git
 [submodule "contracts"]
 	path = contracts
 	url = https://github.com/matter-labs/era-contracts.git
diff --git a/Cargo.lock b/Cargo.lock
index 8796a799cdf0..c16c2a104b50 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -64,7 +64,7 @@ dependencies = [
  "http",
  "httparse",
  "httpdate",
- "itoa 1.0.6",
+ "itoa",
  "language-tags",
  "local-channel",
  "mime",
@@ -177,7 +177,7 @@ dependencies = [
  "futures-core",
  "futures-util",
  "http",
- "itoa 1.0.6",
+ "itoa",
  "language-tags",
  "log",
  "mime",
@@ -200,7 +200,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9"
 dependencies = [
  "actix-router",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -231,19 +231,6 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

-[[package]]
-name = "admin-tools"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "chrono",
- "clap 4.3.4",
- "dotenvy",
- "tokio",
- "zksync_dal",
- "zksync_types",
-]
-
 [[package]]
 name = "aes"
 version = "0.6.0"
@@ -530,9 +517,9 @@ version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -541,9 +528,9 @@ version = "0.1.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -581,6 +568,54 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

+[[package]]
+name = "axum"
+version = "0.6.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bitflags 1.3.2",
+ "bytes 1.4.0",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "sync_wrapper",
+ "tokio",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
+dependencies = [
+ "async-trait",
+ "bytes 1.4.0",
+ "futures-util",
+ "http",
+ "http-body",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
 [[package]]
 name = "backon"
 version = "0.4.1"
@@ -672,9 +707,9 @@ dependencies = [
 [[package]]
 name = "bigdecimal"
-version = "0.2.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc403c26e6b03005522e6e8053384c4e881dfe5b2bf041c0c2c49be33d64a539"
+checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256"
 dependencies = [
  "num-bigint 0.3.3",
  "num-integer",
@@ -704,12 +739,12 @@ dependencies = [
  "lazycell",
  "peeking_take_while",
  "prettyplease",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "regex",
  "rustc-hash",
  "shlex",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -739,10 +774,22 @@ version = "0.20.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848"
 dependencies = [
- "funty",
- "radium",
+ "funty 1.1.0",
+ "radium 0.6.2",
  "tap",
- "wyz",
+ "wyz 0.2.0",
+]
+
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
+dependencies = [
+ "funty 2.0.0",
+ "radium 0.7.0",
+ "tap",
+ "wyz 0.5.1",
 ]
@@ -1073,7 +1120,7 @@ dependencies = [
 [[package]]
 name = "circuit_testing"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#abd44b507840f836da6e084aaacb2ba8a7cb1df6"
+source = "git+https://github.com/matter-labs/circuit_testing.git?branch=main#028864449036071cfb4e9ebe7ee4c5be59893031"
 dependencies = [
  "bellman_ce",
 ]
@@ -1147,9 +1194,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
 dependencies = [
  "heck 0.4.1",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -1179,9 +1226,9 @@ dependencies = [
 [[package]]
 name = "codegen"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006"
+source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#07954802c13fb087efb5874c2ce521f843d614fd"
 dependencies = [
- "ethereum-types",
+ "ethereum-types 0.14.1",
  "franklin-crypto",
  "handlebars",
  "hex",
@@ -1366,7 +1413,6 @@ dependencies = [
  "ctrlc",
  "envy",
  "futures 0.3.28",
- "prometheus_exporter",
  "serde",
  "serde_json",
  "tokio",
@@ -1573,23 +1619,13 @@ dependencies = [
  "subtle",
 ]

-[[package]]
-name = "crypto-mac"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
-dependencies = [
- "generic-array 0.14.7",
- "subtle",
-]
-
 [[package]]
 name = "cs_derive"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30"
+source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b"
 dependencies = [
  "proc-macro-error",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "serde",
  "syn 1.0.109",
@@ -1632,7 +1668,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
 dependencies = [
  "fnv",
  "ident_case",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "strsim 0.10.0",
  "syn 1.0.109",
@@ -1666,7 +1702,7 @@ dependencies = [
 name = "db_test_macro"
 version = "0.1.0"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -1708,7 +1744,7 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -1720,7 +1756,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
 dependencies = [
  "convert_case 0.4.0",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "rustc_version",
  "syn 1.0.109",
 ]
@@ -1757,9 +1793,9 @@ dependencies = [
 [[package]]
 name = "dirs"
-version = "3.0.2"
+version = "4.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309"
+checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
 dependencies = [
  "dirs-sys",
 ]
@@ -1781,12 +1817,6 @@ version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"

-[[package]]
-name = "dotenvy"
-version = "0.15.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
-
 [[package]]
 name = "ecdsa"
 version = "0.14.8"
@@ -1843,6 +1873,16 @@ dependencies = [
  "cfg-if 1.0.0",
 ]

+[[package]]
+name = "enumeration_indices_migration"
+version = "0.1.0"
+dependencies = [
+ "tokio",
+ "vlog",
+ "zksync_core",
+ "zksync_dal",
+]
+
 [[package]]
 name = "env_logger"
 version = "0.9.3"
@@ -1856,6 +1896,19 @@ dependencies = [
  "termcolor",
 ]

+[[package]]
+name = "env_logger"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
 [[package]]
 name = "envy"
 version = "0.4.2"
@@ -1897,15 +1950,17 @@ dependencies = [
 [[package]]
 name = "ethabi"
-version = "16.0.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5"
+checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898"
 dependencies = [
- "ethereum-types",
+ "ethereum-types 0.14.1",
  "hex",
+ "once_cell",
+ "regex",
  "serde",
  "serde_json",
- "sha3 0.9.1",
+ "sha3 0.10.6",
  "thiserror",
  "uint",
 ]
@@ -1917,9 +1972,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8"
 dependencies = [
  "crunchy",
- "fixed-hash",
+ "fixed-hash 0.7.0",
+ "impl-rlp",
+ "impl-serde 0.3.2",
+ "tiny-keccak 2.0.2",
+]
+
+[[package]]
+name = "ethbloom"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60"
+dependencies = [
+ "crunchy",
+ "fixed-hash 0.8.0",
  "impl-rlp",
- "impl-serde",
+ "impl-serde 0.4.0",
  "tiny-keccak 2.0.2",
 ]
@@ -1929,11 +1997,25 @@ version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf"
 dependencies = [
- "ethbloom",
- "fixed-hash",
+ "ethbloom 0.11.1",
+ "fixed-hash 0.7.0",
  "impl-rlp",
- "impl-serde",
- "primitive-types",
+ "impl-serde 0.3.2",
+ "primitive-types 0.10.1",
+ "uint",
+]
+
+[[package]]
+name = "ethereum-types"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee"
+dependencies = [
+ "ethbloom 0.13.0",
+ "fixed-hash 0.8.0",
+ "impl-rlp",
+ "impl-serde 0.4.0",
+ "primitive-types 0.12.1",
  "uint",
 ]
@@ -1990,7 +2072,7 @@ dependencies = [
  "num-bigint 0.4.3",
  "num-integer",
  "num-traits",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "serde",
  "syn 1.0.109",
@@ -2020,6 +2102,18 @@ dependencies = [
  "static_assertions",
 ]

+[[package]]
+name = "fixed-hash"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534"
+dependencies = [
+ "byteorder",
+ "rand 0.8.5",
+ "rustc-hex",
+ "static_assertions",
+]
+
 [[package]]
 name = "fixedbitset"
 version = "0.4.2"
@@ -2124,6 +2218,12 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7"

+[[package]]
+name = "funty"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
+
 [[package]]
 name = "futures"
 version = "0.1.31"
@@ -2196,9 +2296,9 @@ version = "0.3.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -2310,14 +2410,15 @@ dependencies = [
 [[package]]
 name = "gloo-net"
-version = "0.2.6"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10"
+checksum = "a66b4e3c7d9ed8d315fd6b97c8b1f74a7c6ecbbc2320e65ae7ed38b7068cc620"
 dependencies = [
  "futures-channel",
  "futures-core",
  "futures-sink",
  "gloo-utils",
+ "http",
  "js-sys",
  "pin-project",
  "serde",
@@ -2568,6 +2669,9 @@ name = "heck"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+dependencies = [
+ "unicode-segmentation",
+]

 [[package]]
 name = "hermit-abi"
@@ -2600,22 +2704,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

 [[package]]
-name = "hmac"
-version = "0.10.1"
+name = "hkdf"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15"
+checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
 dependencies = [
- "crypto-mac 0.10.1",
- "digest 0.9.0",
+ "hmac 0.12.1",
 ]

 [[package]]
 name = "hmac"
-version = "0.11.0"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
+checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15"
 dependencies = [
- "crypto-mac 0.11.1",
+ "crypto-mac 0.10.1",
  "digest 0.9.0",
 ]
@@ -2656,7 +2759,7 @@ checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
 dependencies = [
  "bytes 1.4.0",
  "fnv",
- "itoa 1.0.6",
+ "itoa",
 ]
@@ -2709,7 +2812,7 @@ dependencies = [
  "http-body",
  "httparse",
  "httpdate",
- "itoa 1.0.6",
+ "itoa",
  "pin-project-lite",
  "socket2",
  "tokio",
@@ -2795,32 +2898,30 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"

 [[package]]
 name = "idna"
-version = "0.2.3"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
+checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
 dependencies = [
- "matches",
  "unicode-bidi",
  "unicode-normalization",
 ]

 [[package]]
-name = "idna"
-version = "0.4.0"
+name = "impl-codec"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443"
 dependencies = [
- "unicode-bidi",
- "unicode-normalization",
+ "parity-scale-codec 2.3.1",
 ]

 [[package]]
 name = "impl-codec"
-version = "0.5.1"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443"
+checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f"
 dependencies = [
- "parity-scale-codec",
+ "parity-scale-codec 3.6.1",
 ]
@@ -2841,13 +2942,22 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "impl-serde"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "impl-trait-for-tuples"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -2948,12 +3058,6 @@ dependencies = [
  "either",
 ]

-[[package]]
-name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
 [[package]]
 name = "itoa"
 version = "1.0.6"
@@ -3036,7 +3140,7 @@ version = "18.0.0"
 source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540"
 dependencies = [
  "proc-macro-crate 0.1.5",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -3103,9 +3207,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1822d18e4384a5e79d94dc9e4d1239cfa9fad24e55b44d2efeff5b394c9fece4"
+checksum = "e5f3783308bddc49d0218307f66a09330c106fbd792c58bac5c8dc294fdd0f98"
 dependencies = [
  "jsonrpsee-client-transport",
  "jsonrpsee-core",
@@ -3120,9 +3224,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-client-transport"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11aa5766d5c430b89cb26a99b88f3245eb91534be8126102cea9e45ee3891b22"
+checksum = "abc5630e4fa0096f00ec7b44d520701fda4504170cb85e22dca603ae5d7ad0d7"
 dependencies = [
  "futures-channel",
  "futures-util",
@@ -3137,14 +3241,14 @@ dependencies = [
  "tokio-rustls",
  "tokio-util 0.7.8",
  "tracing",
- "webpki-roots 0.23.1",
+ "webpki-roots 0.24.0",
 ]

 [[package]]
 name = "jsonrpsee-core"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64c6832a55f662b5a6ecc844db24b8b9c387453f923de863062c60ce33d62b81"
+checksum = "5aaa4c4d5fb801dcc316d81f76422db259809037a86b3194ae538dd026b05ed7"
 dependencies = [
  "anyhow",
  "async-lock",
@@ -3170,9 +3274,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-http-client"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1705c65069729e3dccff6fd91ee431d5d31cabcf00ce68a62a2c6435ac713af9"
+checksum = "aa7165efcbfbc951d180162ff28fe91b657ed81925e37a35e4a396ce12109f96"
 dependencies = [
  "async-trait",
  "hyper",
@@ -3189,22 +3293,22 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-proc-macros"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94"
+checksum = "21dc12b1d4f16a86e8c522823c4fab219c88c03eb7c924ec0501a64bf12e058b"
 dependencies = [
  "heck 0.4.1",
  "proc-macro-crate 1.3.1",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]

 [[package]]
 name = "jsonrpsee-server"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f06661d1a6b6e5b85469dc9c29acfbb9b3bb613797a6fd10a3ebb8a70754057"
+checksum = "6e79d78cfd5abd8394da10753723093c3ff64391602941c9c4b1d80a3414fd53"
 dependencies = [
  "futures-util",
  "hyper",
@@ -3222,9 +3326,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-types"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e5bf6c75ce2a4217421154adfc65a24d2b46e77286e59bba5d9fa6544ccc8f4"
+checksum = "00aa7cc87bc42e04e26c8ac3e7186142f7fd2949c763d9b6a7e64a69672d8fb2"
 dependencies = [
  "anyhow",
  "beef",
@@ -3236,9 +3340,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-wasm-client"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34e6ea7c6d862e60f8baebd946c037b70c6808a4e4e31e792a4029184e3ce13a"
+checksum = "0fe953c2801356f214d3f4051f786b3d11134512a46763ee8c39a9e3fa2cc1c0"
 dependencies = [
  "jsonrpsee-client-transport",
  "jsonrpsee-core",
@@ -3247,9 +3351,9 @@ dependencies = [
 [[package]]
 name = "jsonrpsee-ws-client"
-version = "0.18.2"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a64b2589680ba1ad7863f279cd2d5083c1dc0a7c0ea959d22924553050f8ab9f"
+checksum = "5c71b2597ec1c958c6d5bc94bb61b44d74eb28e69dc421731ab0035706f13882"
 dependencies = [
  "http",
  "jsonrpsee-client-transport",
@@ -3302,16 +3406,6 @@ dependencies = [
  "winapi-build",
 ]

-[[package]]
-name = "l1_tx_effective_gas_price_migration"
-version = "0.1.0"
-dependencies = [
- "structopt",
- "tokio",
- "zksync_dal",
- "zksync_types",
-]
-
 [[package]]
 name = "language-tags"
 version = "0.3.2"
@@ -3363,9 +3457,9 @@ checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"

 [[package]]
 name = "librocksdb-sys"
-version = "0.6.3+6.28.2"
+version = "0.11.0+8.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "184ce2a189a817be2731070775ad053b6804a340fee05c6686d711db27455917"
+checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e"
 dependencies = [
  "bindgen",
  "bzip2-sys",
@@ -3412,7 +3506,6 @@ dependencies = [
  "once_cell",
  "prometheus_exporter",
  "rand 0.8.5",
- "rand_distr",
  "regex",
  "reqwest",
  "serde",
@@ -3489,10 +3582,10 @@ dependencies = [
 ]

 [[package]]
-name = "matches"
-version = "0.1.10"
+name = "matchit"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
+checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"

 [[package]]
 name = "maybe-uninit"
@@ -3502,13 +3595,11 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"

 [[package]]
 name = "md-5"
-version = "0.9.1"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15"
+checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca"
 dependencies = [
- "block-buffer 0.9.0",
- "digest 0.9.0",
- "opaque-debug 0.3.0",
+ "digest 0.10.7",
 ]
@@ -3571,7 +3662,7 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -3709,6 +3800,20 @@ version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"

+[[package]]
+name = "multivm"
+version = "0.1.0"
+dependencies = [
+ "vlog",
+ "vm",
+ "vm_m5",
+ "vm_m6",
+ "zksync_contracts",
+ "zksync_state",
+ "zksync_types",
+ "zksync_utils",
+]
+
 [[package]]
 name = "native-tls"
 version = "0.2.11"
@@ -3893,7 +3998,7 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -3964,6 +4069,27 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "num_enum"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6"
+dependencies = [
+ "proc-macro-crate 1.3.1",
+ "proc-macro2 1.0.66",
+ "quote 1.0.28",
+ "syn 2.0.27",
+]
+
 [[package]]
 name = "object"
 version = "0.30.4"
@@ -4018,9 +4144,9 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -4138,7 +4264,7 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7"
 dependencies = [
  "Inflector",
  "proc-macro-error",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -4172,7 +4298,7 @@ dependencies = [
  "aes-ctr",
  "block-modes",
  "digest 0.9.0",
- "ethereum-types",
+ "ethereum-types 0.12.1",
  "hmac 0.10.1",
  "lazy_static",
  "pbkdf2 0.7.5",
@@ -4193,10 +4319,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909"
 dependencies = [
  "arrayvec 0.7.3",
- "bitvec",
+ "bitvec 0.20.4",
  "byte-slice-cast",
  "impl-trait-for-tuples",
- "parity-scale-codec-derive",
+ "parity-scale-codec-derive 2.3.1",
+ "serde",
+]
+
+[[package]]
+name = "parity-scale-codec"
+version = "3.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2287753623c76f953acd29d15d8100bcab84d29db78fb6f352adb3c53e83b967"
+dependencies = [
+ "arrayvec 0.7.3",
+ "bitvec 1.0.1",
+ "byte-slice-cast",
+ "impl-trait-for-tuples",
+ "parity-scale-codec-derive 3.6.1",
  "serde",
 ]
@@ -4207,7 +4347,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27"
 dependencies = [
  "proc-macro-crate 1.3.1",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
+
+[[package]]
+name = "parity-scale-codec-derive"
+version = "3.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b6937b5e67bfba3351b87b040d48352a2fcb6ad72f81855412ce97b45c8f110"
+dependencies = [
+ "proc-macro-crate 1.3.1",
+ "proc-macro2 1.0.66",
+ "quote 1.0.28",
+ "syn 1.0.109",
+]
@@ -4374,9 +4526,9 @@ checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e"
 dependencies = [
  "pest",
  "pest_meta",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -4415,9 +4567,9 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
- "syn 2.0.18",
+ "syn 2.0.27",
 ]
@@ -4525,8 +4677,8 @@ version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1"
 dependencies = [
- "proc-macro2 1.0.60",
- "syn 2.0.18",
+ "proc-macro2 1.0.66",
+ "syn 2.0.27",
 ]
@@ -4535,10 +4687,23 @@ version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373"
 dependencies = [
- "fixed-hash",
- "impl-codec",
+ "fixed-hash 0.7.0",
+ "impl-codec 0.5.1",
+ "impl-rlp",
+ "impl-serde 0.3.2",
+ "uint",
+]
+
+[[package]]
+name = "primitive-types"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66"
+dependencies = [
+ "fixed-hash 0.8.0",
+ "impl-codec 0.6.0",
  "impl-rlp",
- "impl-serde",
+ "impl-serde 0.4.0",
  "uint",
 ]
@@ -4568,7 +4733,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
 dependencies = [
  "proc-macro-error-attr",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
  "version_check",
 ]
@@ -4580,7 +4745,7 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "version_check",
 ]
@@ -4602,9 +4767,9 @@ dependencies = [
 [[package]]
 name = "proc-macro2"
-version = "1.0.60"
+version = "1.0.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
 dependencies = [
  "unicode-ident",
 ]
@@ -4617,7 +4782,6 @@ dependencies = [
  "metrics-exporter-prometheus",
  "tokio",
  "vlog",
- "zksync_config",
 ]
@@ -4658,7 +4822,7 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe"
 dependencies = [
  "anyhow",
  "itertools",
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
  "quote 1.0.28",
  "syn 1.0.109",
 ]
@@ -4731,7 +4895,7 @@ version = "1.0.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488"
 dependencies = [
- "proc-macro2 1.0.60",
+ "proc-macro2 1.0.66",
 ]
@@ -4740,6 +4904,12 @@ version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb"

+[[package]]
+name = "radium"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
+
 [[package]]
 name = "rand"
 version = "0.4.6"
@@ -4859,16 +5029,6 @@ dependencies = [
  "getrandom 0.2.10",
 ]

-[[package]]
-name = "rand_distr"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
-dependencies = [
- "num-traits",
- "rand 0.8.5",
-]
-
 [[package]]
 name = "rand_hc"
 version = "0.1.0"
@@ -5176,9 +5336,9 @@ dependencies = [
 [[package]]
 name = "rocksdb"
-version = "0.18.0"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290"
+checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe"
 dependencies = [
  "libc",
  "librocksdb-sys",
@@ -5259,7 +5419,7 @@ checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f"
 dependencies = [
  "log",
  "ring",
- "rustls-webpki",
+ "rustls-webpki 0.100.1",
  "sct",
 ]
@@ -5294,6 +5454,16 @@ dependencies = [
  "untrusted",
 ]

+[[package]]
+name = "rustls-webpki"
+version = "0.101.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e"
+dependencies = [
+ "ring",
+ "untrusted",
+]
+
 [[package]]
 name = "rustversion"
 version = "1.0.12"
@@ -5386,16 +5556,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a"
 dependencies = [
  "rand 0.6.5",
- "secp256k1-sys",
+ "secp256k1-sys 0.4.2",
 ]

 [[package]]
 name = "secp256k1"
-version = "0.21.3"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260"
+checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f"
 dependencies = [
- "secp256k1-sys",
+ "secp256k1-sys 0.8.1",
 ]
@@ -5407,6 +5577,15 @@ dependencies = [
  "cc",
 ]

+[[package]]
+name = "secp256k1-sys"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e"
+dependencies = [
+ "cc",
+]
+
 [[package]]
 name = "security-framework"
 version = "2.9.1"
@@ -5555,22 +5734,22 @@ dependencies = [
 [[package]]
 name = "serde"
-version = "1.0.164"
+version = "1.0.175"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+checksum = "5d25439cd7397d044e2748a6fe2432b5e85db703d6d097bd014b3c0ad1ebff0b"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.164"
+version = "1.0.175"
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", ] [[package]] @@ -5579,12 +5758,21 @@ version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ - "indexmap", - "itoa 1.0.6", + "itoa", "ryu", "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5592,7 +5780,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.6", + "itoa", "ryu", "serde", ] @@ -5603,6 +5791,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ + "base64 0.13.1", "hex", "serde", "serde_with_macros", @@ -5615,7 +5804,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "syn 1.0.109", ] @@ -5645,6 +5834,17 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha-1" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5861,9 +6061,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.5.9" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7911b0031a0247af40095838002999c7a52fba29d9739e93326e71a5a1bc9d43" +checksum = "551873805652ba0d912fec5bbb0f8b4cdd96baf8e2ebf5970e5671092966019b" dependencies = [ "sqlx-core", "sqlx-macros", @@ -5871,9 +6071,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.5.9" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aec89bfaca8f7737439bad16d52b07f1ccd0730520d3bf6ae9d069fe4b641fb1" +checksum = "e48c61941ccf5ddcada342cd59e3e5173b007c509e1e8e990dafc830294d9dc5" dependencies = [ "ahash 0.7.6", "atoi", @@ -5884,34 +6084,34 @@ dependencies = [ "bytes 1.4.0", "chrono", "crc", - "crossbeam-channel 0.5.8", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", "dirs", "either", + "event-listener", "futures-channel", "futures-core", "futures-intrusive", "futures-util", "hashlink", "hex", - "hmac 0.11.0", + "hkdf", + "hmac 0.12.1", "indexmap", "ipnetwork", - "itoa 0.4.8", + "itoa", "libc", "log", "md-5", "memchr", "num-bigint 0.3.3", "once_cell", - "parking_lot 0.11.2", + "paste", "percent-encoding", "rand 0.8.5", "serde", "serde_json", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha-1 0.10.1", + "sha2 0.10.6", "smallvec", "sqlformat", "sqlx-rt", @@ -5924,20 +6124,20 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.5.9" 
+version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584866c833511b1a152e87a7ee20dee2739746f60c858b3c5209150bc4b466f5" +checksum = "bc0fba2b0cae21fc00fe6046f8baa4c7fcb49e379f0f592b04696607f69ed2e1" dependencies = [ "dotenv", "either", - "heck 0.3.3", + "heck 0.4.1", "hex", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "serde", "serde_json", - "sha2 0.9.9", + "sha2 0.10.6", "sqlx-core", "sqlx-rt", "syn 1.0.109", @@ -6003,7 +6203,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "syn 1.0.109", ] @@ -6024,7 +6224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "rustversion", "syn 1.0.109", @@ -6053,18 +6253,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.18" +version = "2.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "unicode-ident", ] @@ -6072,7 +6272,7 @@ dependencies = [ [[package]] name = "sync_vm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ "arrayvec 0.7.3", "cs_derive", @@ -6091,10 +6291,16 @@ dependencies = [ "sha2 0.10.6", "sha3 0.10.6", "smallvec", - "zk_evm", - "zkevm_opcode_defs", + "zk_evm 1.3.3", + "zkevm_opcode_defs 1.3.2", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "system-constants-generator" version = "0.1.0" @@ -6161,7 +6367,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", "syn 1.0.109", ] @@ -6196,9 +6402,9 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", ] [[package]] @@ -6211,6 +6417,26 @@ dependencies = [ "once_cell", ] +[[package]] +name = "tikv-jemalloc-sys" +version = "0.5.3+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a678df20055b43e57ef8cddde41cdfda9a3c1a060b67f4c5836dfb1d78543ba8" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.1.43" @@ -6227,7 +6453,7 @@ version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ - "itoa 1.0.6", + "itoa", "serde", "time-core", "time-macros", @@ -6326,9 +6552,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", ] [[package]] @@ -6465,7 +6691,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -6553,9 +6779,9 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", ] [[package]] @@ -6760,7 +6986,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna", "percent-encoding", "serde", ] @@ -6839,8 +7065,8 @@ dependencies = [ "thiserror", "tokio", "vlog", - "zk_evm", - "zkevm-assembly", + "zk_evm 1.3.3", + "zkevm-assembly 1.3.2", "zksync_config", "zksync_contracts", "zksync_eth_signer", @@ -6868,10 +7094,59 @@ dependencies = [ "once_cell", "ouroboros", "vm", - "zk_evm", + "zk_evm 1.3.3", + "zksync_config", + "zksync_contracts", + "zksync_state", + "zksync_types", + "zksync_utils", +] + +[[package]] +name = "vm_m5" +version = "0.1.0" +dependencies = [ + "hex", + "itertools", + "metrics", + "once_cell", + "serde", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "vlog", + "zk_evm 1.3.1", "zksync_config", "zksync_contracts", + "zksync_crypto", "zksync_state", + "zksync_storage", + "zksync_types", + "zksync_utils", +] + +[[package]] +name = "vm_m6" +version = "0.1.0" +dependencies = [ + "hex", + "itertools", + "metrics", + "once_cell", + "serde", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "vlog", + "zk_evm 1.3.1", + "zkevm-assembly 1.3.1", + "zksync_config", + "zksync_contracts", + "zksync_crypto", + "zksync_state", + "zksync_storage", "zksync_types", "zksync_utils", ] @@ -6932,9 +7207,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", "wasm-bindgen-shared", ] @@ -6966,9 +7241,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.66", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.27", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7004,21 +7279,21 @@ dependencies = [ [[package]] name = "web3" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" +checksum = 
"5388522c899d1e1c96a4c307e3797e0f697ba7c77dd8e0e625ecba9dd0342937" dependencies = [ "arrayvec 0.7.3", - "base64 0.13.1", + "base64 0.21.2", "bytes 1.4.0", "derive_more", "ethabi", - "ethereum-types", + "ethereum-types 0.14.1", "futures 0.3.28", "futures-timer", "headers", "hex", - "idna 0.2.3", + "idna", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log", "once_cell", @@ -7026,7 +7301,7 @@ dependencies = [ "pin-project", "reqwest", "rlp", - "secp256k1 0.21.3", + "secp256k1 0.27.0", "serde", "serde_json", "tiny-keccak 2.0.2", @@ -7054,11 +7329,11 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.101.1", ] [[package]] @@ -7315,6 +7590,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "yaml-rust" version = "0.4.5" @@ -7330,10 +7614,27 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +[[package]] +name = "zk_evm" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#be4da71446924f739b9fb10cfd310231adf2a542" +dependencies = [ + "blake2 0.10.6", + "k256", + "lazy_static", + "num 0.4.0", + "serde", + "serde_json", + "sha2 0.10.6", + "sha3 0.10.6", + "static_assertions", + "zkevm_opcode_defs 1.3.1", +] + [[package]] name = "zk_evm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#c08a8581421d2a0cf1fc8cbbdcd06c00da01fe0e" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.3#9a1eaa98acb9e3280dbbde5b132cbf64e15fe96e" dependencies = [ "anyhow", "lazy_static", @@ -7342,47 +7643,78 @@ dependencies = [ "serde_json", "static_assertions", "zk_evm_abstractions", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", ] [[package]] name = "zk_evm_abstractions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#973a1f661c045e0e8b9a287505f353659279b3b3" +source = "git+https://github.com/matter-labs/zk_evm_abstractions.git#839721a4ae2093c5c0aa8ffd49758f32ecd172ed" dependencies = [ "anyhow", "serde", "static_assertions", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", +] + +[[package]] +name = "zkevm-assembly" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#55e9f69a9b57725c4e47eef91eebf6899919e0e0" +dependencies = [ + "env_logger 0.9.3", + "hex", + "lazy_static", + "log", + "nom", + "num-bigint 0.4.3", + "num-traits", + "sha3 0.10.6", + "smallvec", + "structopt", + "thiserror", + "zkevm_opcode_defs 1.3.1", ] [[package]] name = "zkevm-assembly" version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#edc364e59a2eea9c4b1d4ce79f15d0b7c6b55b98" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#a276422b9f874242eeaeecd1434542565064f85e" 
 dependencies = [
- "env_logger",
+ "env_logger 0.9.3",
  "hex",
  "lazy_static",
  "log",
  "nom",
  "num-bigint 0.4.3",
  "num-traits",
+ "regex",
  "sha3 0.10.6",
  "smallvec",
  "structopt",
  "thiserror",
- "zkevm_opcode_defs",
+ "zkevm_opcode_defs 1.3.2",
+]
+
+[[package]]
+name = "zkevm_opcode_defs"
+version = "1.3.1"
+source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#8fab95391d64d4c5a035da1dcd7b7e3f989f9f76"
+dependencies = [
+ "bitflags 1.3.2",
+ "ethereum-types 0.14.1",
+ "lazy_static",
+ "sha2 0.10.6",
 ]

 [[package]]
 name = "zkevm_opcode_defs"
 version = "1.3.2"
-source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#2f69c6975a272e8c31d2d82c136a4ea81df25115"
+source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#780ce4129a95ab9a68abf0d60c156ee8df6008c2"
 dependencies = [
  "bitflags 2.3.2",
  "blake2 0.10.6",
- "ethereum-types",
+ "ethereum-types 0.14.1",
  "k256",
  "lazy_static",
  "sha2 0.10.6",
@@ -7392,14 +7724,14 @@ dependencies = [
 [[package]]
 name = "zkevm_test_harness"
 version = "1.3.3"
-source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#6453eab3c9c8915f588ff4eceb48d7be9a695ecb"
+source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.3#363ead7afaac72bd3006c49d501934747781cbb4"
 dependencies = [
  "bincode",
  "circuit_testing",
  "codegen 0.2.0",
  "crossbeam 0.8.2",
  "derivative",
- "env_logger",
+ "env_logger 0.10.0",
  "hex",
  "num-bigint 0.4.3",
  "num-integer",
@@ -7412,8 +7744,8 @@ dependencies = [
  "sync_vm",
  "test-log",
  "tracing",
- "zk_evm",
- "zkevm-assembly",
+ "zk_evm 1.3.3",
+ "zkevm-assembly 1.3.2",
 ]
@@ -7452,7 +7784,6 @@ dependencies = [
  "convert_case 0.6.0",
  "futures 0.3.28",
  "hex",
- "serde",
  "serde_json",
  "thiserror",
  "tokio",
@@ -7461,8 +7792,6 @@ dependencies = [
  "zksync_dal",
  "zksync_eth_client",
  "zksync_types",
- "zksync_utils",
- "zksync_verification_key_generator_and_server",
 ]
@@ -7531,6 +7860,7 @@ dependencies = [
  "anyhow",
  "assert_matches",
  "async-trait",
+ "axum",
  "bigdecimal",
  "bitflags 1.3.2",
  "chrono",
@@ -7540,7 +7870,6 @@ dependencies = [
  "futures 0.3.28",
  "governor",
  "hex",
- "hyper",
  "itertools",
  "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)",
  "jsonrpc-core-client",
@@ -7549,6 +7878,7 @@ dependencies = [
  "jsonrpc-pubsub",
  "jsonrpc-ws-server",
  "metrics",
+ "multivm",
  "num 0.3.1",
  "once_cell",
  "prometheus_exporter",
@@ -7558,6 +7888,7 @@ dependencies = [
  "serde_json",
  "tempfile",
  "thiserror",
+ "tikv-jemallocator",
  "tokio",
  "tower",
  "tower-http",
@@ -7593,7 +7924,6 @@ dependencies = [
  "blake2 0.10.6",
  "hex",
  "once_cell",
- "rand 0.4.6",
  "serde",
  "serde_json",
  "sha2 0.9.9",
@@ -7606,6 +7936,7 @@ name = "zksync_dal"
 version = "1.0.0"
 dependencies = [
  "anyhow",
+ "assert_matches",
  "bigdecimal",
  "bincode",
  "db_test_macro",
@@ -7614,6 +7945,7 @@ dependencies = [
  "metrics",
  "num 0.3.1",
  "once_cell",
+ "serde",
  "serde_json",
  "sqlx",
  "strum",
@@ -7636,7 +7968,6 @@ dependencies = [
  "hex",
  "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "metrics",
- "parity-crypto",
  "serde",
  "thiserror",
  "tokio",
@@ -7660,7 +7991,7 @@ dependencies = [
  "parity-crypto",
  "reqwest",
  "rlp",
- "secp256k1 0.21.3",
+ "secp256k1 0.27.0",
  "serde",
  "serde_derive",
  "serde_json",
@@ -7698,7 +8029,12 @@ dependencies = [
 name = "zksync_health_check"
 version = "0.1.0"
 dependencies = [
+ "assert_matches",
  "async-trait",
+ "futures 0.3.28",
+ "serde",
+ "serde_json",
+ "tokio",
 ]

 [[package]]
@@ -7765,12 +8101,14 @@ dependencies = [
 name = "zksync_prover_utils"
 version = "1.0.0"
 dependencies = [
+ "async-trait",
  "ctrlc",
  "futures 0.3.28",
  "metrics",
  "regex",
  "reqwest",
  "tokio",
+ "toml_edit 0.14.4",
  "vlog",
  "zksync_config",
  "zksync_utils",
@@ -7783,7 +8121,6 @@ dependencies = [
  "async-trait",
  "tokio",
  "vlog",
- "zksync_dal",
  "zksync_utils",
 ]
@@ -7813,56 +8150,29 @@ dependencies = [
  "vlog",
 ]

-[[package]]
-name = "zksync_test_node"
-version = "1.0.0"
-dependencies = [
- "anyhow",
- "bigdecimal",
- "clap 4.3.4",
- "futures 0.3.28",
- "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)",
- "jsonrpc-http-server",
- "once_cell",
- "reqwest",
- "serde",
- "tokio",
- "tracing",
- "tracing-subscriber",
- "vlog",
- "vm",
- "zksync_basic_types",
- "zksync_contracts",
- "zksync_core",
- "zksync_state",
- "zksync_types",
- "zksync_utils",
- "zksync_web3_decl",
-]
-
 [[package]]
 name = "zksync_types"
 version = "1.0.0"
 dependencies = [
- "bigdecimal",
  "blake2 0.10.6",
  "chrono",
  "codegen 0.1.0",
+ "ethereum-types 0.12.1",
  "hex",
  "metrics",
  "num 0.3.1",
+ "num_enum",
  "once_cell",
  "parity-crypto",
  "rlp",
- "secp256k1 0.21.3",
+ "secp256k1 0.27.0",
  "serde",
  "serde_json",
  "serde_with",
  "strum",
  "thiserror",
  "tokio",
- "zk_evm",
- "zkevm-assembly",
+ "zk_evm 1.3.3",
  "zkevm_test_harness",
  "zksync_basic_types",
  "zksync_config",
@@ -7889,7 +8199,7 @@ dependencies = [
  "thiserror",
  "tokio",
  "vlog",
- "zk_evm",
+ "zk_evm 1.3.3",
  "zksync_basic_types",
 ]
@@ -7902,10 +8212,11 @@ dependencies = [
  "ff_ce",
  "hex",
  "itertools",
+ "once_cell",
  "serde_json",
  "structopt",
- "toml_edit 0.14.4",
  "vlog",
+ "zksync_prover_utils",
  "zksync_types",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index a82c4f8a1281..5c47da13111f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,9 +7,7 @@ members = [
     # Server
     "core/bin/zksync_core",
     "core/bin/external_node",
-    "core/bin/admin-tools",
     "core/bin/system-constants-generator",
-    "core/bin/test_node",
     "core/tests/cross_external_nodes_checker",
     # Contract verifier
     "core/bin/contract-verifier",
@@ -20,7 +18,7 @@ members = [
     #"core/bin/witness_generator",
     # circuit synthesizer: its commented as it cannot be built with stable rust.
# "core/bin/circuit_synthesizer", - "core/bin/l1_tx_effective_gas_price_migration", + "core/bin/enumeration_indices_migration", # Libraries "core/lib/basic_types", "core/lib/config", @@ -44,8 +42,13 @@ members = [ "core/lib/utils", "core/lib/vlog", "core/lib/vm", + "core/lib/multivm", "core/lib/web3_decl", + # MultiVM dependencies + "core/multivm_deps/vm_m5", + "core/multivm_deps/vm_m6", + # Test infrastructure "core/tests/loadnext", "core/tests/vm-benchmark", @@ -56,7 +59,7 @@ members = [ ] resolver = "2" -exclude = [ "core/bin/prover", "core/bin/circuit_synthesizer", "core/bin/setup_key_generator_and_server", "core/bin/witness_generator", "core/bin/vk_setup_data_generator_server_fri", "core/bin/prover_fri"] +exclude = [] # for `perf` profiling [profile.perf] diff --git a/bin/run_loadtest_from_github_actions b/bin/run_loadtest_from_github_actions index 78811f21bc56..f784ddd3180d 100755 --- a/bin/run_loadtest_from_github_actions +++ b/bin/run_loadtest_from_github_actions @@ -17,16 +17,5 @@ export CONTRACT_EXECUTION_PARAMS_HASHES=${execution_params[3]} export CONTRACT_EXECUTION_PARAMS_RECURSIVE_CALLS=${execution_params[4]} export CONTRACT_EXECUTION_PARAMS_DEPLOYS=${execution_params[5]} -read -ra execution_params <<<"$EXPLORER_API_REQUESTS_WEIGHTS" #reading $EXPLORER_API_REQUESTS_WEIGHTS as an array as tokens separated by IFS -export EXPLORER_API_REQUESTS_WEIGHTS_NETWORK_STATS=${execution_params[0]} -export EXPLORER_API_REQUESTS_WEIGHTS_BLOCKS=${execution_params[1]} -export EXPLORER_API_REQUESTS_WEIGHTS_BLOCK=${execution_params[2]} -export EXPLORER_API_REQUESTS_WEIGHTS_TRANSACTIONS=${execution_params[3]} -export EXPLORER_API_REQUESTS_WEIGHTS_ACCOUNT=${execution_params[4]} -export EXPLORER_API_REQUESTS_WEIGHTS_TOKEN=${execution_params[5]} -export EXPLORER_API_REQUESTS_WEIGHTS_CONTRACT=${execution_params[6]} -export EXPLORER_API_REQUESTS_WEIGHTS_TRANSACTION=${execution_params[7]} -export EXPLORER_API_REQUESTS_WEIGHTS_ACCOUNT_TRANSACTIONS=${execution_params[8]} - # Run the test cargo run --bin loadnext diff --git a/bors.toml b/bors.toml deleted file mode 100644 index 8513110ce503..000000000000 --- a/bors.toml +++ /dev/null @@ -1,21 +0,0 @@ -# Set bors's timeout to 4 hours -# https://ddg.gg/?q=4+hours+in+seconds -timeout-sec = 14400 - -# If expected statuses are not specified explicitly, bors tries to "guess" and apperently does it wrong sometimes -status = [ - "integration", - "loadtest", - "lint", - "unit-tests", - "Build images / Build and Push Docker Images (circuit-synthesizer)", - "Build images / Build and Push Docker Images (contract-verifier)", - "Build images / Build and Push Docker Images (external-node)", - "Build images / Build and Push Docker Images (prover-fri)", - "Build images / Build and Push Docker Images (prover-v2)", - "Build images / Build and Push Docker Images (server-v2)", - "Build images / Build and Push Docker Images (witness-generator)", - "Build images / Build and Push Docker Images (cross-external-nodes-checker)", -] - -use_squash_merge = true diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 74b7016ab19f..0ab823892422 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,178 @@ # Changelog +## [5.28.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.28.0...core-v5.28.1) (2023-08-10) + + +### Bug Fixes + +* **api:** fix typo when setting `max_response_body_size` ([#2341](https://github.com/matter-labs/zksync-2-dev/issues/2341)) ([540da7f](https://github.com/matter-labs/zksync-2-dev/commit/540da7f16e745e0288a8877891c1f80d6d62bc00)) + +## 
[5.28.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.27.0...core-v5.28.0) (2023-08-10) + + +### Features + +* **api:** add `max_response_body_size` to config ([#2294](https://github.com/matter-labs/zksync-2-dev/issues/2294)) ([a29a71a](https://github.com/matter-labs/zksync-2-dev/commit/a29a71a81f04672c8dbae7e9aac760b70dbca8f0)) +* **db:** Configure statement timeout for Postgres ([#2317](https://github.com/matter-labs/zksync-2-dev/issues/2317)) ([afdbb6b](https://github.com/matter-labs/zksync-2-dev/commit/afdbb6b94d9e43b9659ff5d3428f2d9a7827b29f)) +* **en:** Add support for debug namespace in EN ([#2295](https://github.com/matter-labs/zksync-2-dev/issues/2295)) ([ebcc6e9](https://github.com/matter-labs/zksync-2-dev/commit/ebcc6e9ac387b85e44795f6d35edb4b0a6175de2)) +* **house-keeper:** refactor periodic job to be reusable by adding in lib ([#2333](https://github.com/matter-labs/zksync-2-dev/issues/2333)) ([ad72a16](https://github.com/matter-labs/zksync-2-dev/commit/ad72a1691b661b2b4eeaefd29375a8987b485715)) +* **hyperchain:** hyperchain wizard ([#2259](https://github.com/matter-labs/zksync-2-dev/issues/2259)) ([34c5b54](https://github.com/matter-labs/zksync-2-dev/commit/34c5b542d6436930a6068c4d08562804205154a9)) +* **prover-fri:** Add concurrent circuit synthesis for FRI GPU prover ([#2326](https://github.com/matter-labs/zksync-2-dev/issues/2326)) ([aef3491](https://github.com/matter-labs/zksync-2-dev/commit/aef3491cd6af01840dd4fe5b7e530028916ffa8f)) +* **state-keeper:** enforce different timestamps for miniblocks ([#2280](https://github.com/matter-labs/zksync-2-dev/issues/2280)) ([f87944e](https://github.com/matter-labs/zksync-2-dev/commit/f87944e72526112454934a61c71475b5a6fde22e)) + + +### Bug Fixes + +* **db:** Fix storage caches initialization ([#2339](https://github.com/matter-labs/zksync-2-dev/issues/2339)) ([ec8c822](https://github.com/matter-labs/zksync-2-dev/commit/ec8c8229ecd9f2a0f96f15a03929ede5453b6b09)) +* **prover:** Kill prover process for edge-case in crypto thread code ([#2334](https://github.com/matter-labs/zksync-2-dev/issues/2334)) ([f2b5e1a](https://github.com/matter-labs/zksync-2-dev/commit/f2b5e1a2fcbe3053e372f15992e592bc0c32a88f)) +* **state-keeper:** Order by number in `SELECT timestamp ...` query ([#2331](https://github.com/matter-labs/zksync-2-dev/issues/2331)) ([513e36e](https://github.com/matter-labs/zksync-2-dev/commit/513e36ec6aace545004b964861d080b308e7a98b)) + + +### Performance Improvements + +* **merkle tree:** Allow configuring multi-get chunk size ([#2332](https://github.com/matter-labs/zksync-2-dev/issues/2332)) ([0633911](https://github.com/matter-labs/zksync-2-dev/commit/06339117a36060bb31b6afb3933e12625c943e0b)) +* **merkle tree:** Parallelize loading data and updating tree ([#2327](https://github.com/matter-labs/zksync-2-dev/issues/2327)) ([1edd6ee](https://github.com/matter-labs/zksync-2-dev/commit/1edd6eee62b112d3f3d1b790df01acd04be1eeef)) +* **merkle tree:** Use batched multi-get for RocksDB ([#2304](https://github.com/matter-labs/zksync-2-dev/issues/2304)) ([df22946](https://github.com/matter-labs/zksync-2-dev/commit/df22946743ac56dbe86c5875a1e35345bfcd1f09)) + +## [5.27.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.26.0...core-v5.27.0) (2023-08-04) + + +### Features + +* **merkle tree:** Switch sync mode dynamically ([#2274](https://github.com/matter-labs/zksync-2-dev/issues/2274)) ([e2e2d98](https://github.com/matter-labs/zksync-2-dev/commit/e2e2d98e849d6d1b73c8f6c0dd32d9a5aed0ab42)) + + +### Bug Fixes + +* 
**migrations:** Add If Exists Clause to Migration ([#2285](https://github.com/matter-labs/zksync-2-dev/issues/2285)) ([1273f42](https://github.com/matter-labs/zksync-2-dev/commit/1273f4284f6fa02b1623a90145bf191bad5ca93f)) +* **prover:** Panics in `send_report` will make provers crash ([#2273](https://github.com/matter-labs/zksync-2-dev/issues/2273)) ([85974d3](https://github.com/matter-labs/zksync-2-dev/commit/85974d3f9482307e0dbad0ec179e80886dafa42e)) + + +### Performance Improvements + +* **db:** Cache latest state entries for VM ([#2258](https://github.com/matter-labs/zksync-2-dev/issues/2258)) ([f05f757](https://github.com/matter-labs/zksync-2-dev/commit/f05f757a942e1e67a24022f3b5fd054ae53b35dc)) +* **merkle tree:** Optimize loading data for tree some more ([#2281](https://github.com/matter-labs/zksync-2-dev/issues/2281)) ([58757e3](https://github.com/matter-labs/zksync-2-dev/commit/58757e359420fb85da2db9396661c6e2d65d7a1f)) + + +### Reverts + +* **migrations:** Add If Exists Clause to Migration ([#2285](https://github.com/matter-labs/zksync-2-dev/issues/2285)) ([#2301](https://github.com/matter-labs/zksync-2-dev/issues/2301)) ([517b2e0](https://github.com/matter-labs/zksync-2-dev/commit/517b2e0a9ce0a4cdaa09ff05cdef4aae761d1bcb)) + +## [5.26.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.25.0...core-v5.26.0) (2023-08-01) + + +### Features + +* **api:** Rewrite healthcheck server using `axum` ([#2241](https://github.com/matter-labs/zksync-2-dev/issues/2241)) ([5854c7f](https://github.com/matter-labs/zksync-2-dev/commit/5854c7ff71a25b291a3aa3bfe5455d0b5799f227)) +* **api:** Support setting maximum batch request size ([#2252](https://github.com/matter-labs/zksync-2-dev/issues/2252)) ([2cf24fd](https://github.com/matter-labs/zksync-2-dev/commit/2cf24fd0230ad83dc3839ca017fc5571603aab69)) +* **eth-sender:** Use Multicall for getting base system contracts hashes ([#2196](https://github.com/matter-labs/zksync-2-dev/issues/2196)) ([8d3e1b6](https://github.com/matter-labs/zksync-2-dev/commit/8d3e1b6308f6a0ec2142b81b5c319390344ea8df)) +* **prover-fri:** Added vk commitment generator in CI ([#2265](https://github.com/matter-labs/zksync-2-dev/issues/2265)) ([8ad75e0](https://github.com/matter-labs/zksync-2-dev/commit/8ad75e04b0a49dee34c6fa7e3b81a21392afa186)) +* **state-keeper:** save initial writes indices in state keeper ([#2127](https://github.com/matter-labs/zksync-2-dev/issues/2127)) ([3a8790c](https://github.com/matter-labs/zksync-2-dev/commit/3a8790c005f8ae8217a461fcfb11d913eb48692b)) +* Update RocksDB bindings ([#2208](https://github.com/matter-labs/zksync-2-dev/issues/2208)) ([211f548](https://github.com/matter-labs/zksync-2-dev/commit/211f548fa9945b7ed5328026e526cd72c09f6a94)) + + +### Bug Fixes + +* **api:** Fix bytes deserialization by bumping web3 crate version ([#2240](https://github.com/matter-labs/zksync-2-dev/issues/2240)) ([59ef24a](https://github.com/matter-labs/zksync-2-dev/commit/59ef24afa6ceddf506a9ac7c4b1e9fc292311095)) +* **api:** underflow in `fee_history_impl` ([#2242](https://github.com/matter-labs/zksync-2-dev/issues/2242)) ([87c97cb](https://github.com/matter-labs/zksync-2-dev/commit/87c97cbdf40bfad8bbd1e01143dab27cc2c546f2)) +* **db:** `transactions` table deadlock ([#2267](https://github.com/matter-labs/zksync-2-dev/issues/2267)) ([1082267](https://github.com/matter-labs/zksync-2-dev/commit/1082267f5bbe097ccf27ea01d2c77bd43da4268e)) +* **merkle tree:** Brush up tree-related configuration ([#2266](https://github.com/matter-labs/zksync-2-dev/issues/2266)) 
([18071c2](https://github.com/matter-labs/zksync-2-dev/commit/18071c240584fed009714f6a7d2b9560a6f6df67)) +* **merkle tree:** Make tree creation async in metadata calculator ([#2270](https://github.com/matter-labs/zksync-2-dev/issues/2270)) ([23b2fac](https://github.com/matter-labs/zksync-2-dev/commit/23b2fac8058d08448d1dc669d18d0c77b17167ae)) +* Use replica for slot_index_consistency_checker.rs ([#2256](https://github.com/matter-labs/zksync-2-dev/issues/2256)) ([15b3f5d](https://github.com/matter-labs/zksync-2-dev/commit/15b3f5de09acaa6d6608e51e1d6327a12cc53bbd)) + + +### Performance Improvements + +* **db:** Cache initial writes info for VM ([#2221](https://github.com/matter-labs/zksync-2-dev/issues/2221)) ([22735ae](https://github.com/matter-labs/zksync-2-dev/commit/22735ae6c58a8c002a9ddd539649918547d48d1a)) +* Various optimizations ([#2251](https://github.com/matter-labs/zksync-2-dev/issues/2251)) ([817a982](https://github.com/matter-labs/zksync-2-dev/commit/817a9827d7004c055e5966e0f8ad1a4d51502721)) + +## [5.25.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.24.0...v5.25.0) (2023-07-25) + + +### Features + +* **api:** Add metrics for requests with block height ([#2206](https://github.com/matter-labs/zksync-2-dev/issues/2206)) ([7be59cb](https://github.com/matter-labs/zksync-2-dev/commit/7be59cb8ffa375bad1b97146d966860149b9d767)) + +## [5.24.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.23.0...v5.24.0) (2023-07-24) + + +### Features + +* **api:** Bump jsonrpsee version ([#2219](https://github.com/matter-labs/zksync-2-dev/issues/2219)) ([c5ed6bc](https://github.com/matter-labs/zksync-2-dev/commit/c5ed6bccfcdd94330bb40eef04ea77f66c13a735)) +* **external node:** MultiVM ([#1833](https://github.com/matter-labs/zksync-2-dev/issues/1833)) ([0065e8e](https://github.com/matter-labs/zksync-2-dev/commit/0065e8e3f6846486be5d8a79f3b080a269ee632f)) + + +### Bug Fixes + +* **merkle tree:** Fix storage logs loading ([#2216](https://github.com/matter-labs/zksync-2-dev/issues/2216)) ([d393302](https://github.com/matter-labs/zksync-2-dev/commit/d393302795af69571fa3f30f25dbb0c3aa0b5a6b)) + +## [5.23.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.22.0...v5.23.0) (2023-07-24) + + +### Features + +* Use jemalloc as a global allocator ([#2213](https://github.com/matter-labs/zksync-2-dev/issues/2213)) ([4a230b6](https://github.com/matter-labs/zksync-2-dev/commit/4a230b6054bf1f0da55a086163750901c244ef52)) + + +### Performance Improvements + +* **merkle tree:** Optimize loading data for tree in metadata calculator ([#2197](https://github.com/matter-labs/zksync-2-dev/issues/2197)) ([f7736bc](https://github.com/matter-labs/zksync-2-dev/commit/f7736bc16bae3e7553eea24d33d4436627942635)) + +## [5.22.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.21.0...v5.22.0) (2023-07-21) + + +### Features + +* **fri-prover:** Generate setup-data for GPU FRI prover ([#2200](https://github.com/matter-labs/zksync-2-dev/issues/2200)) ([3213c2b](https://github.com/matter-labs/zksync-2-dev/commit/3213c2bdeb0f2929d53aaa713dcbe9b2e76fd022)) + + +### Bug Fixes + +* **house-keeper:** use proper display method in metric names ([#2209](https://github.com/matter-labs/zksync-2-dev/issues/2209)) ([894a033](https://github.com/matter-labs/zksync-2-dev/commit/894a03390a212cb6b205bc87e925abc8a203bab2)) + +## [5.21.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.20.1...v5.21.0) (2023-07-20) + + +### Features + +* **api:** added `eth_feeHistory` endpoint 
([#2201](https://github.com/matter-labs/zksync-2-dev/issues/2201)) ([7a16252](https://github.com/matter-labs/zksync-2-dev/commit/7a16252e1ba7cb3eb42cee6a14ea320ddcf3e0a3)) +* **explorer-api:** add `/contract_verification/info/{address}` endpoint ([#2195](https://github.com/matter-labs/zksync-2-dev/issues/2195)) ([ade8019](https://github.com/matter-labs/zksync-2-dev/commit/ade80195c7cc0bc288959d556f62b286aa9db9b3)) + + +### Bug Fixes + +* **api:** Fix graceful shutdown for Tokio ([#2167](https://github.com/matter-labs/zksync-2-dev/issues/2167)) ([4542f51](https://github.com/matter-labs/zksync-2-dev/commit/4542f511c78cffee85a004cf24729a81de801e31)) +* **contract-verifier:** fix some vyper verification scenarios ([#2203](https://github.com/matter-labs/zksync-2-dev/issues/2203)) ([5a749b0](https://github.com/matter-labs/zksync-2-dev/commit/5a749b03380db3aac88c808cce202ae0c7343863)) +* **db:** Add index for getting pending l1 batch txs ([#2192](https://github.com/matter-labs/zksync-2-dev/issues/2192)) ([0ba7870](https://github.com/matter-labs/zksync-2-dev/commit/0ba78709e99282a57480b034ae16988f840b9073)) +* **merkle tree:** Remove new tree throttling ([#2189](https://github.com/matter-labs/zksync-2-dev/issues/2189)) ([e18c450](https://github.com/matter-labs/zksync-2-dev/commit/e18c45094dd5d187ccf3e5a9e434e287dd5f2dc9)) +* **ws-api:** handle closed pubsub connections when assigning id ([#2193](https://github.com/matter-labs/zksync-2-dev/issues/2193)) ([f8c448a](https://github.com/matter-labs/zksync-2-dev/commit/f8c448adac3ef31bc02e055cdd94207cc3d6c1c8)) + + +### Performance Improvements + +* better page datastructure ([#1812](https://github.com/matter-labs/zksync-2-dev/issues/1812)) ([80dcb34](https://github.com/matter-labs/zksync-2-dev/commit/80dcb3402ed65dbedf5153273564650942e099a6)) +* **merkle tree:** Measure and optimize RAM usage by tree ([#2202](https://github.com/matter-labs/zksync-2-dev/issues/2202)) ([c86fe43](https://github.com/matter-labs/zksync-2-dev/commit/c86fe43e0007fcf47d5594fd4fe15ea15a74c92c)) +* reduce memory use of memory pages by using chunks of 64 values ([#2204](https://github.com/matter-labs/zksync-2-dev/issues/2204)) ([4262c6d](https://github.com/matter-labs/zksync-2-dev/commit/4262c6d8ffaa4a45f3a619e92189ac8d575fe04f)) + +## [5.20.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.20.0...v5.20.1) (2023-07-17) + + +### Bug Fixes + +* **crypto:** update zkevm_circuits to fix sha256 circuits for FRI prover ([#2186](https://github.com/matter-labs/zksync-2-dev/issues/2186)) ([daf460e](https://github.com/matter-labs/zksync-2-dev/commit/daf460e0a5798c363b65c3de80fff53c743b20e8)) +* **merkle tree:** Handle tree having more versions than L1 batches in Postgres ([#2179](https://github.com/matter-labs/zksync-2-dev/issues/2179)) ([7b3d8ad](https://github.com/matter-labs/zksync-2-dev/commit/7b3d8ad545a8c2f0993d5087ae387935e7dff381)) +* remove annoying Cargo.lock ([#2181](https://github.com/matter-labs/zksync-2-dev/issues/2181)) ([04602a4](https://github.com/matter-labs/zksync-2-dev/commit/04602a446899f672789d83a24db85ea910d00c2f)) + +## [5.20.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.19.1...v5.20.0) (2023-07-14) + + +### Features + +* **fri-prover-config:** use flattened env variable instead of single composite ([#2183](https://github.com/matter-labs/zksync-2-dev/issues/2183)) ([5b67f1f](https://github.com/matter-labs/zksync-2-dev/commit/5b67f1fa9297bd49f5c2bf4a4bfaa71850b1feaf)) +* **merkle tree:** Retire the old tree implementation 
([#2130](https://github.com/matter-labs/zksync-2-dev/issues/2130)) ([30738a7](https://github.com/matter-labs/zksync-2-dev/commit/30738a7488f4dfb726a6a64f546437b03dd721ed)) +* **prover-fri:** Add impl for running specialized prover ([#2166](https://github.com/matter-labs/zksync-2-dev/issues/2166)) ([0892ffe](https://github.com/matter-labs/zksync-2-dev/commit/0892ffeb34fcb987987e1f36b1daecb1a5ec07f5)) +* **witness-gen-fri:** force process configured block when sampling enabled ([#2177](https://github.com/matter-labs/zksync-2-dev/issues/2177)) ([12e0395](https://github.com/matter-labs/zksync-2-dev/commit/12e0395aabfe3f0f8a1968816ac46b5c4585746d)) + ## [5.19.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.19.0...v5.19.1) (2023-07-13) diff --git a/core/bin/admin-tools/Cargo.toml b/core/bin/admin-tools/Cargo.toml deleted file mode 100644 index 9748a57b0b54..000000000000 --- a/core/bin/admin-tools/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "admin-tools" -version = "0.1.0" -edition = "2018" -authors = ["The Matter Labs Team <hello@matterlabs.dev>"] -homepage = "https://zksync.io/" -license = "MIT OR Apache-2.0" -keywords = ["blockchain", "zksync"] -categories = ["cryptography"] -description = "Admin tools CLI for zkSync" -publish = false # We don't want to publish our binaries. - -[dependencies] -anyhow = "1.0" -chrono = "^0.4" -clap = { version = "4.0", features = ["derive"] } -dotenvy = "^0.15" -tokio = { version = "1", features = ["full"] } - -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } - - diff --git a/core/bin/admin-tools/src/application.rs b/core/bin/admin-tools/src/application.rs deleted file mode 100644 index 628257c11145..000000000000 --- a/core/bin/admin-tools/src/application.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::path::Path; - -pub struct TerminalSize { - pub height: u32, - pub width: u32, -} - -pub struct App<'a> { - pub terminal: TerminalSize, - pub tokio: tokio::runtime::Runtime, - pub db: zksync_dal::StorageProcessor<'a>, -} - -pub fn create_app<'a>(profile: &Option<String>) -> Result<App<'a>, AppError> { - if profile.is_some() { - let home = std::env::var("ZKSYNC_HOME").map_err(|x| AppError::Init(InitError::Env(x)))?; - - let path = - Path::new(home.as_str()).join(format!("etc/env/{}.env", profile.as_ref().unwrap())); - - dotenvy::from_filename(path) - .map_err(|x| AppError::Init(InitError::Generic(x.to_string())))?; - } - - let tokio = tokio::runtime::Runtime::new().map_err(|x| AppError::Init(InitError::IO(x)))?; - - let db = tokio.block_on(zksync_dal::StorageProcessor::establish_connection(true)); - - let invocation = std::process::Command::new("stty") - .arg("-f") - .arg("/dev/stderr") - .arg("size") - .output(); - - let terminal = match invocation { - Result::Ok(x) if x.stderr.is_empty() => { - let mut split = std::str::from_utf8(&x.stdout).unwrap().split_whitespace(); - - TerminalSize { - height: split.next().unwrap().parse().unwrap(), - width: split.next().unwrap().parse().unwrap(), - } - } - _ => TerminalSize { - height: 60, - width: 80, - }, - }; - - Ok(App { - tokio, - db, - terminal, - }) -} - -#[derive(Debug)] -pub enum InitError { - Env(std::env::VarError), - IO(std::io::Error), - Generic(String), -} - -#[derive(Debug)] -pub enum AppError { - Db(String), - Command(String), - Init(InitError), -} diff --git a/core/bin/admin-tools/src/blocks.rs b/core/bin/admin-tools/src/blocks.rs deleted file mode 100644 index 7a03a7a41913..000000000000 --- a/core/bin/admin-tools/src/blocks.rs +++ /dev/null @@ -1,315 
+0,0 @@ -use std::cmp::{max, min}; -use std::ops::Add; -use std::{collections::HashMap, convert::TryFrom, iter}; - -use std::convert::AsRef; - -use chrono::{DateTime, Datelike, Duration, Timelike, Utc}; -use zksync_dal::prover_dal::GetProverJobsParams; -use zksync_dal::witness_generator_dal::GetWitnessJobsParams; -use zksync_types::proofs::{ProverJobStatus, WitnessJobStatus}; - -use zksync_types::{ - proofs::{AggregationRound, ProverJobInfo, WitnessJobInfo}, - L1BatchNumber, -}; - -use crate::application::{App, AppError}; - -pub struct RoundWitnessStats { - job: WitnessJobInfo, - created_at: DateTime<Utc>, - updated_at: DateTime<Utc>, -} - -pub struct RoundProverStats { - jobs: Vec<ProverJobInfo>, - updated_at: DateTime<Utc>, -} - -pub struct AggregationRoundInfo { - prover: Option<RoundProverStats>, - witness: Option<RoundWitnessStats>, - round_number: AggregationRound, - created_at: DateTime<Utc>, - updated_at: DateTime<Utc>, -} - -pub struct BlockInfo { - id: L1BatchNumber, - aggregations: Vec<Option<AggregationRoundInfo>>, -} - -pub fn get_block_info(id: L1BatchNumber, app: &mut App) -> Result<BlockInfo, AppError> { - /// Assumes that all provided jobs are from the same aggregation round. - fn jobs_to_round_stats( - witness: Option<WitnessJobInfo>, - proofs: Vec<ProverJobInfo>, - ) -> (Option<RoundWitnessStats>, Option<RoundProverStats>) { - let witness = witness.map(move |x| RoundWitnessStats { - created_at: x.created_at, - updated_at: x.updated_at, - job: x, - }); - - let prover = proofs - .iter() - .map(|x| x.updated_at) - .reduce(max) - .map(move |updated_at| RoundProverStats { - jobs: proofs, - updated_at, - }); - - (witness, prover) - } - - fn compose_round( - round: AggregationRound, - witness: Option<RoundWitnessStats>, - prover: Option<RoundProverStats>, - ) -> Option<AggregationRoundInfo> { - witness.map(move |witness| AggregationRoundInfo { - round_number: round, - created_at: witness.created_at, - updated_at: prover - .as_ref() - .map(|x| x.updated_at) - .unwrap_or_else(|| witness.updated_at), - witness: Some(witness), - prover, - }) - } - - let handle = app.tokio.handle(); - - let witness_jobs = handle - .block_on( - app.db - .witness_generator_dal() - .get_jobs(GetWitnessJobsParams { - blocks: Some(id..id), - }), - ) - .map_err(|x| AppError::Db(x.to_string()))?; - - let proof_jobs = handle - .block_on( - app.db - .prover_dal() - .get_jobs(GetProverJobsParams::blocks(id..id)), - ) - .map_err(|x| AppError::Db(x.to_string()))?; - - let mut proof_groups = - proof_jobs - .into_iter() - .fold(HashMap::<_, Vec<_>>::new(), |mut aggr, cur| { - aggr.entry(cur.position.aggregation_round) - .or_default() - .push(cur); - aggr - }); - - let mut witns_groups = witness_jobs - .into_iter() - .fold(HashMap::new(), |mut aggr, cur| { - if aggr.insert(cur.position.aggregation_round, cur).is_some() { - panic!("Single witness job expected per generation round") - } - aggr - }); - - let aggregations = (0..3) - .map(|x| AggregationRound::try_from(x).unwrap()) - .map(|ar| { - ( - ar, - witns_groups.remove(&ar), - proof_groups.remove(&ar).unwrap_or_default(), - ) - }) - .map(|(ar, wtns, prfs)| (ar, jobs_to_round_stats(wtns, prfs))) - .map(|(ar, (w, p))| compose_round(ar, w, p)) - .collect::<Vec<_>>(); - - Ok(BlockInfo { id, aggregations }) -} - -pub fn print_block_info(block: &BlockInfo) -> Result<(), AppError> { - fn indent(s: &str, i: usize) -> String { - let width = " ".repeat(i); - - String::new() - .add(width.as_str()) - .add(&s.replace('\n', &String::from("\n").add(&width))) - } - - fn timef(t: DateTime<Utc>) -> String { - format!( - "{:02}:{:02}:{:02} {}-{:02}-{:02}", - t.hour(), - t.minute(), - t.second(), - t.year(), - t.month(), - t.day() - ) - } - - fn durf(d: Duration) -> String { - format!("{}:{:02}m", d.num_minutes(), d.num_seconds() % 60) - } - 
- fn print_existing_block( - round: &AggregationRoundInfo, - round_prev: &Option<AggregationRoundInfo>, - ) { - let (duration_reference, reference_name) = match round_prev { - Some(round_prev) => ( - round_prev.updated_at, - format!("U{}", round_prev.round_number as u32), - ), - None => (round.created_at, "C".to_string()), - }; - - let witness = match &round.witness { - Some(witness) => { - let status = match &witness.job.status { - WitnessJobStatus::Successful(x) => { - format!( - "Started: {} {}+{} -Elapsed: {}", - timef(x.started_at), - reference_name, - durf(x.started_at - duration_reference), - durf(x.time_taken) - ) - } - - _ => String::new(), - }; - - format!( - " Witness job: {}\n{}", - witness.job.status, - indent(status.as_str(), 6) - ) - } - None => "No witness job.".to_string(), - }; - - let prover = match &round.prover { - Some(prover) if !prover.jobs.is_empty() => { - let statuses = prover - .jobs - .iter() - .fold(HashMap::new(), |mut aggr, cur| { - aggr.entry(cur.status.as_ref()) - .and_modify(|x| *x += 1) - .or_insert(1); - aggr - }) - .iter() - .map(|(status, count)| format!("{}: {}", status, count)) - .collect::<Vec<_>>() - .join(", "); - - fn map<T>(f: fn(T, T) -> T, x: Option<T>, y: T) -> Option<T> - where - T: Copy, - { - x.map(|x| f(x, y)).or(Some(y)) - } - - let (started_min, started_max, elapsed_min, elapsed_max) = prover.jobs.iter().fold( - (None, None, None, None), - |(started_min, started_max, elapsed_min, elapsed_max), cur| match &cur.status { - ProverJobStatus::InProgress(s) => ( - map(min, started_min, s.started_at), - map(max, started_max, s.started_at), - elapsed_min, - elapsed_max, - ), - ProverJobStatus::Successful(s) => ( - map(min, started_min, s.started_at), - map(max, started_max, s.started_at), - map(min, elapsed_min, cur.updated_at - s.started_at), - map(max, elapsed_max, cur.updated_at - s.started_at), - ), - ProverJobStatus::Failed(s) => ( - map(min, started_min, s.started_at), - map(max, started_max, s.started_at), - elapsed_min, - elapsed_max, - ), - _ => (started_min, started_max, elapsed_min, elapsed_max), - }, - ); - - fn format_time( - t: Option<DateTime<Utc>>, - reference: DateTime<Utc>, - reference_name: &str, - ) -> String { - match t { - Some(t) => { - format!("{} {}+{}", timef(t), reference_name, durf(t - reference)) - } - None => "N/A".to_owned(), - } - } - - let stats = format!( - "Started: {} (min) - {} (max) -Elapsed: {} (min), - {} (max)", - format_time(started_min, duration_reference, &reference_name), - format_time(started_max, duration_reference, &reference_name), - elapsed_min.map_or("N/A".to_owned(), durf), - elapsed_max.map_or("N/A".to_owned(), durf) - ); - - format!(" Prover jobs: {}\n{}", statuses, indent(&stats, 6)) - } - _ => "No prover jobs.".to_string(), - }; - println!("Round {}", round.round_number as u32); - println!("[C]reated {}", timef(round.created_at)); - println!( - "[U]pdated {} C+{}", - timef(round.updated_at), - durf(round.updated_at - round.created_at) - ); - - println!("{}\n{}", witness, prover); - } - - fn print_missing_block(round_ix: usize) { - println!("Round {} missing jobs", round_ix); - } - - println!("Block {}", block.id); - - let prevs = iter::once(&None).chain(block.aggregations.iter()); // No need to map into Some, because the previous round must exist. - - block - .aggregations - .iter() - .zip(prevs) - .enumerate() - .for_each(|(i, (cur, prev))| { - // Option in `cur` refers to conceptual existence of block data. - // Option in `prev` signifies whether there is a previous block for `cur` block, and - // must only be None for the first element. 
- assert!(i == 0 || prev.is_some()); - - match cur { - Some(x) => print_existing_block(x, prev), - None => print_missing_block(i), - } - }); - - Ok(()) -} diff --git a/core/bin/admin-tools/src/main.rs b/core/bin/admin-tools/src/main.rs deleted file mode 100644 index 350c2665f88f..000000000000 --- a/core/bin/admin-tools/src/main.rs +++ /dev/null @@ -1,170 +0,0 @@ -use std::convert::TryFrom; - -use application::{App, AppError}; -use blocks::print_block_info; -use clap::{Args, Parser, Subcommand}; -use zksync_dal::prover_dal::GetProverJobsParams; -use zksync_types::proofs::AggregationRound; -use zksync_types::L1BatchNumber; - -use crate::application::create_app; - -mod application; -mod blocks; -mod prover; - -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, clap::ValueEnum)] -enum JobStatus { - Queued, - Failed, - InProgress, - Successful, -} - -#[derive(Parser)] -struct Cli { - #[command(subcommand)] - command: Command, - #[arg(long)] - /// Can be used to load environment from ./etc/env/<profile>.env file. - profile: Option<String>, -} - -#[derive(Subcommand)] -enum Command { - #[command(subcommand)] - Prover(ProverCommand), - #[command(subcommand)] - Blocks(BlockCommand), -} - -#[derive(Subcommand)] -enum ProverCommand { - /// Show general prover jobs statistics. - Stats, - /// List specific jobs. - Ls(ProverLsCommand), -} - -#[derive(Subcommand)] -enum BlockCommand { - Show(BlockShowCommand), -} - -type AppFnBox<'a> = Box<dyn FnOnce(&mut App) -> Result<(), AppError> + 'a>; -type CmdMatch<'a> = Result<AppFnBox<'a>, AppError>; - -fn prover_stats<'a>() -> AppFnBox<'a> { - Box::new(|app| { - let stats = prover::get_stats(app)?; - prover::print_stats(&stats, app.terminal.width) - }) -} - -#[derive(Args)] -struct ProverLsCommand { - #[arg(long, short)] - /// Statuses to include. - status: Option<Vec<JobStatus>>, - #[arg(long, short, default_value_t = 10)] - /// Limits the number of returned results. - limit: u32, - #[arg(long)] - desc: bool, - #[arg(long)] - /// Block range. Format: `x` or `x..y`. 
- range: Option<String>, - #[arg(long)] - round: Option<u32>, -} - -fn prover_ls<'a>(cmd: &ProverLsCommand) -> Result<AppFnBox<'a>, AppError> { - let range = match &cmd.range { - Some(input) => { - let split = input - .split("..") - .map(|x| x.parse::<u32>()) - .collect::<Result<Vec<_>, _>>() - .map_err(|_| AppError::Command("Wrong range format".to_owned()))?; - - match split.as_slice() { - [] => Ok(None), - [id] => Ok(Some(L1BatchNumber(*id)..L1BatchNumber(*id))), - [s, e] => Ok(Some(L1BatchNumber(*s)..L1BatchNumber(*e))), - _ => Err(AppError::Command("Wrong range format".to_owned())), - } - } - None => Ok(None), - }?; - - let opts = GetProverJobsParams { - blocks: range, - statuses: cmd.status.as_ref().map(|x| { - x.iter() - .map(|x| { - clap::ValueEnum::to_possible_value(x) - .unwrap() - .get_name() - .replace('-', "_") - }) - .collect() - }), - limit: Some(cmd.limit), - desc: cmd.desc, - round: cmd - .round - .map_or(Ok(None), |x| AggregationRound::try_from(x).map(Some)) - .map_err(|_| AppError::Command("Wrong aggregation round value.".to_owned()))?, - }; - - Ok(Box::new(move |app| { - let jobs = prover::get_jobs(app, opts)?; - prover::print_jobs(&jobs) - })) -} - -#[derive(Args)] -struct BlockShowCommand { - id: u32, -} - -fn block_show<'a>(id: L1BatchNumber) -> AppFnBox<'a> { - Box::new(move |app| { - let block = blocks::get_block_info(id, app)?; - print_block_info(&block) - }) -} - -fn match_prover_cmd(cmd: &ProverCommand) -> CmdMatch { - match cmd { - ProverCommand::Stats => Ok(prover_stats()), - ProverCommand::Ls(x) => prover_ls(x), - } -} - -fn match_block_cmd(cmd: &BlockCommand) -> CmdMatch { - match cmd { - BlockCommand::Show(cmd) => Ok(block_show(L1BatchNumber(cmd.id))), - } -} - -fn match_cmd(cmd: &Command) -> CmdMatch { - match cmd { - Command::Prover(cmd) => match_prover_cmd(cmd), - Command::Blocks(cmd) => match_block_cmd(cmd), - } -} - -fn main() -> Result<(), AppError> { - let cli = Cli::parse(); - - let exec_fn = match_cmd(&cli.command)?; - - let mut app = create_app(&cli.profile)?; - - println!(); - let result = exec_fn(&mut app); - println!(); - - result -} diff --git a/core/bin/admin-tools/src/prover.rs b/core/bin/admin-tools/src/prover.rs deleted file mode 100644 index c316b80cb30c..000000000000 --- a/core/bin/admin-tools/src/prover.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::application::AppError; - -use super::application::App; -use std::string::ToString; -use zksync_dal::{self, prover_dal::GetProverJobsParams}; -use zksync_types::{ - proofs::{ProverJobInfo, ProverJobStatus}, - L1BatchNumber, -}; - -pub struct ProverStats { - pub successful: usize, - pub successful_padding: L1BatchNumber, - pub in_progress: usize, - pub queued: usize, - pub queued_padding: L1BatchNumber, - pub failed: usize, - pub jobs: Vec<ProverJobInfo>, -} - -pub fn get_stats(app: &mut App) -> Result<ProverStats, AppError> { - let handle = app.tokio.handle(); - let stats = handle.block_on(app.db.prover_dal().get_prover_jobs_stats()); - let stats_extended = handle - .block_on(app.db.prover_dal().get_extended_stats()) - .map_err(|x| AppError::Db(x.to_string()))?; - - Ok(ProverStats { - successful: stats.successful, - successful_padding: stats_extended.successful_padding, - in_progress: stats.in_progress, - queued: stats.queued, - queued_padding: stats_extended.queued_padding, - failed: stats.failed, - jobs: stats_extended.active_area, - }) -} - -pub fn print_stats(stats: &ProverStats, term_width: u32) -> Result<(), AppError> { - struct Map { - //width: u32, - successful: Vec<bool>, - in_progress: Vec<bool>, - queued: Vec<bool>, - failed: Vec<bool>, - skipped: Vec<bool>, - } - - impl Map { - fn new( - 
display_w: u32, - active_area_start: u32, - active_area_size: u32, - jobs: &[ProverJobInfo], - ) -> Map { - let mut s: Vec<_> = (0..display_w).map(|_| false).collect(); - let mut i: Vec<_> = (0..display_w).map(|_| false).collect(); - let mut q: Vec<_> = (0..display_w).map(|_| false).collect(); - let mut f: Vec<_> = (0..display_w).map(|_| false).collect(); - let mut sk: Vec<_> = (0..display_w).map(|_| false).collect(); - - let area_term_ratio = active_area_size as f32 / display_w as f32; - - for j in jobs - .iter() - .filter(|x| !matches!(x.status, ProverJobStatus::Ignored)) - { - let ix = ((j.block_number.0 - active_area_start) as f32 / area_term_ratio) as usize; - - (match j.status { - ProverJobStatus::Successful(_) => &mut s, - ProverJobStatus::InProgress(_) => &mut i, - ProverJobStatus::Queued => &mut q, - ProverJobStatus::Failed(_) => &mut f, - ProverJobStatus::Skipped => &mut sk, - _ => unreachable!(), - })[ix] = true; - } - - Map { - successful: s, - failed: f, - in_progress: i, - queued: q, - skipped: sk, - } - } - } - - let active_area_start = stats.successful_padding.0 + 1; - let active_area_size = stats.queued_padding.0 - active_area_start; - - let display_w = std::cmp::min(term_width, active_area_size); - - let map = Map::new(display_w, active_area_start, active_area_size, &stats.jobs); - - let map_fn = |x: &bool| match x { - true => "+", - false => ".", - }; - - let to_str_fn = |v: Vec<_>| v.iter().map(map_fn).collect::<String>(); - - println!("Prover jobs: "); - println!(" Queued: {}", stats.queued); - println!(" In progress: {}", stats.in_progress); - println!( - " Successful: {}, block reach: {}", - stats.successful, stats.successful_padding - ); - println!(" Failed: {}", stats.failed); - - if stats.failed > 0 { - println!(" [id:block] circuit type") - } - - for x in stats - .jobs - .iter() - .filter(|x| matches!(x.status, ProverJobStatus::Failed(_))) - { - println!(" - [{}:{}] {}", x.id, x.block_number, x.circuit_type) - } - - println!(); - println!( - "Active area [{} - {}] ({})", - stats.successful_padding.0 + 1, - stats.queued_padding.0 - 1, - stats.queued_padding.0 - stats.successful_padding.0 - 2, - ); - println!("q: --|{}|--", to_str_fn(map.queued)); - println!("i: --|{}|--", to_str_fn(map.in_progress)); - println!("s: --|{}|--", to_str_fn(map.successful)); - println!("f: --|{}|--", to_str_fn(map.failed)); - println!("x: --|{}|--", to_str_fn(map.skipped)); - - Ok(()) -} - -pub fn get_jobs(app: &mut App, opts: GetProverJobsParams) -> Result<Vec<ProverJobInfo>, AppError> { - let handle = app.tokio.handle(); - handle - .block_on(app.db.prover_dal().get_jobs(opts)) - .map_err(|x| AppError::Db(x.to_string())) -} - -pub fn print_jobs(jobs: &[ProverJobInfo]) -> Result<(), AppError> { - fn pji2string(job: &ProverJobInfo) -> String { - format!( - "Id: {} -Block: {} -Circuit type: {} -Aggregation round: {} -Status: {}", - job.id, - job.block_number, - job.circuit_type, - job.position.aggregation_round as u32, - job.status - ) - } - - let results = jobs.iter().map(pji2string).collect::<Vec<_>>(); - - println!("{}\n\n{} results", results.join("\n\n"), results.len()); - - Ok(()) -} diff --git a/core/bin/circuit_synthesizer/Cargo.lock b/core/bin/circuit_synthesizer/Cargo.lock deleted file mode 100644 index 7c7f491332ce..000000000000 --- a/core/bin/circuit_synthesizer/Cargo.lock +++ /dev/null @@ -1,5769 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addchain" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "addr2line" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher", -] - -[[package]] -name = "aes-ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7729c3cde54d67063be556aeac75a81330d802f0259500ca40cb52967f975763" -dependencies = [ - "aes-soft", - "aesni", - "cipher", - "ctr", -] - -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" - -[[package]] -name = "api" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.2#9418d0216d52e605a2ecd02e33e9cf0361630333" -dependencies = [ - "bellman_ce", - "cfg-if 1.0.0", - "num_cpus", - "serde", -] - -[[package]] -name = "arr_macro" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a105bfda48707cf19220129e78fca01e9639433ffaef4163546ed8fb04120a5" -dependencies = [ - "arr_macro_impl", - "proc-macro-hack", -] - -[[package]] -name = "arr_macro_impl" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" -dependencies = [ - "proc-macro-hack", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "async-trait" -version = "0.1.71" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "atoi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616896e05fc0e2649463a93a15183c6a16bf03413a7af88ef1285ddedfa9cda5" -dependencies = [ - "num-traits", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backtrace" -version = "0.3.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - -[[package]] -name 
= "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "bellman_ce" -version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.28", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "bigdecimal" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc403c26e6b03005522e6e8053384c4e881dfe5b2bf041c0c2c49be33d64a539" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -dependencies = [ - "serde", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "blake2-rfc_bellman_edition" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc60350286c7c3db13b98e91dbe5c8b6830a6821bc20af5b0c310ce94d74915" -dependencies = [ - "arrayvec 0.4.12", - "byteorder", - "constant_time_eq", -] - -[[package]] -name = "blake2s_const" -version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding", - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "bumpalo" -version = "3.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "rustc-serialize", - "serde", - "time 0.1.43", - "wasm-bindgen", - "winapi", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - -[[package]] -name = "circuit_testing" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#abd44b507840f836da6e084aaacb2ba8a7cb1df6" -dependencies = [ - "bellman_ce", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" -dependencies = [ - "ethereum-types", - "franklin-crypto", - "handlebars", - "hex", - "paste", - "rescue_poseidon", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "codegen" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff61280aed771c3070e7dcc9e050c66f1eb1e3b96431ba66f9f74641d02fc41d" -dependencies = [ - "indexmap 1.9.3", -] - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - -[[package]] -name = "const-oid" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" - -[[package]] -name = "cpufeatures" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" -dependencies = [ - "libc", -] - -[[package]] -name = "crc" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" - -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", -] - -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - 
"cfg-if 1.0.0", - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.15", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] 
-name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.2#4205618b2c3ef82c8e498a318a95f3f3a64496e2" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.31", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.31", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher", -] - -[[package]] -name = "ctrlc" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" -dependencies = [ - "nix", - "windows-sys", -] - -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.66", - "quote 1.0.31", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "debugid" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" -dependencies = [ - "serde", - "uuid", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", - "crypto-bigint 0.3.2", - "pem-rfc7468", -] - -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid 0.9.4", - "zeroize", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2 1.0.66", - "quote 1.0.31", - "rustc_version", - "syn 1.0.109", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", - "subtle", -] - -[[package]] -name = "dirs" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "dotenv" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" - -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve", - "rfc6979", - "signature", -] - -[[package]] -name = "either" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" -dependencies = [ - "serde", -] - -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff", - "generic-array", - "group", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", -] - -[[package]] -name = "encoding_rs" -version = "0.8.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" 
-dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "env_logger" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" -dependencies = [ - "humantime", - "is-terminal", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "envy" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965" -dependencies = [ - "serde", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "ethabi" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5" -dependencies = [ - "ethereum-types", - "hex", - "serde", - "serde_json", - "sha3 0.9.1", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak 2.0.2", -] - -[[package]] -name = "ethereum-types" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "ff_derive_ce", - "hex", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "proc-macro2 1.0.66", - "quote 1.0.31", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "findshlibs" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" 
-dependencies = [ - "cc", - "lazy_static", - "libc", - "winapi", -] - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "franklin-crypto" -version = "0.0.5" -source = "git+https://github.com/matter-labs/franklin-crypto?branch=dev#5922873d25ecec827cd60420ca8cd84a188bb965" -dependencies = [ - "arr_macro", - "bellman_ce", - "bit-vec", - "blake2 0.9.2", - "blake2-rfc_bellman_edition", - "blake2s_simd", - "byteorder", - "digest 0.9.0", - "hex", - "indexmap 1.9.3", - "itertools", - "lazy_static", - "num-bigint 0.4.3", - "num-derive 0.2.5", - "num-integer", - "num-traits", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "sha3 0.9.1", - "smallvec", - "splitmut", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" - -[[package]] -name = "futures" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" - -[[package]] -name = "futures-executor" -version = "0.3.28" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-intrusive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" -dependencies = [ - "futures-core", - "lock_api", - "parking_lot 0.11.2", -] - -[[package]] -name = "futures-io" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" - -[[package]] -name = "futures-macro" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "futures-sink" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" - -[[package]] -name = "futures-task" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - -[[package]] -name = "futures-util" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" -dependencies = [ - "futures 0.1.31", - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.27.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" - -[[package]] -name = "google-cloud-auth" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb" -dependencies = [ - "async-trait", - "base64 0.21.2", - "google-cloud-metadata", - "google-cloud-token", - "home", - "jsonwebtoken", - "reqwest", - "serde", - "serde_json", - "thiserror", - "time 0.3.23", - "tokio", - "tracing", - "urlencoding", -] - -[[package]] -name = "google-cloud-metadata" 
-version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" -dependencies = [ - "reqwest", - "thiserror", - "tokio", -] - -[[package]] -name = "google-cloud-storage" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932" -dependencies = [ - "async-stream", - "base64 0.21.2", - "bytes", - "futures-util", - "google-cloud-auth", - "google-cloud-metadata", - "google-cloud-token", - "hex", - "once_cell", - "percent-encoding", - "regex", - "reqwest", - "ring", - "rsa", - "serde", - "serde_json", - "sha2 0.10.6", - "thiserror", - "time 0.3.23", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "google-cloud-token" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a" -dependencies = [ - "async-trait", -] - -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "h2" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap 1.9.3", - "slab", - "tokio", - "tokio-util 0.7.8", - "tracing", -] - -[[package]] -name = "handlebars" -version = "4.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" - -[[package]] -name = "hashlink" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" -dependencies = [ - "hashbrown 0.11.2", -] - -[[package]] -name = "headers" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" -dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http", -] - -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "home" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - -[[package]] -name = "http" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" -dependencies = [ - "bytes", - "fnv", - "itoa 1.0.9", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - 
"futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa 1.0.9", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" -dependencies = [ - "futures-util", - "http", - "hyper", - "rustls", - "tokio", - "tokio-rustls", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg 1.1.0", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" -dependencies = [ - "equivalent", - "hashbrown 0.14.0", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.2", - "libc", - "windows-sys", -] - -[[package]] -name = "ipnet" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" - -[[package]] -name = "ipnetwork" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" - -[[package]] -name = "is-terminal" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.4", - "windows-sys", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "js-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures 0.3.28", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonwebtoken" -version = "8.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.2", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sha2 0.10.6", -] - -[[package]] -name = "keccak" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" 
-dependencies = [ - "cpufeatures", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin", -] - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "libm" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" - -[[package]] -name = "local-ip-address" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa" -dependencies = [ - "libc", - "neli", - "thiserror", - "windows-sys", -] - -[[package]] -name = "lock_api" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" -dependencies = [ - "autocfg 1.1.0", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" - -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata 0.1.10", -] - -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "md-5" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - 
"autocfg 1.1.0", -] - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash", - "metrics-macros", - "portable-atomic 0.3.20", -] - -[[package]] -name = "metrics-exporter-prometheus" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70" -dependencies = [ - "hyper", - "indexmap 1.9.3", - "ipnet", - "metrics", - "metrics-util", - "parking_lot 0.12.1", - "portable-atomic 0.3.20", - "quanta", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "metrics-util" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" -dependencies = [ - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", - "hashbrown 0.12.3", - "metrics", - "num_cpus", - "parking_lot 0.12.1", - "portable-atomic 0.3.20", - "quanta", - "sketches-ddsketch", -] - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" -dependencies = [ - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "neli" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" -dependencies = [ - "byteorder", - "libc", - "log", - "neli-proc-macros", -] - -[[package]] -name = "neli-proc-macros" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" -dependencies = [ - "either", - "proc-macro2 1.0.66", - "quote 1.0.31", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "nix" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" -dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "libc", - "static_assertions", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - -[[package]] -name = "num" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f" -dependencies = [ - "num-bigint 0.3.3", - "num-complex 0.3.1", - "num-integer", - "num-iter", - "num-rational 0.3.2", - "num-traits", -] - -[[package]] -name = "num" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" -dependencies = [ - "num-bigint 0.4.3", - "num-complex 0.4.3", - "num-integer", - "num-iter", - "num-rational 0.4.1", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - -[[package]] -name = "num-complex" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5" -dependencies = [ - "num-traits", - "serde", -] - -[[package]] -name = "num-complex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-derive" -version = 
"0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg 1.1.0", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" -dependencies = [ - "autocfg 1.1.0", - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-rational" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" -dependencies = [ - "autocfg 1.1.0", - "num-bigint 0.4.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg 1.1.0", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.2", - "libc", -] - -[[package]] -name = "object" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "openssl" -version = "0.10.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" -dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.90" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel 0.5.8", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", -] - -[[package]] -name = "opentelemetry-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449048140ee61e28f57abe6e9975eedc1f3a29855c7407bd6c12b18578863379" -dependencies = [ - "async-trait", - "bytes", - "http", - "opentelemetry", - "reqwest", -] - -[[package]] -name = "opentelemetry-otlp" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" -dependencies = [ - "async-trait", - "futures 0.3.28", - "futures-util", - "http", - "opentelemetry", - "opentelemetry-http", - "prost", - "prost-build", - "reqwest", - "thiserror", - "tokio", - "tonic", - "tonic-build", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" -dependencies = [ - "opentelemetry", -] - -[[package]] -name = "os_info" -version = "3.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" -dependencies = [ - "log", - "serde", - "winapi", -] - -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007b21259660d025918e653508f03050bf23fb96a88601f9936329faadc597" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "parity-crypto" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b92ea9ddac0d6e1db7c49991e7d397d34a9fd814b4c93cda53788e8eef94e35" -dependencies = [ - "aes", - "aes-ctr", - "block-modes", - "digest 0.9.0", - "ethereum-types", - "hmac 0.10.1", - "lazy_static", - "pbkdf2 0.7.5", - "ripemd160", - "rustc-hex", - "scrypt", - "secp256k1 0.20.3", - "sha2 0.9.9", - "subtle", - "tiny-keccak 2.0.2", - "zeroize", -] - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.4", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" 
-dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core 0.9.8", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall 0.3.5", - "smallvec", - "windows-targets", -] - -[[package]] -name = "password-hash" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54986aa4bfc9b98c6a5f40184223658d187159d7b3c6af33f2b2aa25ae1db0fa" -dependencies = [ - "base64ct", - "rand_core 0.6.4", -] - -[[package]] -name = "paste" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" - -[[package]] -name = "pbkdf2" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b8c0d71734018084da0c0354193a5edfb81b20d2d57a92c5b154aefc554a4a" -dependencies = [ - "crypto-mac 0.10.1", -] - -[[package]] -name = "pbkdf2" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf916dd32dd26297907890d99dc2740e33f6bd9073965af4ccff2967962f5508" -dependencies = [ - "base64ct", - "crypto-mac 0.10.1", - "hmac 0.10.1", - "password-hash", - "sha2 0.9.9", -] - -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "pem-rfc7468" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" -dependencies = [ - "base64ct", -] - -[[package]] -name = "percent-encoding" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" - -[[package]] -name = "pest" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d2d1d55045829d65aad9d389139882ad623b33b904e7c9f1b10c5b8927298e5" -dependencies = [ - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f94bca7e7a599d89dea5dfa309e217e7906c3c007fb9c3299c40b10d6a315d3" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d490fe7e8556575ff6911e45567ab95e71617f43781e5c05490dc8d75c965c" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "pest_meta" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674c66ebb4b4d9036012091b537aae5878970d6999f81a265034d85b136b341" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.6", -] - -[[package]] -name = "petgraph" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" -dependencies = [ - "fixedbitset", - "indexmap 1.9.3", -] - -[[package]] -name = "pin-project" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkcs1" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" -dependencies = [ - "der 0.5.1", - "pkcs8 0.8.0", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - -[[package]] -name = "pkg-config" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" - -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.4.1", -] - -[[package]] -name = "portable-atomic" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc55135a600d700580e406b4de0d59cb9ad25e344a3a091a97ded2622ec4ec6" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - 
"fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "version_check", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "proc-macro2" -version = "1.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prometheus_exporter" -version = "1.0.0" -dependencies = [ - "metrics", - "metrics-exporter-prometheus", - "tokio", - "vlog", - "zksync_config", -] - -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes", - "prost", -] - -[[package]] -name = "prover-service" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.2#9418d0216d52e605a2ecd02e33e9cf0361630333" -dependencies = [ - "api", - "bincode", - "crossbeam-utils 0.8.16", - "log", - "num_cpus", - "rand 0.4.6", - "serde", - "serde_json", - "zkevm_test_harness 1.3.2", -] - -[[package]] -name = "quanta" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" -dependencies = [ - "crossbeam-utils 0.8.16", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" -dependencies = [ - "proc-macro2 1.0.66", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.8", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "rayon" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" -dependencies = [ - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.16", - "num_cpus", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata 0.3.3", - "regex-syntax 0.7.4", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", -] - -[[package]] -name = "regex-automata" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.7.4", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" - -[[package]] -name = "reqwest" -version = "0.11.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" -dependencies = [ - "base64 0.21.2", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "mime_guess", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.8", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots", - "winreg", -] - -[[package]] -name = "rescue_poseidon" -version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" -dependencies = [ - "addchain", - "arrayvec 0.7.4", - "blake2 0.10.6", - "byteorder", - "franklin-crypto", - "num-bigint 0.3.3", - "num-integer", - "num-iter", - "num-traits", - "rand 0.4.6", - "serde", - "sha3 0.9.1", - "smallvec", -] - -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "ripemd160" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rustc-hex", -] - -[[package]] -name = "rsa" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" -dependencies = [ - "byteorder", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.8.0", - "rand_core 0.6.4", - "smallvec", - "subtle", - "zeroize", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.37.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys", -] - -[[package]] -name = "rustix" -version = "0.38.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" -dependencies = [ - "bitflags 2.3.3", - "errno", - "libc", - "linux-raw-sys 0.4.3", - "windows-sys", -] - -[[package]] -name = "rustls" -version = "0.21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" -dependencies = [ - "base64 0.21.2", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "salsa20" -version = "0.7.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" -dependencies = [ - "cipher", -] - -[[package]] -name = "schannel" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scrypt" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" -dependencies = [ - "base64 0.13.1", - "hmac 0.10.1", - "pbkdf2 0.6.0", - "rand 0.7.3", - "rand_core 0.5.1", - "salsa20", - "sha2 0.9.9", - "subtle", -] - -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - -[[package]] -name = "secp256k1" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" -dependencies = [ - "rand 0.6.5", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - -[[package]] -name = "security-framework" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" - -[[package]] -name = "sentry" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b0ad16faa5d12372f914ed40d00bda21a6d1bdcc99264c5e5e1c9495cf3654" -dependencies = [ - "httpdate", - "native-tls", - "reqwest", - "sentry-backtrace", - "sentry-contexts", - "sentry-core", - "sentry-debug-images", - "sentry-panic", - "sentry-tracing", - "tokio", - "ureq", -] - -[[package]] -name = "sentry-backtrace" -version = "0.31.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f2ee8f147bb5f22ac59b5c35754a759b9a6f6722402e2a14750b2a63fc59bd" -dependencies = [ - "backtrace", - "once_cell", - "regex", - "sentry-core", -] - -[[package]] -name = "sentry-contexts" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcd133362c745151eeba0ac61e3ba8350f034e9fe7509877d08059fe1d7720c6" -dependencies = [ - "hostname", - "libc", - "os_info", - "rustc_version", - "sentry-core", - "uname", -] - -[[package]] -name = "sentry-core" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7163491708804a74446642ff2c80b3acd668d4b9e9f497f85621f3d250fd012b" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-debug-images" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a5003d7ff08aa3b2b76994080b183e8cfa06c083e280737c9cee02ca1c70f5e" -dependencies = [ - "findshlibs", - "once_cell", - "sentry-core", -] - -[[package]] -name = "sentry-panic" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4dfe8371c9b2e126a8b64f6fefa54cef716ff2a50e63b5558a48b899265bccd" -dependencies = [ - "sentry-backtrace", - "sentry-core", -] - -[[package]] -name = "sentry-tracing" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca8b88978677a27ee1a91beafe4052306c474c06f582321fde72d2e2cc2f7f" -dependencies = [ - "sentry-backtrace", - "sentry-core", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-types" -version = "0.31.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e7a88e0c1922d19b3efee12a8215f6a8a806e442e665ada71cc222cab72985f" -dependencies = [ - "debugid", - "getrandom 0.2.10", - "hex", - "serde", - "serde_json", - "thiserror", - "time 0.3.23", - "url", - "uuid", -] - -[[package]] -name = "serde" -version = "1.0.171" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.171" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "serde_json" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" -dependencies = [ - "indexmap 2.0.0", - "itoa 1.0.9", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa 1.0.9", - "ryu", - "serde", -] - -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", -] - -[[package]] -name = "sha3" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint 0.4.3", - "num-traits", - "thiserror", - "time 0.3.23", -] - -[[package]] -name = "sketches-ddsketch" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" - -[[package]] -name = "slab" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" - -[[package]] -name = 
"socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der 0.5.1", -] - -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - -[[package]] -name = "splitmut" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" - -[[package]] -name = "sqlformat" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b7922be017ee70900be125523f38bdd644f4f06a1b16e8fa5a8ee8c34bffd4" -dependencies = [ - "itertools", - "nom", - "unicode_categories", -] - -[[package]] -name = "sqlx" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7911b0031a0247af40095838002999c7a52fba29d9739e93326e71a5a1bc9d43" -dependencies = [ - "sqlx-core", - "sqlx-macros", -] - -[[package]] -name = "sqlx-core" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aec89bfaca8f7737439bad16d52b07f1ccd0730520d3bf6ae9d069fe4b641fb1" -dependencies = [ - "ahash", - "atoi", - "base64 0.13.1", - "bigdecimal", - "bitflags 1.3.2", - "byteorder", - "bytes", - "chrono", - "crc", - "crossbeam-channel 0.5.8", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", - "dirs", - "either", - "futures-channel", - "futures-core", - "futures-intrusive", - "futures-util", - "hashlink", - "hex", - "hmac 0.11.0", - "indexmap 1.9.3", - "ipnetwork", - "itoa 0.4.8", - "libc", - "log", - "md-5", - "memchr", - "num-bigint 0.3.3", - "once_cell", - "parking_lot 0.11.2", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_json", - "sha-1", - "sha2 0.9.9", - "smallvec", - "sqlformat", - "sqlx-rt", - "stringprep", - "thiserror", - "tokio-stream", - "url", - "whoami", -] - -[[package]] -name = "sqlx-macros" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584866c833511b1a152e87a7ee20dee2739746f60c858b3c5209150bc4b466f5" -dependencies = [ - "dotenv", - "either", - "heck 0.3.3", - "hex", - "once_cell", - "proc-macro2 1.0.66", - "quote 1.0.31", - "serde", - "serde_json", - "sha2 0.9.9", - "sqlx-core", - "sqlx-rt", - "syn 1.0.109", - "url", -] - -[[package]] -name = "sqlx-rt" -version = "0.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" -dependencies = [ - "native-tls", - "once_cell", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "stringprep" -version = "0.1.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.66", - "quote 1.0.31", - "rustversion", - "syn 1.0.109", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "unicode-ident", -] - -[[package]] -name = "sync_vm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.2#4205618b2c3ef82c8e498a318a95f3f3a64496e2" -dependencies = [ - "arrayvec 0.7.4", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.2)", - "derivative", - "franklin-crypto", - "hex", - "itertools", - "num-bigint 0.4.3", - "num-derive 0.3.3", - "num-integer", - "num-traits", - "once_cell", - "rand 0.4.6", - "rescue_poseidon", - "serde", - "sha2 0.10.6", - "sha3 0.10.6", - "smallvec", - "zk_evm 1.3.2", - "zkevm_opcode_defs", -] - -[[package]] -name = "sync_vm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "arrayvec 
0.7.4", - "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3)", - "derivative", - "franklin-crypto", - "hex", - "itertools", - "num-bigint 0.4.3", - "num-derive 0.3.3", - "num-integer", - "num-traits", - "once_cell", - "rand 0.4.6", - "rescue_poseidon", - "serde", - "sha2 0.10.6", - "sha3 0.10.6", - "smallvec", - "zk_evm 1.3.3", - "zkevm_opcode_defs", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "fastrand", - "redox_syscall 0.3.5", - "rustix 0.37.23", - "windows-sys", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "test-log" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "thread_local" -version = "1.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" -dependencies = [ - "itoa 1.0.9", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" - -[[package]] -name = "time-macros" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" -dependencies = [ - "time-core", -] - -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" -dependencies = [ - "autocfg 1.1.0", - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls", - "tokio", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" - -[[package]] -name = "toml_edit" -version = "0.19.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" -dependencies = 
[ - "indexmap 2.0.0", - "toml_datetime", - "winnow", -] - -[[package]] -name = "tonic" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" -dependencies = [ - "async-stream", - "async-trait", - "base64 0.13.1", - "bytes", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio", - "tokio-stream", - "tokio-util 0.6.10", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" -dependencies = [ - "proc-macro2 1.0.66", - "prost-build", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util 0.7.8", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "time 0.3.23", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "uname" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" -dependencies = [ - "libc", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "ureq" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" -dependencies = [ - "base64 0.21.2", - "log", - "native-tls", - "once_cell", - "url", -] - -[[package]] -name = "url" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna 0.4.0", - "percent-encoding", - "serde", -] - -[[package]] -name = "urlencoding" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" - -[[package]] -name = "uuid" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" -dependencies = [ - "getrandom 0.2.10", - "serde", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "vlog" -version = "1.0.0" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde_json", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" -dependencies = [ - "quote 1.0.31", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "wasm-streams" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "web-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" -dependencies = [ - "arrayvec 0.7.4", - "base64 0.13.1", - "bytes", - "derive_more", - "ethabi", - "ethereum-types", - "futures 0.3.28", - "futures-timer", - "headers", - "hex", - "idna 0.2.3", - "jsonrpc-core", - "log", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "reqwest", - "rlp", - "secp256k1 0.21.3", - "serde", - "serde_json", - "tiny-keccak 2.0.2", - "url", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] - -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "whoami" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" -dependencies = [ - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" - -[[package]] -name = "winnow" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fac9742fd1ad1bd9643b991319f72dd031016d44b77039a26977eb667141e7" -dependencies = [ - "memchr", -] - -[[package]] -name = "winreg" -version = "0.10.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" - -[[package]] -name = "zk_evm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.2#4262966337708702b5a6cdad902a757acc968dbb" -dependencies = [ - "lazy_static", - "num 0.4.1", - "serde", - "serde_json", - "static_assertions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#c08a8581421d2a0cf1fc8cbbdcd06c00da01fe0e" -dependencies = [ - "anyhow", - "lazy_static", - "num 0.4.1", - "serde", - "serde_json", - "static_assertions", - "zk_evm_abstractions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zk_evm_abstractions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#973a1f661c045e0e8b9a287505f353659279b3b3" -dependencies = [ - "anyhow", - "serde", - "static_assertions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm-assembly" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#edc364e59a2eea9c4b1d4ce79f15d0b7c6b55b98" -dependencies = [ - "env_logger 0.9.3", - "hex", - "lazy_static", - "log", - "nom", - "num-bigint 0.4.3", - "num-traits", - "sha3 0.10.6", - "smallvec", - "structopt", - "thiserror", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#2f69c6975a272e8c31d2d82c136a4ea81df25115" -dependencies = [ - "bitflags 2.3.3", - "blake2 0.10.6", - "ethereum-types", - "k256", - "lazy_static", - "sha2 0.10.6", - "sha3 0.10.6", -] - -[[package]] -name = "zkevm_test_harness" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.2#22f29e09715133f4158ad0d71c48547daf283090" -dependencies = [ - "bincode", - "circuit_testing", - "codegen 0.2.0", - "crossbeam 0.8.2", - "derivative", - "env_logger 0.10.0", - "hex", - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "rayon", - "serde", - "serde_json", - "smallvec", - "structopt", - "sync_vm 1.3.2", - "test-log", - "tracing", - "zk_evm 1.3.2", - "zkevm-assembly", -] - -[[package]] -name = "zkevm_test_harness" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#6453eab3c9c8915f588ff4eceb48d7be9a695ecb" -dependencies = [ - "bincode", - "circuit_testing", - "codegen 0.2.0", - "crossbeam 0.8.2", - "derivative", - "env_logger 0.10.0", - "hex", - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "rayon", - "serde", - "serde_json", - "smallvec", - "structopt", - "sync_vm 1.3.3", - "test-log", - "tracing", - "zk_evm 1.3.3", - "zkevm-assembly", -] - -[[package]] -name = "zksync_basic_types" -version = "1.0.0" -dependencies = [ - "serde", - "web3", -] - -[[package]] -name = "zksync_circuit_synthesizer" -version = "0.1.0" -dependencies = [ - "ctrlc", - "futures 0.3.28", - "local-ip-address", - "metrics", - "prometheus_exporter", - 
"prover-service", - "structopt", - "tokio", - "vlog", - "zkevm_test_harness 1.3.3", - "zksync_config", - "zksync_dal", - "zksync_object_store", - "zksync_prover_utils", - "zksync_queued_job_processor", - "zksync_utils", -] - -[[package]] -name = "zksync_config" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "envy", - "num 0.3.1", - "once_cell", - "serde", - "serde_json", - "url", - "zksync_basic_types", - "zksync_contracts", - "zksync_utils", -] - -[[package]] -name = "zksync_contracts" -version = "1.0.0" -dependencies = [ - "ethabi", - "hex", - "once_cell", - "serde", - "serde_json", - "zksync_utils", -] - -[[package]] -name = "zksync_crypto" -version = "1.0.0" -dependencies = [ - "base64 0.13.1", - "blake2 0.10.6", - "hex", - "once_cell", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "thiserror", - "zksync_basic_types", -] - -[[package]] -name = "zksync_dal" -version = "1.0.0" -dependencies = [ - "anyhow", - "bigdecimal", - "bincode", - "hex", - "itertools", - "metrics", - "num 0.3.1", - "once_cell", - "serde_json", - "sqlx", - "strum", - "thiserror", - "tokio", - "vlog", - "zksync_config", - "zksync_contracts", - "zksync_health_check", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_health_check" -version = "0.1.0" -dependencies = [ - "async-trait", -] - -[[package]] -name = "zksync_mini_merkle_tree" -version = "1.0.0" -dependencies = [ - "once_cell", - "zksync_basic_types", - "zksync_crypto", -] - -[[package]] -name = "zksync_object_store" -version = "1.0.0" -dependencies = [ - "async-trait", - "bincode", - "google-cloud-auth", - "google-cloud-storage", - "http", - "metrics", - "tokio", - "vlog", - "zksync_config", - "zksync_types", -] - -[[package]] -name = "zksync_prover_utils" -version = "1.0.0" -dependencies = [ - "ctrlc", - "futures 0.3.28", - "metrics", - "regex", - "reqwest", - "tokio", - "vlog", - "zksync_config", - "zksync_utils", -] - -[[package]] -name = "zksync_queued_job_processor" -version = "1.0.0" -dependencies = [ - "async-trait", - "tokio", - "vlog", - "zksync_dal", - "zksync_utils", -] - -[[package]] -name = "zksync_types" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "blake2 0.10.6", - "chrono", - "codegen 0.1.0", - "metrics", - "num 0.3.1", - "once_cell", - "parity-crypto", - "rlp", - "serde", - "serde_json", - "serde_with", - "strum", - "thiserror", - "zk_evm 1.3.3", - "zkevm-assembly", - "zkevm_test_harness 1.3.3", - "zksync_basic_types", - "zksync_config", - "zksync_contracts", - "zksync_mini_merkle_tree", - "zksync_utils", -] - -[[package]] -name = "zksync_utils" -version = "1.0.0" -dependencies = [ - "anyhow", - "bigdecimal", - "envy", - "futures 0.3.28", - "hex", - "itertools", - "metrics", - "num 0.3.1", - "reqwest", - "serde", - "thiserror", - "tokio", - "vlog", - "zk_evm 1.3.3", - "zksync_basic_types", -] diff --git a/core/bin/circuit_synthesizer/Cargo.toml b/core/bin/circuit_synthesizer/Cargo.toml deleted file mode 100644 index 987158d38fcd..000000000000 --- a/core/bin/circuit_synthesizer/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "zksync_circuit_synthesizer" -version = "0.1.0" -edition = "2021" - -[[bin]] -name = "zksync_circuit_synthesizer" -path = "src/main.rs" - -[dependencies] -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -zksync_object_store = { path = "../../lib/object_store", version = "1.0" } -zksync_utils = { path = 
"../../lib/utils", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } - -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3"} - -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["legacy"], default-features=false} - -structopt = "0.3.26" -tokio = { version = "1.23.0", features = ["full"] } -futures = "0.3" -ctrlc = { version = "3.1", features = ["termination"] } -local-ip-address = "0.5.0" -metrics = "0.20" diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index b607a2753b11..6a57884dff7f 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -26,7 +26,7 @@ ctrlc = { version = "3.1", features = ["termination"] } thiserror = "1.0" chrono = "0.4" serde_json = "1.0" -ethabi = "16.0.0" +ethabi = "18.0.0" metrics = "0.20" hex = "0.4" serde = { version = "1.0", features = ["derive"] } diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 8ebd2cdeb2c9..b806d4e09520 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -37,7 +37,6 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) { }) .collect(); transaction - .explorer() .contract_verification_dal() .set_zksolc_versions(zksolc_versions) .await @@ -59,7 +58,6 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) { }) .collect(); transaction - .explorer() .contract_verification_dal() .set_solc_versions(solc_versions) .await @@ -81,7 +79,6 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) { }) .collect(); transaction - .explorer() .contract_verification_dal() .set_zkvyper_versions(zkvyper_versions) .await @@ -104,7 +101,6 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) { .collect(); transaction - .explorer() .contract_verification_dal() .set_vyper_versions(vyper_versions) .await @@ -133,7 +129,7 @@ async fn main() { listener_port: verifier_config.prometheus_port, ..ApiConfig::from_env().prometheus }; - let pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let pool = ConnectionPool::singleton(DbVariant::Master).build().await; vlog::init(); let sentry_guard = vlog::init_sentry(); @@ -160,8 +156,6 @@ async fn main() { let contract_verifier = ContractVerifier::new(verifier_config, pool); let tasks = vec![ - // The prover connection pool is not used by the contract verifier, but we need to pass it - // since `JobProcessor` trait requires it. 
tokio::spawn(contract_verifier.run(stop_receiver, opt.jobs_number)), prometheus_exporter::run_prometheus_exporter(prometheus_config.listener_port, None), ]; diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index e13ca05c5425..e39c4796a2b0 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -13,7 +13,7 @@ use zksync_config::ContractVerifierConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ - explorer_api::{ + contract_verification_api::{ CompilationArtifacts, CompilerType, DeployContractCalldata, SourceCodeData, VerificationInfo, VerificationRequest, }, @@ -59,7 +59,6 @@ impl ContractVerifier { // Bytecode should be present because it is checked when accepting request. let (deployed_bytecode, creation_tx_calldata) = storage - .explorer() .contract_verification_dal() .get_contract_info_for_verification(request.req.contract_address).await .unwrap() @@ -241,13 +240,14 @@ impl ContractVerifier { .await .map_err(|_| ContractVerifierError::CompilationTimeout)??; + let file_name = format!("{contract_name}.vy"); let object = output .as_object() .cloned() .ok_or(ContractVerifierError::InternalError)?; for (path, artifact) in object { let path = Path::new(&path); - if path.file_name().unwrap().to_str().unwrap() == contract_name.as_str() { + if path.file_name().unwrap().to_str().unwrap() == file_name { let bytecode_str = artifact["bytecode"] .as_str() .ok_or(ContractVerifierError::InternalError)?; @@ -420,7 +420,6 @@ impl ContractVerifier { match verification_result { Ok(info) => { storage - .explorer() .contract_verification_dal() .save_verification_info(info) .await @@ -436,7 +435,6 @@ impl ContractVerifier { _ => serde_json::Value::Array(Vec::new()), }; storage - .explorer() .contract_verification_dal() .save_verification_error(request_id, error_message, compilation_errors, None) .await @@ -466,7 +464,6 @@ impl JobProcessor for ContractVerifier { // `compilation_timeout` + `non_compilation_time_overhead` (which is significantly less than `compilation_timeout`), // we re-pick up jobs that are being executed for a bit more than `compilation_timeout`. 
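 // (Illustration with hypothetical numbers, not part of the patch: if `compilation_timeout` were 30 s and `TIME_OVERHEAD` 5 s, a request whose processing started more than 35 s ago would be treated as stalled and handed out again by this query.)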
let job = connection - .explorer() .contract_verification_dal() .get_next_queued_verification_request(self.config.compilation_timeout() + TIME_OVERHEAD) .await @@ -479,7 +476,6 @@ impl JobProcessor for ContractVerifier { let mut connection = self.connection_pool.access_storage().await; connection - .explorer() .contract_verification_dal() .save_verification_error( job_id, diff --git a/core/bin/contract-verifier/src/zkvyper_utils.rs b/core/bin/contract-verifier/src/zkvyper_utils.rs index a0831f44712b..33a99f256f90 100644 --- a/core/bin/contract-verifier/src/zkvyper_utils.rs +++ b/core/bin/contract-verifier/src/zkvyper_utils.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::process::Stdio; @@ -40,19 +41,23 @@ impl ZkVyper { .stdout(Stdio::piped()) .stderr(Stdio::piped()); - let mut files = vec![]; - for (name, content) in input.sources { - let mut file = tempfile::Builder::new() - .prefix(&name) - .suffix("") - .rand_bytes(0) - .tempfile() - .map_err(|_err| ContractVerifierError::InternalError)?; + let temp_dir = tempfile::tempdir().map_err(|_err| ContractVerifierError::InternalError)?; + for (mut name, content) in input.sources { + if !name.ends_with(".vy") { + name += ".vy"; + } + let path = temp_dir.path().join(name); + if let Some(prefix) = path.parent() { + std::fs::create_dir_all(prefix) + .map_err(|_err| ContractVerifierError::InternalError)?; + } + let mut file = + File::create(&path).map_err(|_err| ContractVerifierError::InternalError)?; file.write_all(content.as_bytes()) .map_err(|_err| ContractVerifierError::InternalError)?; - command.arg(file.path().to_str().unwrap()); - files.push(file); + command.arg(path.into_os_string()); } + let child = command .spawn() .map_err(|_err| ContractVerifierError::InternalError)?; diff --git a/core/bin/l1_tx_effective_gas_price_migration/Cargo.toml b/core/bin/enumeration_indices_migration/Cargo.toml similarity index 60% rename from core/bin/l1_tx_effective_gas_price_migration/Cargo.toml rename to core/bin/enumeration_indices_migration/Cargo.toml index 0c6dff2371a1..db33b6f00767 100644 --- a/core/bin/l1_tx_effective_gas_price_migration/Cargo.toml +++ b/core/bin/enumeration_indices_migration/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "l1_tx_effective_gas_price_migration" +name = "enumeration_indices_migration" version = "0.1.0" edition = "2021" @@ -7,6 +7,6 @@ edition = "2021" [dependencies] tokio = { version = "1" } -zksync_types = { path = "../../lib/types", version = "1.0" } zksync_dal = { path = "../../lib/dal", version = "1.0" } -structopt = "0.3.26" +zksync_core = { path = "../../bin/zksync_core", version = "1.0" } +vlog = { path = "../../lib/vlog", version = "1.0" } diff --git a/core/bin/enumeration_indices_migration/src/main.rs b/core/bin/enumeration_indices_migration/src/main.rs new file mode 100644 index 000000000000..86d581f379b7 --- /dev/null +++ b/core/bin/enumeration_indices_migration/src/main.rs @@ -0,0 +1,11 @@ +use zksync_dal::connection::DbVariant; +use zksync_dal::ConnectionPool; + +#[tokio::main] +async fn main() { + vlog::init(); + + let pool = ConnectionPool::singleton(DbVariant::Master).build().await; + let mut storage = pool.access_storage().await; + zksync_core::state_keeper::set_missing_initial_writes_indices(&mut storage).await; +} diff --git a/core/bin/external_node/src/config.rs b/core/bin/external_node/src/config/mod.rs similarity index 55% rename from core/bin/external_node/src/config.rs rename to core/bin/external_node/src/config/mod.rs index 
9ec79384da57..f6506c9f1dde 100644 --- a/core/bin/external_node/src/config.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -3,16 +3,23 @@ use serde::Deserialize; use std::{env, time::Duration}; use url::Url; -use zksync_basic_types::{Address, L1ChainId, L2ChainId, H256}; +use zksync_basic_types::{Address, L1ChainId, L2ChainId, MiniblockNumber, H256}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_core::api_server::{tx_sender::TxSenderConfig, web3::state::InternalApiConfig}; +use zksync_core::api_server::{ + tx_sender::TxSenderConfig, web3::state::InternalApiConfig, web3::Namespace, +}; use zksync_types::api::BridgeAddresses; use zksync_web3_decl::{ jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, - namespaces::{EthNamespaceClient, ZksNamespaceClient}, + namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, }; +#[cfg(test)] +mod tests; + +const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024; + /// This part of the external node config is fetched directly from the main node. #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct RemoteENConfig { @@ -24,6 +31,11 @@ pub struct RemoteENConfig { pub l2_testnet_paymaster_addr: Option<Address>
, pub l2_chain_id: L2ChainId, pub l1_chain_id: L1ChainId, + + pub default_aa_hash: H256, + pub bootloader_hash: H256, + + pub fair_l2_gas_price: u64, } impl RemoteENConfig { @@ -54,6 +66,16 @@ impl RemoteENConfig { .context("Failed to fetch L1 chain ID")? .as_u64(), ); + let current_miniblock = client + .get_block_number() + .await + .context("Failed to fetch block number")?; + let block_header = client + .sync_l2_block(MiniblockNumber(current_miniblock.as_u32()), false) + .await + .context("Failed to fetch last miniblock header")? + .expect("Block is known to exist"); + let base_system_contract_hashes = block_header.base_system_contracts_hashes; Ok(Self { diamond_proxy_addr, @@ -64,121 +86,244 @@ impl RemoteENConfig { l2_weth_bridge_addr: bridges.l2_weth_bridge, l2_chain_id, l1_chain_id, + default_aa_hash: base_system_contract_hashes.default_aa, + bootloader_hash: base_system_contract_hashes.bootloader, + fair_l2_gas_price: block_header.l2_fair_gas_price, }) } + + pub fn base_system_contracts_hashes(&self) -> BaseSystemContractsHashes { + BaseSystemContractsHashes { + default_aa: self.default_aa_hash, + bootloader: self.bootloader_hash, + } + } } /// This part of the external node config is completely optional to provide. -/// It can tweak limits of the API, delay intervals of cetrain components, etc. +/// It can tweak limits of the API, delay intervals of certain components, etc. /// If any of the fields are not provided, the default values will be used. -#[derive(Debug, Deserialize, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Deserialize)] pub struct OptionalENConfig { + // User-facing API limits /// Max possible limit of filters to be in the API state at once. - filters_limit: Option<usize>, + #[serde(default = "OptionalENConfig::default_filters_limit")] + pub filters_limit: usize, /// Max possible limit of subscriptions to be in the API state at once. - subscriptions_limit: Option<usize>, - /// Interval between polling db for pubsub (in ms). - pubsub_polling_interval: Option<u64>, + #[serde(default = "OptionalENConfig::default_subscriptions_limit")] + pub subscriptions_limit: usize, /// Max possible limit of entities to be requested via API at once. - req_entities_limit: Option<usize>, - /// Max possible size of an ABI encoded tx (in bytes). - max_tx_size: Option<usize>, - /// The factor by which to scale the gasLimit - estimate_gas_scale_factor: Option<f64>, - /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. - estimate_gas_acceptable_overestimation: Option<u32>, - /// The multiplier to use when suggesting gas price. Should be higher than one, - /// otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block - gas_price_scale_factor: Option<f64>, - /// Tx nonce: how far ahead from the committed nonce can it be. - max_nonce_ahead: Option<u32>, - metadata_calculator_delay: Option<u64>, + #[serde(default = "OptionalENConfig::default_req_entities_limit")] + pub req_entities_limit: usize, + /// Max possible size of an ABI encoded tx (in bytes). + #[serde(default = "OptionalENConfig::default_max_tx_size")] + pub max_tx_size: usize, /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the api server panics. /// This is a temporary solution to mitigate API request resulting in thousands of DB queries. pub vm_execution_cache_misses_limit: Option<usize>, /// Inbound transaction limit used for throttling. pub transactions_per_sec_limit: Option<u32>, - /// Port on which the Prometheus exporter server is listening.
- pub prometheus_port: Option<u16>, - /// Throttle interval for the tree in milliseconds. This interval will be - /// applied after each time the tree makes progress. - merkle_tree_throttle: Option<u64>, - /// Maximum number of blocks to be processed by the Merkle tree at a time. - max_blocks_per_tree_batch: Option<usize>, + /// Limit for fee history block range. + #[serde(default = "OptionalENConfig::default_fee_history_limit")] + pub fee_history_limit: u64, + /// Maximum number of requests in a single batch JSON RPC request. Default is 500. + #[serde(default = "OptionalENConfig::default_max_batch_request_size")] + pub max_batch_request_size: usize, + /// Maximum response body size in MiBs. Default is 10 MiB. + #[serde(default = "OptionalENConfig::default_max_response_body_size_mb")] + pub max_response_body_size_mb: usize, + + // Other API config settings + /// Interval between polling DB for pubsub (in ms). + #[serde( + rename = "pubsub_polling_interval", + default = "OptionalENConfig::default_polling_interval" + )] + polling_interval: u64, + /// Tx nonce: how far ahead from the committed nonce can it be. + #[serde(default = "OptionalENConfig::default_max_nonce_ahead")] + pub max_nonce_ahead: u32, /// Max number of VM instances to be concurrently spawned by the API server. /// This option can be tweaked down if the API server is running out of memory. - vm_concurrency_limit: Option<usize>, - /// Smart contract source code cache size for the API server. - factory_deps_cache_size_mb: Option<usize>, + #[serde(default = "OptionalENConfig::default_vm_concurrency_limit")] + pub vm_concurrency_limit: usize, + /// Smart contract bytecode cache size for the API server. Default value is 128 MiB. + #[serde(default = "OptionalENConfig::default_factory_deps_cache_size_mb")] + factory_deps_cache_size_mb: usize, + /// Initial writes cache size for the API server. Default value is 32 MiB. + #[serde(default = "OptionalENConfig::default_initial_writes_cache_size_mb")] + initial_writes_cache_size_mb: usize, + /// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest + /// values cache will be disabled. + #[serde(default = "OptionalENConfig::default_latest_values_cache_size_mb")] + latest_values_cache_size_mb: usize, + /// Enabled JSON RPC API namespaces. + api_namespaces: Option<Vec<Namespace>>, + + // Gas estimation config + /// The factor by which to scale the gasLimit + #[serde(default = "OptionalENConfig::default_estimate_gas_scale_factor")] + pub estimate_gas_scale_factor: f64, + /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. + #[serde(default = "OptionalENConfig::default_estimate_gas_acceptable_overestimation")] + pub estimate_gas_acceptable_overestimation: u32, + /// The multiplier to use when suggesting gas price. Should be higher than one, + /// otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block + #[serde(default = "OptionalENConfig::default_gas_price_scale_factor")] + pub gas_price_scale_factor: f64, + + // Merkle tree config + #[serde(default = "OptionalENConfig::default_metadata_calculator_delay")] + metadata_calculator_delay: u64, + /// Maximum number of L1 batches to be processed by the Merkle tree at a time. + #[serde( + alias = "max_blocks_per_tree_batch", + default = "OptionalENConfig::default_max_l1_batches_per_tree_iter" + )] + pub max_l1_batches_per_tree_iter: usize, + /// Chunk size for multi-get operations.
Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + #[serde(default = "OptionalENConfig::default_merkle_tree_multi_get_chunk_size")] + pub merkle_tree_multi_get_chunk_size: usize, + /// Capacity of the block cache for the Merkle tree RocksDB. Reasonable values range from ~100 MiB to several GiB. + /// The default value is 128 MiB. + #[serde(default = "OptionalENConfig::default_merkle_tree_block_cache_size_mb")] + merkle_tree_block_cache_size_mb: usize, + + // Other config settings + /// Port on which the Prometheus exporter server is listening. + pub prometheus_port: Option<u16>, + /// Whether to try running EN with MultiVM. + #[serde(default)] + pub experimental_multivm_support: bool, } impl OptionalENConfig { - pub fn polling_interval(&self) -> Duration { - Duration::from_millis(self.pubsub_polling_interval.unwrap_or(200)) + const fn default_filters_limit() -> usize { + 10_000 + } + + const fn default_subscriptions_limit() -> usize { + 10_000 + } + + const fn default_req_entities_limit() -> usize { + 1_024 } - pub fn req_entities_limit(&self) -> usize { - self.req_entities_limit.unwrap_or(1024) + const fn default_max_tx_size() -> usize { + 1_000_000 } - pub fn filters_limit(&self) -> usize { - self.filters_limit.unwrap_or(10000) + const fn default_polling_interval() -> u64 { + 200 } - pub fn subscriptions_limit(&self) -> usize { - self.subscriptions_limit.unwrap_or(10000) + const fn default_estimate_gas_scale_factor() -> f64 { + 1.2 } - pub fn max_tx_size(&self) -> usize { - self.max_tx_size.unwrap_or(1000000) + const fn default_estimate_gas_acceptable_overestimation() -> u32 { + 1_000 } - pub fn estimate_gas_scale_factor(&self) -> f64 { - self.estimate_gas_scale_factor.unwrap_or(1.2) + const fn default_gas_price_scale_factor() -> f64 { + 1.2 } - pub fn estimate_gas_acceptable_overestimation(&self) -> u32 { - self.estimate_gas_acceptable_overestimation.unwrap_or(1000) + const fn default_max_nonce_ahead() -> u32 { + 50 } - pub fn gas_price_scale_factor(&self) -> f64 { - self.gas_price_scale_factor.unwrap_or(1.2) + const fn default_metadata_calculator_delay() -> u64 { + 100 } - pub fn max_nonce_ahead(&self) -> u32 { - self.max_nonce_ahead.unwrap_or(50) + const fn default_max_l1_batches_per_tree_iter() -> usize { + 20 + } + + const fn default_vm_concurrency_limit() -> usize { + // The default limit is large so that it does not create a bottleneck on its own. + // VM execution can still be limited by Tokio runtime parallelism and/or the number + // of DB connections in a pool. + 2_048 + } + + const fn default_factory_deps_cache_size_mb() -> usize { + 128 + } + + const fn default_initial_writes_cache_size_mb() -> usize { + 32 + } + + const fn default_latest_values_cache_size_mb() -> usize { + 128 + } + + const fn default_merkle_tree_multi_get_chunk_size() -> usize { + 500 + } + + const fn default_merkle_tree_block_cache_size_mb() -> usize { + 128 + } + + const fn default_fee_history_limit() -> u64 { + 1_024 + } + + const fn default_max_batch_request_size() -> usize { + 500 // The default limit is chosen to be reasonably permissive.
+ } + + const fn default_max_response_body_size_mb() -> usize { + 10 + } + + pub fn polling_interval(&self) -> Duration { + Duration::from_millis(self.polling_interval) } pub fn metadata_calculator_delay(&self) -> Duration { - Duration::from_millis(self.metadata_calculator_delay.unwrap_or(100)) + Duration::from_millis(self.metadata_calculator_delay) + } + + /// Returns the size of factory dependencies cache in bytes. + pub fn factory_deps_cache_size(&self) -> usize { + self.factory_deps_cache_size_mb * BYTES_IN_MEGABYTE } - pub fn max_blocks_per_tree_batch(&self) -> usize { - self.max_blocks_per_tree_batch.unwrap_or(100) + /// Returns the size of initial writes cache in bytes. + pub fn initial_writes_cache_size(&self) -> usize { + self.initial_writes_cache_size_mb * BYTES_IN_MEGABYTE } - pub fn merkle_tree_throttle(&self) -> Duration { - Duration::from_millis(self.merkle_tree_throttle.unwrap_or(0)) + /// Returns the size of latest values cache in bytes. + pub fn latest_values_cache_size(&self) -> usize { + self.latest_values_cache_size_mb * BYTES_IN_MEGABYTE } - pub fn vm_concurrency_limit(&self) -> Option<usize> { - self.vm_concurrency_limit + /// Returns the size of block cache for Merkle tree in bytes. + pub fn merkle_tree_block_cache_size(&self) -> usize { + self.merkle_tree_block_cache_size_mb * BYTES_IN_MEGABYTE } - pub fn factory_deps_cache_size_mb(&self) -> usize { - // 128MB is the default smart contract code cache size. - self.factory_deps_cache_size_mb.unwrap_or(128) + pub fn api_namespaces(&self) -> Vec<Namespace> { + self.api_namespaces + .clone() + .unwrap_or_else(|| Namespace::NON_DEBUG.to_vec()) + } + + pub fn max_response_body_size(&self) -> usize { + self.max_response_body_size_mb * BYTES_IN_MEGABYTE } } /// This part of the external node config is required for its operation. #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct RequiredENConfig { - /// Default AA hash used at genesis. - pub default_aa_hash: H256, - /// Bootloader hash used at genesis. - pub bootloader_hash: H256, - /// Port on which the HTTP RPC server is listening. pub http_port: u16, /// Port on which the WebSocket RPC server is listening. @@ -197,11 +342,6 @@ pub struct RequiredENConfig { pub state_cache_path: String, /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*new* implementation). pub merkle_tree_path: String, - - pub max_allowed_l2_tx_gas_limit: u32, - pub fee_account_addr: Address, - pub fair_l2_gas_price: u64, - pub validation_computational_gas_limit: u32, } impl RequiredENConfig { @@ -229,13 +369,6 @@ pub struct ExternalNodeConfig { } impl ExternalNodeConfig { - pub fn base_system_contracts_hashes(&self) -> BaseSystemContractsHashes { - BaseSystemContractsHashes { - bootloader: self.required.bootloader_hash, - default_aa: self.required.default_aa_hash, - } - } - /// Loads config from the environment variables and /// fetches contracts addresses from the main node.
pub async fn collect() -> anyhow::Result<Self> { @@ -315,11 +448,11 @@ impl From<ExternalNodeConfig> for InternalApiConfig { Self { l1_chain_id: config.remote.l1_chain_id, l2_chain_id: config.remote.l2_chain_id, - max_tx_size: config.optional.max_tx_size(), - estimate_gas_scale_factor: config.optional.estimate_gas_scale_factor(), + max_tx_size: config.optional.max_tx_size, + estimate_gas_scale_factor: config.optional.estimate_gas_scale_factor, estimate_gas_acceptable_overestimation: config .optional - .estimate_gas_acceptable_overestimation(), + .estimate_gas_acceptable_overestimation, bridge_addresses: BridgeAddresses { l1_erc20_default_bridge: config.remote.l1_erc20_bridge_proxy_addr, l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, @@ -328,7 +461,8 @@ impl From<ExternalNodeConfig> for InternalApiConfig { }, diamond_proxy_addr: config.remote.diamond_proxy_addr, l2_testnet_paymaster_addr: config.remote.l2_testnet_paymaster_addr, - req_entities_limit: config.optional.req_entities_limit(), + req_entities_limit: config.optional.req_entities_limit, + fee_history_limit: config.optional.fee_history_limit, } } } @@ -336,15 +470,21 @@ impl From<ExternalNodeConfig> for InternalApiConfig { impl From<ExternalNodeConfig> for TxSenderConfig { fn from(config: ExternalNodeConfig) -> Self { Self { - fee_account_addr: config.required.fee_account_addr, - gas_price_scale_factor: config.optional.gas_price_scale_factor(), - max_nonce_ahead: config.optional.max_nonce_ahead(), - max_allowed_l2_tx_gas_limit: config.required.max_allowed_l2_tx_gas_limit, - fair_l2_gas_price: config.required.fair_l2_gas_price, + // Fee account address does not matter for the EN operation, since + // actual fee distribution is handled by the main node. + fee_account_addr: "0xfee0000000000000000000000000000000000000" + .parse() + .unwrap(), + gas_price_scale_factor: config.optional.gas_price_scale_factor, + max_nonce_ahead: config.optional.max_nonce_ahead, + fair_l2_gas_price: config.remote.fair_l2_gas_price, vm_execution_cache_misses_limit: config.optional.vm_execution_cache_misses_limit, - validation_computational_gas_limit: config.required.validation_computational_gas_limit, - default_aa: config.required.default_aa_hash, - bootloader: config.required.bootloader_hash, + default_aa: config.remote.default_aa_hash, + bootloader: config.remote.bootloader_hash, + // We set these values to the maximum since we don't know the actual values + // and they will be enforced by the main node anyway. + max_allowed_l2_tx_gas_limit: u32::MAX, + validation_computational_gas_limit: u32::MAX, } } } diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs new file mode 100644 index 000000000000..243be39a1139 --- /dev/null +++ b/core/bin/external_node/src/config/tests.rs @@ -0,0 +1,73 @@ +//! Tests for EN configuration.
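+//! +//! A rough sketch of the mapping exercised below, assuming stock `envy` behavior: `envy::prefixed("EN_")` strips the `EN_` prefix and matches the lower-cased remainder against field names, so `EN_FILTERS_LIMIT=5000` deserializes into `filters_limit: 5000`, while absent variables fall back to the `#[serde(default = "...")]` functions defined in this module.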
+ +use super::*; + +#[test] +fn parsing_optional_config_from_empty_env() { + let config: OptionalENConfig = envy::prefixed("EN_").from_iter([]).unwrap(); + assert_eq!(config.filters_limit, 10_000); + assert_eq!(config.subscriptions_limit, 10_000); + assert_eq!(config.fee_history_limit, 1_024); + assert_eq!(config.polling_interval(), Duration::from_millis(200)); + assert_eq!(config.max_tx_size, 1_000_000); + assert_eq!( + config.metadata_calculator_delay(), + Duration::from_millis(100) + ); + assert_eq!(config.max_nonce_ahead, 50); + assert_eq!(config.estimate_gas_scale_factor, 1.2); + assert_eq!(config.vm_concurrency_limit, 2_048); + assert_eq!(config.factory_deps_cache_size(), 128 * BYTES_IN_MEGABYTE); + assert_eq!(config.latest_values_cache_size(), 128 * BYTES_IN_MEGABYTE); + assert_eq!(config.merkle_tree_multi_get_chunk_size, 500); + assert_eq!( + config.merkle_tree_block_cache_size(), + 128 * BYTES_IN_MEGABYTE + ); + assert_eq!(config.max_response_body_size(), 10 * BYTES_IN_MEGABYTE); +} + +#[test] +fn parsing_optional_config_from_env() { + let env_vars = [ + ("EN_FILTERS_LIMIT", "5000"), + ("EN_SUBSCRIPTIONS_LIMIT", "20000"), + ("EN_FEE_HISTORY_LIMIT", "1000"), + ("EN_PUBSUB_POLLING_INTERVAL", "500"), + ("EN_MAX_TX_SIZE", "1048576"), + ("EN_METADATA_CALCULATOR_DELAY", "50"), + ("EN_MAX_NONCE_AHEAD", "100"), + ("EN_ESTIMATE_GAS_SCALE_FACTOR", "1.5"), + ("EN_VM_CONCURRENCY_LIMIT", "1000"), + ("EN_FACTORY_DEPS_CACHE_SIZE_MB", "64"), + ("EN_LATEST_VALUES_CACHE_SIZE_MB", "50"), + ("EN_MERKLE_TREE_MULTI_GET_CHUNK_SIZE", "1000"), + ("EN_MERKLE_TREE_BLOCK_CACHE_SIZE_MB", "32"), + ("EN_MAX_RESPONSE_BODY_SIZE_MB", "1"), + ]; + let env_vars = env_vars + .into_iter() + .map(|(name, value)| (name.to_owned(), value.to_owned())); + + let config: OptionalENConfig = envy::prefixed("EN_").from_iter(env_vars).unwrap(); + assert_eq!(config.filters_limit, 5_000); + assert_eq!(config.subscriptions_limit, 20_000); + assert_eq!(config.fee_history_limit, 1_000); + assert_eq!(config.polling_interval(), Duration::from_millis(500)); + assert_eq!(config.max_tx_size, BYTES_IN_MEGABYTE); + assert_eq!( + config.metadata_calculator_delay(), + Duration::from_millis(50) + ); + assert_eq!(config.max_nonce_ahead, 100); + assert_eq!(config.estimate_gas_scale_factor, 1.5); + assert_eq!(config.vm_concurrency_limit, 1_000); + assert_eq!(config.factory_deps_cache_size(), 64 * BYTES_IN_MEGABYTE); + assert_eq!(config.latest_values_cache_size(), 50 * BYTES_IN_MEGABYTE); + assert_eq!(config.merkle_tree_multi_get_chunk_size, 1_000); + assert_eq!( + config.merkle_tree_block_cache_size(), + 32 * BYTES_IN_MEGABYTE + ); + assert_eq!(config.max_response_body_size(), BYTES_IN_MEGABYTE); +} diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index e5f9d9ef8a48..28d60c4d3a44 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -1,17 +1,16 @@ -use prometheus_exporter::run_prometheus_exporter; +use anyhow::Context; use tokio::{sync::watch, task, time::sleep}; -use zksync_state::FactoryDepsCache; -use config::ExternalNodeConfig; use std::{sync::Arc, time::Duration}; -use zksync_basic_types::Address; -use zksync_config::DBConfig; -use zksync_core::api_server::healthcheck::HealthCheckHandle; +use prometheus_exporter::run_prometheus_exporter; +use zksync_basic_types::Address; use zksync_core::{ api_server::{ - execution_sandbox::VmConcurrencyLimiter, healthcheck, tx_sender::TxSenderBuilder, - web3::ApiBuilder, + execution_sandbox::VmConcurrencyLimiter, + 
healthcheck::HealthCheckHandle, + tx_sender::{ApiContracts, TxSenderBuilder}, + web3::{ApiBuilder, Namespace}, }, block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}, consistency_checker::ConsistencyChecker, @@ -21,7 +20,10 @@ use zksync_core::{ }, reorg_detector::ReorgDetector, setup_sigint_handler, - state_keeper::{MainBatchExecutorBuilder, SealManager, ZkSyncStateKeeper}, + state_keeper::{ + L1BatchExecutorBuilder, MainBatchExecutorBuilder, MultiVMConfig, SealManager, + ZkSyncStateKeeper, + }, sync_layer::{ batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, @@ -30,22 +32,28 @@ use zksync_core::{ }; use zksync_dal::{connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; use zksync_health_check::CheckHealth; +use zksync_state::PostgresStorageCaches; use zksync_storage::RocksDB; use zksync_utils::wait_for_tasks::wait_for_tasks; mod config; +use crate::config::ExternalNodeConfig; + /// Creates the state keeper configured to work in the external node mode. +#[allow(clippy::too_many_arguments)] async fn build_state_keeper( action_queue: ActionQueue, state_keeper_db_path: String, - main_node_url: String, + config: &ExternalNodeConfig, connection_pool: ConnectionPool, sync_state: SyncState, l2_erc20_bridge_addr: Address, stop_receiver: watch::Receiver<bool>, + use_multivm: bool, ) -> ZkSyncStateKeeper { let en_sealer = ExternalNodeSealer::new(action_queue.clone()); + let main_node_url = config.required.main_node_url().unwrap(); let sealer = SealManager::custom( None, vec![en_sealer.clone().into_unconditional_batch_seal_criterion()], @@ -58,16 +66,31 @@ async fn build_state_keeper( // node has already executed the transaction, then the external node must execute it too. let max_allowed_l2_tx_gas_limit = u32::MAX.into(); let validation_computational_gas_limit = u32::MAX; - // We don't need call traces on the external node. - let save_call_traces = false; + // We only need call traces on the external node if the `debug_` namespace is enabled. + let save_call_traces = config.optional.api_namespaces().contains(&Namespace::Debug); - let batch_executor_base: Box<dyn L1BatchExecutorBuilder> = + // Only supply MultiVM config if the corresponding feature is enabled. + let multivm_config = use_multivm.then(|| { + vlog::error!( + "Using experimental MultiVM support! The feature is not ready, use at your own risk!"
+ ); + if main_node_url.contains("mainnet") { + MultiVMConfig::mainnet_config_wip() + } else if main_node_url.contains("testnet") { + MultiVMConfig::testnet_config_wip() + } else { + panic!("MultiVM can only be configured for mainnet/testnet now") + } + }); + + let batch_executor_base: Box<dyn L1BatchExecutorBuilder> = Box::new(MainBatchExecutorBuilder::new( state_keeper_db_path, connection_pool.clone(), max_allowed_l2_tx_gas_limit, save_call_traces, validation_computational_gas_limit, + multivm_config, )); let io = Box::new( @@ -106,15 +129,18 @@ async fn init_tasks( let state_keeper = build_state_keeper( action_queue.clone(), config.required.state_cache_path.clone(), - main_node_url.to_string(), + &config, connection_pool.clone(), sync_state.clone(), config.remote.l2_erc20_bridge_addr, stop_receiver.clone(), + config.optional.experimental_multivm_support, ) .await; + + let singleton_pool_builder = ConnectionPool::singleton(DbVariant::Master); let fetcher = MainNodeFetcher::new( - ConnectionPool::new(Some(1), DbVariant::Master).await, + singleton_pool_builder.build().await, &main_node_url, action_queue.clone(), sync_state.clone(), @@ -126,8 +152,9 @@ async fn init_tasks( db_path: &config.required.merkle_tree_path, mode: MetadataCalculatorModeConfig::Lightweight, delay_interval: config.optional.metadata_calculator_delay(), - max_block_batch: config.optional.max_blocks_per_tree_batch(), - throttle_interval: config.optional.merkle_tree_throttle(), + max_l1_batches_per_iter: config.optional.max_l1_batches_per_tree_iter, + multi_get_chunk_size: config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: config.optional.merkle_tree_block_cache_size(), }) .await; healthchecks.push(Box::new(metadata_calculator.tree_health_check())); @@ -138,28 +165,31 @@ async fn init_tasks( .eth_client_url() .expect("L1 client URL is incorrect"), 10, - ConnectionPool::new(Some(1), DbVariant::Master).await, + singleton_pool_builder.build().await, ); - let batch_status_updater = BatchStatusUpdater::new( - &main_node_url, - ConnectionPool::new(Some(1), DbVariant::Master).await, - ) - .await; + let batch_status_updater = + BatchStatusUpdater::new(&main_node_url, singleton_pool_builder.build().await).await; // Run the components.
let tree_stop_receiver = stop_receiver.clone(); - let tree_pool = ConnectionPool::new(Some(1), DbVariant::Master).await; - let prover_tree_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let tree_pool = singleton_pool_builder.build().await; + let prover_tree_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; let tree_handle = task::spawn(metadata_calculator.run(tree_pool, prover_tree_pool, tree_stop_receiver)); - let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone())); + + let consistency_checker_handle = if !config.optional.experimental_multivm_support { + Some(tokio::spawn(consistency_checker.run(stop_receiver.clone()))) + } else { + None + }; + let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone())); let sk_handle = task::spawn(state_keeper.run()); let fetcher_handle = tokio::spawn(fetcher.run()); let gas_adjuster_handle = tokio::spawn(gas_adjuster.clone().run(stop_receiver.clone())); - let tx_sender = { + let (tx_sender, vm_barrier, cache_update_handle) = { let mut tx_sender_builder = TxSenderBuilder::new(config.clone().into(), connection_pool.clone()) .with_main_connection_pool(connection_pool.clone()) @@ -170,52 +200,64 @@ async fn init_tasks( tx_sender_builder = tx_sender_builder.with_rate_limiter(tps_limit); }; - let vm_concurrency_limiter = - VmConcurrencyLimiter::new(config.optional.vm_concurrency_limit()); - - let factory_deps_cache = FactoryDepsCache::new( - "factory_deps_cache", - config.optional.factory_deps_cache_size_mb(), + let max_concurrency = config.optional.vm_concurrency_limit; + let (vm_concurrency_limiter, vm_barrier) = VmConcurrencyLimiter::new(max_concurrency); + let mut storage_caches = PostgresStorageCaches::new( + config.optional.factory_deps_cache_size() as u64, + config.optional.initial_writes_cache_size() as u64, ); - - tx_sender_builder + let latest_values_cache_size = config.optional.latest_values_cache_size() as u64; + let cache_update_handle = (latest_values_cache_size > 0).then(|| { + task::spawn_blocking(storage_caches.configure_storage_values_cache( + latest_values_cache_size, + connection_pool.clone(), + tokio::runtime::Handle::current(), + )) + }); + + let tx_sender = tx_sender_builder .build( gas_adjuster, - config.required.default_aa_hash, Arc::new(vm_concurrency_limiter), - factory_deps_cache.clone(), + ApiContracts::load_from_disk(), + storage_caches, ) - .await + .await; + (tx_sender, vm_barrier, cache_update_handle) }; let (http_api_handle, http_api_healthcheck) = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) .http(config.required.http_port) - .with_filter_limit(config.optional.filters_limit()) + .with_filter_limit(config.optional.filters_limit) + .with_batch_request_size_limit(config.optional.max_batch_request_size) + .with_response_body_size_limit(config.optional.max_response_body_size()) .with_threads(config.required.threads_per_server) - .with_tx_sender(tx_sender.clone()) + .with_tx_sender(tx_sender.clone(), vm_barrier.clone()) .with_sync_state(sync_state.clone()) + .enable_api_namespaces(config.optional.api_namespaces()) .build(stop_receiver.clone()) .await; let (mut task_handles, ws_api_healthcheck) = - ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool) + ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) .ws(config.required.ws_port) - .with_filter_limit(config.optional.filters_limit()) - .with_subscriptions_limit(config.optional.subscriptions_limit()) + 
.with_filter_limit(config.optional.filters_limit) + .with_subscriptions_limit(config.optional.subscriptions_limit) + .with_batch_request_size_limit(config.optional.max_batch_request_size) + .with_response_body_size_limit(config.optional.max_response_body_size()) .with_polling_interval(config.optional.polling_interval()) .with_threads(config.required.threads_per_server) - .with_tx_sender(tx_sender) + .with_tx_sender(tx_sender, vm_barrier) .with_sync_state(sync_state) + .enable_api_namespaces(config.optional.api_namespaces()) .build(stop_receiver.clone()) .await; healthchecks.push(Box::new(ws_api_healthcheck)); healthchecks.push(Box::new(http_api_healthcheck)); - healthchecks.push(Box::new(ConnectionPoolHealthCheck::new( - ConnectionPool::new(Some(1), DbVariant::Master).await, - ))); - let healthcheck_handle = healthcheck::start_server_thread_detached( + healthchecks.push(Box::new(ConnectionPoolHealthCheck::new(connection_pool))); + let healthcheck_handle = HealthCheckHandle::spawn_server( ([0, 0, 0, 0], config.required.healthcheck_port).into(), healthchecks, ); @@ -225,14 +267,17 @@ async fn init_tasks( } task_handles.extend(http_api_handle); + task_handles.extend(cache_update_handle); task_handles.extend([ sk_handle, fetcher_handle, updater_handle, tree_handle, gas_adjuster_handle, - consistency_checker_handle, ]); + if let Some(consistency_checker) = consistency_checker_handle { + task_handles.push(consistency_checker); + } (task_handles, stop_sender, healthcheck_handle) } @@ -241,8 +286,10 @@ async fn shutdown_components( stop_sender: watch::Sender<bool>, healthcheck_handle: HealthCheckHandle, ) { - let _ = stop_sender.send(true); - RocksDB::await_rocksdb_termination(); + stop_sender.send(true).ok(); + task::spawn_blocking(RocksDB::await_rocksdb_termination) + .await + .unwrap(); // Sleep for some time to let components gracefully stop.
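 // (Note: the stop signal sent above is only a request; each task observes its `stop_receiver` clone and winds down on its own, which is why a fixed grace period is used below rather than awaiting every task handle.)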
sleep(Duration::from_secs(10)).await; healthcheck_handle.stop().await; @@ -262,7 +309,7 @@ async fn main() -> anyhow::Result<()> { .main_node_url() .expect("Main node URL is incorrect"); - let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; + let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; let sigint_receiver = setup_sigint_handler(); vlog::warn!("The external node is in the alpha phase, and should be used with caution."); @@ -274,10 +321,10 @@ async fn main() -> anyhow::Result<()> { perform_genesis_if_needed( &mut connection_pool.access_storage().await, config.remote.l2_chain_id, - config.base_system_contracts_hashes(), main_node_url.clone(), ) - .await; + .await + .context("Performing genesis failed")?; let (task_handles, stop_sender, health_check_handle) = init_tasks(config.clone(), connection_pool.clone()).await; @@ -285,12 +332,6 @@ async fn main() -> anyhow::Result<()> { let reorg_detector = ReorgDetector::new(&main_node_url, connection_pool.clone()); let reorg_detector_handle = tokio::spawn(reorg_detector.run()); - let reverter_config = DBConfig { - state_keeper_db_path: config.required.state_cache_path.clone(), - new_merkle_tree_ssd_path: config.required.merkle_tree_path.clone(), - ..Default::default() - }; - let particular_crypto_alerts = None; let graceful_shutdown = None::<futures::future::Ready<()>>; let tasks_allowed_to_finish = false; @@ -303,7 +344,14 @@ async fn main() -> anyhow::Result<()> { if let Ok(last_correct_batch) = last_correct_batch { vlog::info!("Performing rollback to block {}", last_correct_batch); shutdown_components(stop_sender, health_check_handle).await; - BlockReverter::new(reverter_config, None, connection_pool, L1ExecutedBatchesRevert::Allowed) + let reverter = BlockReverter::new( + config.required.state_cache_path, + config.required.merkle_tree_path, + None, + connection_pool, + L1ExecutedBatchesRevert::Allowed, + ); + reverter .rollback_db(last_correct_batch, BlockReverterFlags::all()) .await; vlog::info!("Rollback successfully completed, the node has to restart to continue working"); diff --git a/core/bin/l1_tx_effective_gas_price_migration/src/main.rs b/core/bin/l1_tx_effective_gas_price_migration/src/main.rs deleted file mode 100644 index ad5815d70648..000000000000 --- a/core/bin/l1_tx_effective_gas_price_migration/src/main.rs +++ /dev/null @@ -1,63 +0,0 @@ -use structopt::StructOpt; -use zksync_dal::connection::DbVariant; -use zksync_dal::ConnectionPool; - -#[derive(Debug, StructOpt)] -#[structopt( - name = "DB migration for setting correct effective_gas_price", - about = "DB migration for setting correct effective_gas_price" -)] -struct Opt { - #[structopt(short = "f", long = "first_post_m6_block")] - first_post_m6_block: u32, -} - -#[tokio::main] -async fn main() { - let opt = Opt::from_args(); - let first_post_m6_block = opt.first_post_m6_block; - println!("first_post_m6_block: {first_post_m6_block}"); - - let pool = ConnectionPool::new(Some(1), DbVariant::Master).await; - let mut storage = pool.access_storage().await; - - const BLOCK_RANGE: u32 = 1000; - println!("Setting effective gas price for pre-M6 transactions"); - - let mut from_block_number = 0; - loop { - if from_block_number >= first_post_m6_block { - break; - } - - let to_block_number = - std::cmp::min(first_post_m6_block - 1, from_block_number + BLOCK_RANGE - 1); - println!("Block range {from_block_number}-{to_block_number}"); - storage - .transactions_dal() - .migrate_l1_txs_effective_gas_price_pre_m6(from_block_number, to_block_number) - .await; -
from_block_number = to_block_number + 1; - } - - println!("Setting effective gas price for post-M6 transactions"); - - let current_block_number = storage.blocks_dal().get_sealed_miniblock_number().await; - let mut from_block_number = first_post_m6_block; - loop { - if from_block_number > current_block_number.0 { - break; - } - - let to_block_number = - std::cmp::min(current_block_number.0, from_block_number + BLOCK_RANGE - 1); - println!("Block range {from_block_number}-{to_block_number}"); - storage - .transactions_dal() - .migrate_l1_txs_effective_gas_price_post_m6(from_block_number, to_block_number) - .await; - - from_block_number = to_block_number + 1; - } -} diff --git a/core/bin/prover/Cargo.lock b/core/bin/prover/Cargo.lock deleted file mode 100644 index 2d38231b9c51..000000000000 --- a/core/bin/prover/Cargo.lock +++ /dev/null @@ -1,6001 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "addchain" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "addr2line" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher", -] - -[[package]] -name = "aes-ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7729c3cde54d67063be556aeac75a81330d802f0259500ca40cb52967f975763" -dependencies = [ - "aes-soft", - "aesni", - "cipher", - "ctr", -] - -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.71" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" - -[[package]] -name = "api" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "bellman_ce", - "cfg-if 1.0.0", - "gpu-prover", - "num_cpus", - "serde", -] - -[[package]] -name = "arr_macro" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a105bfda48707cf19220129e78fca01e9639433ffaef4163546ed8fb04120a5" -dependencies = [ - "arr_macro_impl", - "proc-macro-hack", -] - -[[package]] -name = "arr_macro_impl" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" -dependencies = [ - "proc-macro-hack", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" - -[[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite 0.2.9", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "async-trait" -version = "0.1.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "atoi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616896e05fc0e2649463a93a15183c6a16bf03413a7af88ef1285ddedfa9cda5" -dependencies = [ - "num-traits", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backon" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9" -dependencies = [ - "fastrand", - "futures-core", - "pin-project", - "tokio 1.28.2", -] - -[[package]] -name = "backtrace" -version = "0.3.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "bellman_ce" -version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayvec 0.7.3", - "bit-vec", - "blake2s_const", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.28", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "bigdecimal" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc403c26e6b03005522e6e8053384c4e881dfe5b2bf041c0c2c49be33d64a539" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap", - "env_logger", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.60", - "quote 1.0.28", - "regex", - "rustc-hash", - "shlex", - "which", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -dependencies = [ - "serde", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "blake2-rfc_bellman_edition" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc60350286c7c3db13b98e91dbe5c8b6830a6821bc20af5b0c310ce94d74915" -dependencies = [ - "arrayvec 0.4.12", - "byteorder", - "constant_time_eq", -] - -[[package]] -name = "blake2s_const" -version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding", - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "bumpalo" -version = "3.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" 
-version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "rustc-serialize", - "serde", - "time 0.1.43", - "wasm-bindgen", - "winapi", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - -[[package]] -name = "circuit_testing" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#abd44b507840f836da6e084aaacb2ba8a7cb1df6" -dependencies = [ - "bellman_ce", -] - -[[package]] -name = "clang-sys" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" -dependencies = [ - "ethereum-types", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "handlebars", - "hex", - "paste", - "rescue_poseidon", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "codegen" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff61280aed771c3070e7dcc9e050c66f1eb1e3b96431ba66f9f74641d02fc41d" -dependencies = [ - "indexmap", -] - -[[package]] -name = "combine" -version = "4.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" -dependencies = [ - "bytes 1.4.0", - "memchr", -] - -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - -[[package]] -name = "const-oid" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" - -[[package]] -name = "cpufeatures" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" -dependencies = [ - "libc", -] - -[[package]] -name = "crc" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" - -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", -] - -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.15", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - 
"maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.60", - "quote 1.0.28", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher", -] - -[[package]] -name = "ctrlc" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" -dependencies = [ - "nix", - "windows-sys 0.48.0", -] - -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.60", - "quote 1.0.28", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "debugid" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" -dependencies = [ - "serde", - "uuid", -] - -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", - "crypto-bigint 0.3.2", - "pem-rfc7468", -] - -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid 0.9.2", - "zeroize", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - 
-[[package]]
-name = "derive_more"
-version = "0.99.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
-dependencies = [
- "convert_case 0.4.0",
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "rustc_version",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "digest"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
-dependencies = [
- "generic-array",
-]
-
-[[package]]
-name = "digest"
-version = "0.10.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
-dependencies = [
- "block-buffer 0.10.4",
- "crypto-common",
- "subtle",
-]
-
-[[package]]
-name = "dirs"
-version = "3.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309"
-dependencies = [
- "dirs-sys",
-]
-
-[[package]]
-name = "dirs-sys"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
-dependencies = [
- "libc",
- "redox_users",
- "winapi",
-]
-
-[[package]]
-name = "dotenv"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
-
-[[package]]
-name = "ecdsa"
-version = "0.14.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
-dependencies = [
- "der 0.6.1",
- "elliptic-curve",
- "rfc6979",
- "signature",
-]
-
-[[package]]
-name = "either"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "elliptic-curve"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
-dependencies = [
- "base16ct",
- "crypto-bigint 0.4.9",
- "der 0.6.1",
- "digest 0.10.7",
- "ff",
- "generic-array",
- "group",
- "pkcs8 0.9.0",
- "rand_core 0.6.4",
- "sec1",
- "subtle",
- "zeroize",
-]
-
-[[package]]
-name = "encoding_rs"
-version = "0.8.32"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394"
-dependencies = [
- "cfg-if 1.0.0",
-]
-
-[[package]]
-name = "env_logger"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
-dependencies = [
- "atty",
- "humantime",
- "log",
- "regex",
- "termcolor",
-]
-
-[[package]]
-name = "envy"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "errno"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
-dependencies = [
- "errno-dragonfly",
- "libc",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "errno-dragonfly"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
-dependencies = [
- "cc",
- "libc",
-]
-
-[[package]]
-name = "ethabi"
-version = "16.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5"
-dependencies = [
- "ethereum-types",
- "hex",
- "serde",
- "serde_json",
- "sha3 0.9.1",
- "thiserror",
- "uint",
-]
-
-[[package]]
-name = "ethbloom"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8"
-dependencies = [
- "crunchy",
- "fixed-hash",
- "impl-rlp",
- "impl-serde",
- "tiny-keccak 2.0.2",
-]
-
-[[package]]
-name = "ethereum-types"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf"
-dependencies = [
- "ethbloom",
- "fixed-hash",
- "impl-rlp",
- "impl-serde",
- "primitive-types",
- "uint",
-]
-
-[[package]]
-name = "fastrand"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
-dependencies = [
- "instant",
-]
-
-[[package]]
-name = "ff"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160"
-dependencies = [
- "rand_core 0.6.4",
- "subtle",
-]
-
-[[package]]
-name = "ff_ce"
-version = "0.14.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc"
-dependencies = [
- "byteorder",
- "ff_derive_ce",
- "hex",
- "rand 0.4.6",
- "serde",
-]
-
-[[package]]
-name = "ff_derive_ce"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20"
-dependencies = [
- "num-bigint 0.4.3",
- "num-integer",
- "num-traits",
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "serde",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "findshlibs"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
-dependencies = [
- "cc",
- "lazy_static",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "fixed-hash"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c"
-dependencies = [
- "byteorder",
- "rand 0.8.5",
- "rustc-hex",
- "static_assertions",
-]
-
-[[package]]
-name = "fixedbitset"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
-
-[[package]]
-name = "fnv"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
-
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
-[[package]]
-name = "form_urlencoded"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
-dependencies = [
- "percent-encoding",
-]
-
-[[package]]
-name = "franklin-crypto"
-version = "0.0.5"
-source = "git+ssh://git@github.com/matter-labs/franklin-crypto?branch=dev#5922873d25ecec827cd60420ca8cd84a188bb965"
-dependencies = [
- "arr_macro",
- "bellman_ce",
- "bit-vec",
- "blake2 0.9.2",
- "blake2-rfc_bellman_edition",
- "blake2s_simd",
- "byteorder",
- "digest 0.9.0",
- "hex",
- "indexmap",
- "itertools",
- "lazy_static",
- "num-bigint 0.4.3",
- "num-derive 0.2.5",
- "num-integer",
- "num-traits",
- "rand 0.4.6",
- "serde",
- "sha2 0.9.9",
- "sha3 0.9.1",
- "smallvec",
- "splitmut",
- "tiny-keccak 1.5.0",
-]
-
-[[package]]
-name = "franklin-crypto"
-version = "0.0.5"
-source = "git+https://github.com/matter-labs/franklin-crypto?branch=dev#5922873d25ecec827cd60420ca8cd84a188bb965"
-dependencies = [
- "arr_macro",
- "bellman_ce",
- "bit-vec",
- "blake2 0.9.2",
- "blake2-rfc_bellman_edition",
- "blake2s_simd",
- "byteorder",
- "digest 0.9.0",
- "hex",
- "indexmap",
- "itertools",
- "lazy_static",
- "num-bigint 0.4.3",
- "num-derive 0.2.5",
- "num-integer",
- "num-traits",
- "rand 0.4.6",
- "serde",
- "sha2 0.9.9",
- "sha3 0.9.1",
- "smallvec",
- "splitmut",
- "tiny-keccak 1.5.0",
-]
-
-[[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-
-[[package]]
-name = "funty"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7"
-
-[[package]]
-name = "futures"
-version = "0.1.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678"
-
-[[package]]
-name = "futures"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-executor",
- "futures-io",
- "futures-sink",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-channel"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
-dependencies = [
- "futures-core",
- "futures-sink",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
-
-[[package]]
-name = "futures-executor"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0"
-dependencies = [
- "futures-core",
- "futures-task",
- "futures-util",
- "num_cpus",
-]
-
-[[package]]
-name = "futures-intrusive"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
-dependencies = [
- "futures-core",
- "lock_api",
- "parking_lot 0.11.2",
-]
-
-[[package]]
-name = "futures-io"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
-
-[[package]]
-name = "futures-locks"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50c4e684ddb2d8a4db5ca8a02b35156da129674ba4412b6f528698d58c594954"
-dependencies = [
- "futures 0.3.28",
- "tokio 0.2.25",
-]
-
-[[package]]
-name = "futures-macro"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 2.0.18",
-]
-
-[[package]]
-name = "futures-sink"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
-
-[[package]]
-name = "futures-task"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
-
-[[package]]
-name = "futures-timer"
-version = "3.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
-
-[[package]]
-name = "futures-util"
-version = "0.3.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
-dependencies = [
- "futures 0.1.31",
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-macro",
- "futures-sink",
- "futures-task",
- "memchr",
- "pin-project-lite 0.2.9",
- "pin-utils",
- "slab",
-]
-
-[[package]]
-name = "generic-array"
-version = "0.14.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
-dependencies = [
- "typenum",
- "version_check",
-]
-
-[[package]]
-name = "getrandom"
-version = "0.1.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
-dependencies = [
- "cfg-if 1.0.0",
- "libc",
- "wasi 0.9.0+wasi-snapshot-preview1",
-]
-
-[[package]]
-name = "getrandom"
-version = "0.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
-dependencies = [
- "cfg-if 1.0.0",
- "libc",
- "wasi 0.11.0+wasi-snapshot-preview1",
-]
-
-[[package]]
-name = "gimli"
-version = "0.27.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
-
-[[package]]
-name = "glob"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
-
-[[package]]
-name = "google-cloud-auth"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb"
-dependencies = [
- "async-trait",
- "base64 0.21.2",
- "google-cloud-metadata",
- "google-cloud-token",
- "home",
- "jsonwebtoken",
- "reqwest",
- "serde",
- "serde_json",
- "thiserror",
- "time 0.3.22",
- "tokio 1.28.2",
- "tracing",
- "urlencoding",
-]
-
-[[package]]
-name = "google-cloud-metadata"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f"
-dependencies = [
- "reqwest",
- "thiserror",
- "tokio 1.28.2",
-]
-
-[[package]]
-name = "google-cloud-storage"
-version = "0.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932"
-dependencies = [
- "async-stream",
- "base64 0.21.2",
- "bytes 1.4.0",
- "futures-util",
- "google-cloud-auth",
- "google-cloud-metadata",
- "google-cloud-token",
- "hex",
- "once_cell",
- "percent-encoding",
- "regex",
- "reqwest",
- "ring",
- "rsa",
- "serde",
- "serde_json",
- "sha2 0.10.6",
- "thiserror",
- "time 0.3.22",
- "tokio 1.28.2",
- "tracing",
- "url",
-]
-
-[[package]]
-name = "google-cloud-token"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a"
-dependencies = [
- "async-trait",
-]
-
-[[package]]
-name = "gpu-ffi"
-version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a"
-dependencies = [
- "bindgen",
- "crossbeam 0.7.3",
- "derivative",
- "futures 0.3.28",
- "futures-locks",
- "num_cpus",
-]
-
-[[package]]
-name = "gpu-prover"
-version = "0.1.0"
-source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a"
-dependencies = [
- "bit-vec",
- "cfg-if 1.0.0",
- "crossbeam 0.7.3",
- "franklin-crypto 0.0.5 (git+ssh://git@github.com/matter-labs/franklin-crypto?branch=dev)",
- "gpu-ffi",
- "itertools",
- "num_cpus",
- "rand 0.4.6",
- "serde",
-]
-
-[[package]]
-name = "group"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
-dependencies = [
- "ff",
- "rand_core 0.6.4",
- "subtle",
-]
-
-[[package]]
-name = "h2"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782"
-dependencies = [
- "bytes 1.4.0",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http",
- "indexmap",
- "slab",
- "tokio 1.28.2",
- "tokio-util 0.7.8",
- "tracing",
-]
-
-[[package]]
-name = "handlebars"
-version = "4.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d"
-dependencies = [
- "log",
- "pest",
- "pest_derive",
- "serde",
- "serde_json",
- "thiserror",
-]
-
-[[package]]
-name = "hashbrown"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
-dependencies = [
- "ahash",
-]
-
-[[package]]
-name = "hashbrown"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-dependencies = [
- "ahash",
-]
-
-[[package]]
-name = "hashlink"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf"
-dependencies = [
- "hashbrown 0.11.2",
-]
-
-[[package]]
-name = "headers"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584"
-dependencies = [
- "base64 0.13.1",
- "bitflags 1.3.2",
- "bytes 1.4.0",
- "headers-core",
- "http",
- "httpdate",
- "mime",
- "sha1",
-]
-
-[[package]]
-name = "headers-core"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
-dependencies = [
- "http",
-]
-
-[[package]]
-name = "heck"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
-dependencies = [
- "unicode-segmentation",
-]
-
-[[package]]
-name = "heck"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
-
-[[package]]
-name = "hex"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
-
-[[package]]
-name = "hmac"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15"
-dependencies = [
- "crypto-mac 0.10.1",
- "digest 0.9.0",
-]
-
-[[package]]
-name = "hmac"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
-dependencies = [
- "crypto-mac 0.11.1",
- "digest 0.9.0",
-]
-
-[[package]]
-name = "hmac"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
-dependencies = [
- "digest 0.10.7",
-]
-
-[[package]]
-name = "home"
-version = "0.5.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
-dependencies = [
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "hostname"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
-dependencies = [
- "libc",
- "match_cfg",
- "winapi",
-]
-
-[[package]]
-name = "http"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
-dependencies = [
- "bytes 1.4.0",
- "fnv",
- "itoa 1.0.6",
-]
-
-[[package]]
-name = "http-body"
-version = "0.4.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
-dependencies = [
- "bytes 1.4.0",
- "http",
- "pin-project-lite 0.2.9",
-]
-
-[[package]]
-name = "httparse"
-version = "1.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
-
-[[package]]
-name = "httpdate"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
-
-[[package]]
-name = "humantime"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
-
-[[package]]
-name = "hyper"
-version = "0.14.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4"
-dependencies = [
- "bytes 1.4.0",
- "futures-channel",
- "futures-core",
- "futures-util",
- "h2",
- "http",
- "http-body",
- "httparse",
- "httpdate",
- "itoa 1.0.6",
- "pin-project-lite 0.2.9",
- "socket2",
- "tokio 1.28.2",
- "tower-service",
- "tracing",
- "want",
-]
-
-[[package]]
-name = "hyper-rustls"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7"
-dependencies = [
- "http",
- "hyper",
- "rustls",
- "tokio 1.28.2",
- "tokio-rustls",
-]
-
-[[package]]
-name = "hyper-timeout"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
-dependencies = [
- "hyper",
- "pin-project-lite 0.2.9",
- "tokio 1.28.2",
- "tokio-io-timeout",
-]
-
-[[package]]
-name = "hyper-tls"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
-dependencies = [
- "bytes 1.4.0",
- "hyper",
- "native-tls",
- "tokio 1.28.2",
- "tokio-native-tls",
-]
-
-[[package]]
-name = "iana-time-zone"
-version = "0.1.57"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
-dependencies = [
- "android_system_properties",
- "core-foundation-sys",
- "iana-time-zone-haiku",
- "js-sys",
- "wasm-bindgen",
- "windows",
-]
-
-[[package]]
-name = "iana-time-zone-haiku"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
-dependencies = [
- "cc",
-]
-
-[[package]]
-name = "ident_case"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
-
-[[package]]
-name = "idna"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
-dependencies = [
- "matches",
- "unicode-bidi",
- "unicode-normalization",
-]
-
-[[package]]
-name = "idna"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
-dependencies = [
- "unicode-bidi",
- "unicode-normalization",
-]
-
-[[package]]
-name = "impl-codec"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443"
-dependencies = [
- "parity-scale-codec",
-]
-
-[[package]]
-name = "impl-rlp"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808"
-dependencies = [
- "rlp",
-]
-
-[[package]]
-name = "impl-serde"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "impl-trait-for-tuples"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "indexmap"
-version = "1.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
-dependencies = [
- "autocfg 1.1.0",
- "hashbrown 0.12.3",
-]
-
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if 1.0.0",
-]
-
-[[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.1",
- "libc",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "ipnet"
-version = "2.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f"
-
-[[package]]
-name = "ipnetwork"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b"
-
-[[package]]
-name = "itertools"
-version = "0.10.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
-[[package]]
-name = "itoa"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
-
-[[package]]
-name = "js-sys"
-version = "0.3.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
-dependencies = [
- "wasm-bindgen",
-]
-
-[[package]]
-name = "jsonrpc-core"
-version = "18.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb"
-dependencies = [
- "futures 0.3.28",
- "futures-executor",
- "futures-util",
- "log",
- "serde",
- "serde_derive",
- "serde_json",
-]
-
-[[package]]
-name = "jsonwebtoken"
-version = "8.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
-dependencies = [
- "base64 0.21.2",
- "pem",
- "ring",
- "serde",
- "serde_json",
- "simple_asn1",
-]
-
-[[package]]
-name = "k256"
-version = "0.11.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
-dependencies = [
- "cfg-if 1.0.0",
- "ecdsa",
- "elliptic-curve",
- "sha2 0.10.6",
-]
-
-[[package]]
-name = "keccak"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940"
-dependencies = [
- "cpufeatures",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-dependencies = [
- "spin",
-]
-
-[[package]]
-name = "lazycell"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
-
-[[package]]
-name = "libc"
-version = "0.2.146"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b"
-
-[[package]]
-name = "libloading"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
-dependencies = [
- "cfg-if 1.0.0",
- "winapi",
-]
-
-[[package]]
-name = "libm"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
-
-[[package]]
-name = "local-ip-address"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa"
-dependencies = [
- "libc",
- "neli",
- "thiserror",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "lock_api"
-version = "0.4.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
-dependencies = [
- "autocfg 1.1.0",
- "scopeguard",
-]
-
-[[package]]
-name = "log"
-version = "0.4.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
-
-[[package]]
-name = "mach"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "match_cfg"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
-
-[[package]]
-name = "matchers"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
-dependencies = [
- "regex-automata",
-]
-
-[[package]]
-name = "matches"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
-
-[[package]]
-name = "maybe-uninit"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-
-[[package]]
-name = "md-5"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15"
-dependencies = [
- "block-buffer 0.9.0",
- "digest 0.9.0",
- "opaque-debug",
-]
-
-[[package]]
-name = "memchr"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
-
-[[package]]
-name = "memoffset"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
-dependencies = [
- "autocfg 1.1.0",
-]
-
-[[package]]
-name = "memoffset"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
-dependencies = [
- "autocfg 1.1.0",
-]
-
-[[package]]
-name = "metrics"
-version = "0.20.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849"
-dependencies = [
- "ahash",
- "metrics-macros",
- "portable-atomic 0.3.20",
-]
-
-[[package]]
-name = "metrics-exporter-prometheus"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70"
-dependencies = [
- "hyper",
- "indexmap",
- "ipnet",
- "metrics",
- "metrics-util",
- "parking_lot 0.12.1",
- "portable-atomic 0.3.20",
- "quanta",
- "thiserror",
- "tokio 1.28.2",
- "tracing",
-]
-
-[[package]]
-name = "metrics-macros"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "metrics-util"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a"
-dependencies = [
- "crossbeam-epoch 0.9.15",
- "crossbeam-utils 0.8.16",
- "hashbrown 0.12.3",
- "metrics",
- "num_cpus",
- "parking_lot 0.12.1",
- "portable-atomic 0.3.20",
- "quanta",
- "sketches-ddsketch",
-]
-
-[[package]]
-name = "mime"
-version = "0.3.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
-
-[[package]]
-name = "mime_guess"
-version = "2.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
-dependencies = [
- "mime",
- "unicase",
-]
-
-[[package]]
-name = "minimal-lexical"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
-
-[[package]]
-name = "miniz_oxide"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
-dependencies = [
- "adler",
-]
-
-[[package]]
-name = "mio"
-version = "0.8.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
-dependencies = [
- "libc",
- "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "multimap"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
-
-[[package]]
-name = "native-tls"
-version = "0.2.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e"
-dependencies = [
- "lazy_static",
- "libc",
- "log",
- "openssl",
- "openssl-probe",
- "openssl-sys",
- "schannel",
- "security-framework",
- "security-framework-sys",
- "tempfile",
-]
-
-[[package]]
-name = "neli"
-version = "0.6.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43"
-dependencies = [
- "byteorder",
- "libc",
- "log",
- "neli-proc-macros",
-]
-
-[[package]]
-name = "neli-proc-macros"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4"
-dependencies = [
- "either",
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "serde",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "nix"
-version = "0.26.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a"
-dependencies = [
- "bitflags 1.3.2",
- "cfg-if 1.0.0",
- "libc",
- "static_assertions",
-]
-
-[[package]]
-name = "nodrop"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
-
-[[package]]
-name = "nom"
-version = "7.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
-dependencies = [
- "memchr",
- "minimal-lexical",
-]
-
-[[package]]
-name = "nu-ansi-term"
-version = "0.46.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
-dependencies = [
- "overload",
- "winapi",
-]
-
-[[package]]
-name = "num"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f"
-dependencies = [
- "num-bigint 0.3.3",
- "num-complex 0.3.1",
- "num-integer",
- "num-iter",
- "num-rational 0.3.2",
- "num-traits",
-]
-
-[[package]]
-name = "num"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
-dependencies = [
- "num-bigint 0.4.3",
- "num-complex 0.4.3",
- "num-integer",
- "num-iter",
- "num-rational 0.4.1",
- "num-traits",
-]
-
-[[package]]
-name = "num-bigint"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3"
-dependencies = [
- "autocfg 1.1.0",
- "num-integer",
- "num-traits",
- "serde",
-]
-
-[[package]]
-name = "num-bigint"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
-dependencies = [
- "autocfg 1.1.0",
- "num-integer",
- "num-traits",
- "serde",
-]
-
-[[package]]
-name = "num-bigint-dig"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905"
-dependencies = [
- "byteorder",
- "lazy_static",
- "libm",
- "num-integer",
- "num-iter",
- "num-traits",
- "rand 0.8.5",
- "smallvec",
- "zeroize",
-]
-
-[[package]]
-name = "num-complex"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5"
-dependencies = [
- "num-traits",
- "serde",
-]
-
-[[package]]
-name = "num-complex"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
-dependencies = [
- "num-traits",
-]
-
-[[package]]
-name = "num-derive"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2"
-dependencies = [
- "proc-macro2 0.4.30",
- "quote 0.6.13",
- "syn 0.15.44",
-]
-
-[[package]]
-name = "num-derive"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "num-integer"
-version = "0.1.45"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
-dependencies = [
- "autocfg 1.1.0",
- "num-traits",
-]
-
-[[package]]
-name = "num-iter"
-version = "0.1.43"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
-dependencies = [
- "autocfg 1.1.0",
- "num-integer",
- "num-traits",
-]
-
-[[package]]
-name = "num-rational"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07"
-dependencies = [
- "autocfg 1.1.0",
- "num-bigint 0.3.3",
- "num-integer",
- "num-traits",
- "serde",
-]
-
-[[package]]
-name = "num-rational"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
-dependencies = [
- "autocfg 1.1.0",
- "num-bigint 0.4.3",
- "num-integer",
- "num-traits",
-]
-
-[[package]]
-name = "num-traits"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
-dependencies = [
- "autocfg 1.1.0",
- "libm",
-]
-
-[[package]]
-name = "num_cpus"
-version = "1.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
-dependencies = [
- "hermit-abi 0.2.6",
- "libc",
-]
-
-[[package]]
-name = "object"
-version = "0.30.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
-dependencies = [
- "memchr",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.18.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
-
-[[package]]
-name = "opaque-debug"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
-
-[[package]]
-name = "openssl"
-version = "0.10.54"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019"
-dependencies = [
- "bitflags 1.3.2",
- "cfg-if 1.0.0",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-macros",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-macros"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 2.0.18",
-]
-
-[[package]]
-name = "openssl-probe"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.88"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617"
-dependencies = [
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "opentelemetry"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8"
-dependencies = [
- "async-trait",
- "crossbeam-channel 0.5.8",
- "futures-channel",
- "futures-executor",
- "futures-util",
- "js-sys",
- "lazy_static",
- "percent-encoding",
- "pin-project",
- "rand 0.8.5",
- "thiserror",
-]
-
-[[package]]
-name = "opentelemetry-http"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "449048140ee61e28f57abe6e9975eedc1f3a29855c7407bd6c12b18578863379"
-dependencies = [
- "async-trait",
- "bytes 1.4.0",
- "http",
- "opentelemetry",
- "reqwest",
-]
-
-[[package]]
-name = "opentelemetry-otlp"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a"
-dependencies = [
- "async-trait",
- "futures 0.3.28",
- "futures-util",
- "http",
- "opentelemetry",
- "opentelemetry-http",
- "prost",
- "prost-build",
- "reqwest",
- "thiserror",
- "tokio 1.28.2",
- "tonic",
- "tonic-build",
-]
-
-[[package]]
-name = "opentelemetry-semantic-conventions"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd"
-dependencies = [
- "opentelemetry",
-]
-
-[[package]]
-name = "os_info"
-version = "3.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e"
-dependencies = [
- "log",
- "serde",
- "winapi",
-]
-
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
-[[package]]
-name = "pairing_ce"
-version = "0.28.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db007b21259660d025918e653508f03050bf23fb96a88601f9936329faadc597"
-dependencies = [
- "byteorder",
- "cfg-if 1.0.0",
- "ff_ce",
- "rand 0.4.6",
- "serde",
-]
-
-[[package]]
-name = "parity-crypto"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b92ea9ddac0d6e1db7c49991e7d397d34a9fd814b4c93cda53788e8eef94e35"
-dependencies = [
- "aes",
- "aes-ctr",
- "block-modes",
- "digest 0.9.0",
- "ethereum-types",
- "hmac 0.10.1",
- "lazy_static",
- "pbkdf2 0.7.5",
- "ripemd160",
- "rustc-hex",
- "scrypt",
- "secp256k1 0.20.3",
- "sha2 0.9.9",
- "subtle",
- "tiny-keccak 2.0.2",
- "zeroize",
-]
-
-[[package]]
-name = "parity-scale-codec"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909"
-dependencies = [
- "arrayvec 0.7.3",
- "bitvec",
- "byte-slice-cast",
- "impl-trait-for-tuples",
- "parity-scale-codec-derive",
- "serde",
-]
-
-[[package]]
-name = "parity-scale-codec-derive"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27"
-dependencies = [
- "proc-macro-crate",
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core 0.8.6",
-]
-
-[[package]]
-name = "parking_lot"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
-dependencies = [
- "lock_api",
- "parking_lot_core 0.9.8",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if 1.0.0",
- "instant",
- "libc",
- "redox_syscall 0.2.16",
- "smallvec",
- "winapi",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.9.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
-dependencies = [
- "cfg-if 1.0.0",
- "libc",
- "redox_syscall 0.3.5",
- "smallvec",
- "windows-targets",
-]
-
-[[package]]
-name = "password-hash"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54986aa4bfc9b98c6a5f40184223658d187159d7b3c6af33f2b2aa25ae1db0fa"
-dependencies = [
- "base64ct",
- "rand_core 0.6.4",
-]
-
-[[package]]
-name = "paste"
-version = "1.0.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
-
-[[package]]
-name = "pbkdf2"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3b8c0d71734018084da0c0354193a5edfb81b20d2d57a92c5b154aefc554a4a"
-dependencies = [
- "crypto-mac 0.10.1",
-]
-
-[[package]]
-name = "pbkdf2"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf916dd32dd26297907890d99dc2740e33f6bd9073965af4ccff2967962f5508"
-dependencies = [
- "base64ct",
- "crypto-mac 0.10.1",
- "hmac 0.10.1",
- "password-hash",
- "sha2 0.9.9",
-]
-
-[[package]]
-name = "peeking_take_while"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
-
-[[package]]
-name = "pem"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8"
-dependencies = [
- "base64 0.13.1",
-]
-
-[[package]]
-name = "pem-rfc7468"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30"
-dependencies = [
- "base64ct",
-]
-
-[[package]]
-name = "percent-encoding"
-version = "2.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
-
-[[package]]
-name = "pest"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70"
-dependencies = [
- "thiserror",
- "ucd-trie",
-]
-
-[[package]]
-name = "pest_derive"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb"
-dependencies = [
- "pest",
- "pest_generator",
-]
-
-[[package]]
-name = "pest_generator"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e"
-dependencies = [
- "pest",
- "pest_meta",
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 2.0.18",
-]
-
-[[package]]
-name = "pest_meta"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411"
-dependencies = [
- "once_cell",
- "pest",
- "sha2 0.10.6",
-]
-
-[[package]]
-name = "petgraph"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4"
-dependencies = [
- "fixedbitset",
- "indexmap",
-]
-
-[[package]]
-name = "pin-project"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead"
-dependencies = [
- "pin-project-internal",
-]
-
-[[package]]
-name = "pin-project-internal"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
-dependencies = [
- "proc-macro2 1.0.60",
- "quote 1.0.28",
- "syn 2.0.18",
-]
-
-[[package]]
-name = "pin-project-lite"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "pkcs1"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320"
-dependencies = [
- "der 0.5.1",
- "pkcs8 0.8.0",
- "zeroize",
-]
-
-[[package]]
-name = "pkcs8"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0"
-dependencies = [
- "der 0.5.1",
- "spki 0.5.4",
- "zeroize",
-]
-
-[[package]]
-name = "pkcs8"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
-dependencies = [
- "der 0.6.1",
- "spki 0.6.0",
-]
-
-[[package]] -name = "pkg-config" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" - -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.3", -] - -[[package]] -name = "portable-atomic" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.10", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "version_check", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "proc-macro2" -version = "1.0.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prometheus_exporter" -version = "1.0.0" -dependencies = [ - "metrics", - "metrics-exporter-prometheus", - "tokio 1.28.2", - "vlog", - "zksync_config", -] - -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes 1.4.0", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes 1.4.0", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - 
"which", -] - -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes 1.4.0", - "prost", -] - -[[package]] -name = "prover-service" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "api", - "bincode", - "crossbeam-utils 0.8.16", - "log", - "num_cpus", - "rand 0.4.6", - "serde", - "serde_json", - "zkevm_test_harness", -] - -[[package]] -name = "quanta" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" -dependencies = [ - "crossbeam-utils 0.8.16", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "queues" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1475abae4f8ad4998590fe3acfe20104f0a5d48fc420c817cd2c09c3f56151f0" - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" -dependencies = [ - "proc-macro2 1.0.60", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.8", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "rayon" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" -dependencies = [ - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.16", - "num_cpus", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.7.2", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" - -[[package]] -name = "reqwest" -version = "0.11.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" -dependencies = [ - "base64 0.21.2", - "bytes 1.4.0", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "mime_guess", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.9", - "rustls", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 1.28.2", - "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.8", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots", - "winreg", -] - -[[package]] -name = "rescue_poseidon" -version = "0.4.1" -source = 
"git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" -dependencies = [ - "addchain", - "arrayvec 0.7.3", - "blake2 0.10.6", - "byteorder", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "num-bigint 0.3.3", - "num-integer", - "num-iter", - "num-traits", - "rand 0.4.6", - "serde", - "sha3 0.9.1", - "smallvec", -] - -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes 1.4.0", - "rustc-hex", -] - -[[package]] -name = "rsa" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" -dependencies = [ - "byteorder", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.8.0", - "rand_core 0.6.4", - "smallvec", - "subtle", - "zeroize", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.37.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustls" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] 
-name = "rustls-pemfile" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" -dependencies = [ - "base64 0.21.2", -] - -[[package]] -name = "rustls-webpki" -version = "0.100.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" - -[[package]] -name = "ryu" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" - -[[package]] -name = "salsa20" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" -dependencies = [ - "cipher", -] - -[[package]] -name = "schannel" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" -dependencies = [ - "windows-sys 0.42.0", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scrypt" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" -dependencies = [ - "base64 0.13.1", - "hmac 0.10.1", - "pbkdf2 0.6.0", - "rand 0.7.3", - "rand_core 0.5.1", - "salsa20", - "sha2 0.9.9", - "subtle", -] - -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - -[[package]] -name = "secp256k1" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" -dependencies = [ - "rand 0.6.5", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - -[[package]] -name = "security-framework" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" 
-version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" - -[[package]] -name = "sentry" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" -dependencies = [ - "httpdate", - "native-tls", - "reqwest", - "sentry-backtrace", - "sentry-contexts", - "sentry-core", - "sentry-debug-images", - "sentry-panic", - "sentry-tracing", - "tokio 1.28.2", - "ureq", -] - -[[package]] -name = "sentry-backtrace" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" -dependencies = [ - "backtrace", - "once_cell", - "regex", - "sentry-core", -] - -[[package]] -name = "sentry-contexts" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" -dependencies = [ - "hostname", - "libc", - "os_info", - "rustc_version", - "sentry-core", - "uname", -] - -[[package]] -name = "sentry-core" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-debug-images" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" -dependencies = [ - "findshlibs", - "once_cell", - "sentry-core", -] - -[[package]] -name = "sentry-panic" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" -dependencies = [ - "sentry-backtrace", - "sentry-core", -] - -[[package]] -name = "sentry-tracing" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" -dependencies = [ - "sentry-backtrace", - "sentry-core", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-types" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" -dependencies = [ - "debugid", - "getrandom 0.2.10", - "hex", - "serde", - "serde_json", - "thiserror", - "time 0.3.22", - "url", - "uuid", -] - -[[package]] -name = "serde" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "serde_json" -version = "1.0.97" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" -dependencies = [ - "indexmap", - "itoa 1.0.6", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa 1.0.6", - "ryu", - "serde", -] - -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "setup_key_generator_and_server" -version = "1.0.0" -dependencies = [ - "api", - "circuit_testing", - "itertools", - "prover-service", - "structopt", - "vlog", - "zkevm_test_harness", - "zksync_config", - "zksync_types", -] - -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", -] - -[[package]] -name = "sha3" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - -[[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint 0.4.3", - "num-traits", - "thiserror", - "time 0.3.22", -] - -[[package]] -name = "sketches-ddsketch" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" - -[[package]] -name = "slab" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der 0.5.1", -] - -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - -[[package]] -name = "splitmut" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" - -[[package]] -name = "sqlformat" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b7922be017ee70900be125523f38bdd644f4f06a1b16e8fa5a8ee8c34bffd4" -dependencies = [ - "itertools", - "nom", - "unicode_categories", -] - -[[package]] -name = "sqlx" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7911b0031a0247af40095838002999c7a52fba29d9739e93326e71a5a1bc9d43" -dependencies = [ - "sqlx-core", - "sqlx-macros", -] - -[[package]] -name = "sqlx-core" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aec89bfaca8f7737439bad16d52b07f1ccd0730520d3bf6ae9d069fe4b641fb1" -dependencies = [ - "ahash", - "atoi", - "base64 0.13.1", - "bigdecimal", - "bitflags 1.3.2", - "byteorder", - "bytes 1.4.0", - "chrono", - "crc", - "crossbeam-channel 0.5.8", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", - "dirs", - "either", - "futures-channel", - "futures-core", - "futures-intrusive", - "futures-util", - "hashlink", - "hex", - "hmac 0.11.0", - "indexmap", - "ipnetwork", - "itoa 0.4.8", 
- "libc", - "log", - "md-5", - "memchr", - "num-bigint 0.3.3", - "once_cell", - "parking_lot 0.11.2", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_json", - "sha-1", - "sha2 0.9.9", - "smallvec", - "sqlformat", - "sqlx-rt", - "stringprep", - "thiserror", - "tokio-stream", - "url", - "whoami", -] - -[[package]] -name = "sqlx-macros" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584866c833511b1a152e87a7ee20dee2739746f60c858b3c5209150bc4b466f5" -dependencies = [ - "dotenv", - "either", - "heck 0.3.3", - "hex", - "once_cell", - "proc-macro2 1.0.60", - "quote 1.0.28", - "serde", - "serde_json", - "sha2 0.9.9", - "sqlx-core", - "sqlx-rt", - "syn 1.0.109", - "url", -] - -[[package]] -name = "sqlx-rt" -version = "0.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" -dependencies = [ - "native-tls", - "once_cell", - "tokio 1.28.2", - "tokio-native-tls", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "stringprep" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.60", - "quote 1.0.28", - "rustversion", - "syn 1.0.109", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid", -] - -[[package]] -name = "syn" -version = 
"1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "unicode-ident", -] - -[[package]] -name = "sync_vm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "arrayvec 0.7.3", - "cs_derive", - "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "hex", - "itertools", - "num-bigint 0.4.3", - "num-derive 0.3.3", - "num-integer", - "num-traits", - "once_cell", - "rand 0.4.6", - "rescue_poseidon", - "serde", - "sha2 0.10.6", - "sha3 0.10.6", - "smallvec", - "zk_evm", - "zkevm_opcode_defs", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "fastrand", - "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.48.0", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "test-log" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "thread_local" -version = "1.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" -dependencies = [ - "itoa 1.0.6", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" - -[[package]] -name = "time-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" -dependencies = [ - "time-core", -] - -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "pin-project-lite 0.1.12", - "slab", -] - -[[package]] -name = "tokio" -version = "1.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" -dependencies = [ - "autocfg 1.1.0", - "bytes 1.4.0", - "libc", - "mio", - "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite 0.2.9", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - 
-[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes 1.4.0", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes 1.4.0", - "futures-core", - "futures-sink", - "pin-project-lite 0.2.9", - "tokio 1.28.2", - "tracing", -] - -[[package]] -name = "toml_datetime" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" - -[[package]] -name = "toml_edit" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5376256e44f2443f8896ac012507c19a012df0fe8758b55246ae51a2279db51f" -dependencies = [ - "combine", - "indexmap", - "itertools", -] - -[[package]] -name = "toml_edit" -version = "0.19.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow", -] - -[[package]] -name = "tonic" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" -dependencies = [ - "async-stream", - "async-trait", - "base64 0.13.1", - "bytes 1.4.0", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio 1.28.2", - "tokio-stream", - "tokio-util 0.6.10", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" -dependencies = [ - "proc-macro2 1.0.60", - "prost-build", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "pin-project", - "pin-project-lite 0.2.9", - "rand 0.8.5", - "slab", - "tokio 1.28.2", - "tokio-util 0.7.8", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.9", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.24" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "time 0.3.22", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "uname" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" -dependencies = [ - "libc", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "ureq" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" -dependencies = [ - "base64 0.21.2", - "log", - "native-tls", - "once_cell", - "url", -] - -[[package]] -name = "url" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna 0.4.0", - "percent-encoding", - "serde", -] - -[[package]] -name = "urlencoding" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" - -[[package]] -name = "uuid" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" -dependencies = [ - "getrandom 0.2.10", - "serde", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "vlog" -version = "1.0.0" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde_json", - "tracing", - 
"tracing-opentelemetry", - "tracing-subscriber", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" -dependencies = [ - "quote 1.0.28", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "wasm-streams" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "web-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" -dependencies = [ - "arrayvec 0.7.3", - "base64 0.13.1", - "bytes 1.4.0", - "derive_more", - "ethabi", - 
"ethereum-types", - "futures 0.3.28", - "futures-timer", - "headers", - "hex", - "idna 0.2.3", - "jsonrpc-core", - "log", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "reqwest", - "rlp", - "secp256k1 0.21.3", - "serde", - "serde_json", - "tiny-keccak 2.0.2", - "url", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] - -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "whoami" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68" -dependencies = [ - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - 
"windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" - -[[package]] -name = "winnow" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" -dependencies = [ - "memchr", -] - -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" - -[[package]] -name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#c08a8581421d2a0cf1fc8cbbdcd06c00da01fe0e" -dependencies = [ - "anyhow", - "lazy_static", - "num 0.4.0", - "serde", - "serde_json", - "static_assertions", - "zk_evm_abstractions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zk_evm_abstractions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#973a1f661c045e0e8b9a287505f353659279b3b3" -dependencies = [ - "anyhow", - "serde", - "static_assertions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm-assembly" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#edc364e59a2eea9c4b1d4ce79f15d0b7c6b55b98" -dependencies = [ - "env_logger", - "hex", - "lazy_static", - "log", - "nom", - "num-bigint 0.4.3", - "num-traits", - "sha3 0.10.6", - "smallvec", - "structopt", - "thiserror", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#2f69c6975a272e8c31d2d82c136a4ea81df25115" -dependencies = [ - "bitflags 2.3.2", - "blake2 0.10.6", - "ethereum-types", - "k256", - "lazy_static", - "sha2 0.10.6", - "sha3 0.10.6", -] - -[[package]] -name = "zkevm_test_harness" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#6453eab3c9c8915f588ff4eceb48d7be9a695ecb" -dependencies = [ - "bincode", - "circuit_testing", - "codegen 0.2.0", - "crossbeam 0.8.2", - "derivative", - "env_logger", - "hex", - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "rayon", - "serde", - "serde_json", - "smallvec", - "structopt", - "sync_vm", - "test-log", - "tracing", - "zk_evm", - "zkevm-assembly", -] - -[[package]] -name = "zksync_basic_types" -version = "1.0.0" -dependencies = [ - "serde", - "web3", -] - -[[package]] -name = "zksync_circuit_breaker" -version = "1.0.0" -dependencies = [ - "assert_matches", - "async-trait", - "backon", - "convert_case 0.6.0", - "futures 0.3.28", - "hex", - "serde", - "serde_json", - "thiserror", - "tokio 1.28.2", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_types", - "zksync_utils", - "zksync_verification_key_generator_and_server", -] - -[[package]] -name = "zksync_config" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "envy", - "num 0.3.1", - "once_cell", - "serde", - "serde_json", - "url", - "zksync_basic_types", - "zksync_contracts", - "zksync_utils", -] - -[[package]] -name = "zksync_contracts" -version = "1.0.0" -dependencies = [ - "ethabi", - "hex", - "once_cell", - "serde", - "serde_json", - "zksync_utils", -] - -[[package]] -name = "zksync_crypto" -version = "1.0.0" -dependencies = [ - "base64 0.13.1", - "blake2 0.10.6", - "hex", - "once_cell", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "thiserror", - "zksync_basic_types", -] - -[[package]] -name = "zksync_dal" -version = "1.0.0" -dependencies = [ - "anyhow", - "bigdecimal", - "bincode", - "hex", - "itertools", - "metrics", - "num 0.3.1", - "once_cell", - "serde_json", - "sqlx", - "strum", - "thiserror", - 
"tokio 1.28.2", - "vlog", - "zksync_config", - "zksync_contracts", - "zksync_health_check", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_eth_client" -version = "1.0.0" -dependencies = [ - "anyhow", - "async-trait", - "hex", - "jsonrpc-core", - "metrics", - "parity-crypto", - "serde", - "thiserror", - "tokio 1.28.2", - "vlog", - "zksync_config", - "zksync_contracts", - "zksync_eth_signer", - "zksync_types", -] - -[[package]] -name = "zksync_eth_signer" -version = "1.0.0" -dependencies = [ - "async-trait", - "hex", - "jsonrpc-core", - "parity-crypto", - "reqwest", - "rlp", - "secp256k1 0.21.3", - "serde", - "serde_derive", - "serde_json", - "thiserror", - "zksync_types", -] - -[[package]] -name = "zksync_health_check" -version = "0.1.0" -dependencies = [ - "async-trait", -] - -[[package]] -name = "zksync_mini_merkle_tree" -version = "1.0.0" -dependencies = [ - "once_cell", - "zksync_basic_types", - "zksync_crypto", -] - -[[package]] -name = "zksync_object_store" -version = "1.0.0" -dependencies = [ - "async-trait", - "bincode", - "google-cloud-auth", - "google-cloud-storage", - "http", - "metrics", - "tokio 1.28.2", - "vlog", - "zksync_config", - "zksync_types", -] - -[[package]] -name = "zksync_prover" -version = "1.0.0" -dependencies = [ - "api", - "bincode", - "chrono", - "ctrlc", - "ethabi", - "futures 0.3.28", - "hex", - "local-ip-address", - "metrics", - "prometheus_exporter", - "prover-service", - "queues", - "reqwest", - "serde", - "serde_json", - "setup_key_generator_and_server", - "thiserror", - "tokio 1.28.2", - "vlog", - "zkevm_test_harness", - "zksync_circuit_breaker", - "zksync_config", - "zksync_dal", - "zksync_eth_client", - "zksync_object_store", - "zksync_prover_utils", - "zksync_utils", - "zksync_verification_key_generator_and_server", -] - -[[package]] -name = "zksync_prover_utils" -version = "1.0.0" -dependencies = [ - "ctrlc", - "futures 0.3.28", - "metrics", - "regex", - "reqwest", - "tokio 1.28.2", - "vlog", - "zksync_config", - "zksync_utils", -] - -[[package]] -name = "zksync_types" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "blake2 0.10.6", - "chrono", - "codegen 0.1.0", - "metrics", - "num 0.3.1", - "once_cell", - "parity-crypto", - "rlp", - "serde", - "serde_json", - "serde_with", - "strum", - "thiserror", - "zk_evm", - "zkevm-assembly", - "zkevm_test_harness", - "zksync_basic_types", - "zksync_config", - "zksync_contracts", - "zksync_mini_merkle_tree", - "zksync_utils", -] - -[[package]] -name = "zksync_utils" -version = "1.0.0" -dependencies = [ - "anyhow", - "bigdecimal", - "envy", - "futures 0.3.28", - "hex", - "itertools", - "metrics", - "num 0.3.1", - "reqwest", - "serde", - "thiserror", - "tokio 1.28.2", - "vlog", - "zk_evm", - "zksync_basic_types", -] - -[[package]] -name = "zksync_verification_key_generator_and_server" -version = "1.0.0" -dependencies = [ - "bincode", - "circuit_testing", - "ff_ce", - "hex", - "itertools", - "serde_json", - "structopt", - "toml_edit 0.14.4", - "vlog", - "zksync_types", -] diff --git a/core/bin/prover_fri/Cargo.toml b/core/bin/prover_fri/Cargo.toml deleted file mode 100644 index 3de420d2cdb7..000000000000 --- a/core/bin/prover_fri/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_prover_fri" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } 
-zksync_config = { path = "../../lib/config", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_object_store = { path = "../../lib/object_store", version = "1.0" } -zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } -zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } -zksync_witness_generator = { path = "../witness_generator", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" } - -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } -circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]} - -tokio = { version = "1", features = ["time"] } -futures = { version = "0.3", features = ["compat"] } -ctrlc = { version = "3.1", features = ["termination"] } -metrics = "0.20.0" diff --git a/core/bin/prover_fri/src/main.rs b/core/bin/prover_fri/src/main.rs deleted file mode 100644 index 31972899cde5..000000000000 --- a/core/bin/prover_fri/src/main.rs +++ /dev/null @@ -1,128 +0,0 @@ -#![feature(generic_const_exprs)] - -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::oneshot; -use zksync_vk_setup_data_server_fri::{get_setup_data_for_circuit_type, ProverServiceDataKey}; - -use zksync_config::configs::fri_prover_group::{CircuitIdRoundTuple, FriProverGroupConfig}; -use zksync_config::configs::{FriProverConfig, PrometheusConfig}; -use zksync_config::{ApiConfig, ObjectStoreConfig}; -use zksync_dal::connection::DbVariant; -use zksync_dal::ConnectionPool; -use zksync_object_store::ObjectStoreFactory; -use zksync_queued_job_processor::JobProcessor; -use zksync_utils::wait_for_tasks::wait_for_tasks; - -use crate::prover_job_processor::{GoldilocksProverSetupData, Prover, SetupLoadMode}; - -mod prover_job_processor; - -#[tokio::main] -async fn main() { - vlog::init(); - let sentry_guard = vlog::init_sentry(); - let prover_config = FriProverConfig::from_env(); - let prometheus_config = PrometheusConfig { - listener_port: prover_config.prometheus_port, - ..ApiConfig::from_env().prometheus - }; - - match sentry_guard { - Some(_) => vlog::info!( - "Starting Sentry url: {}", - std::env::var("MISC_SENTRY_URL").unwrap(), - ), - None => vlog::info!("No sentry url configured"), - } - - let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); - let mut stop_signal_sender = Some(stop_signal_sender); - ctrlc::set_handler(move || { - if let Some(sender) = stop_signal_sender.take() { - sender.send(()).ok(); - } - }) - .expect("Error setting Ctrl+C handler"); - - let (stop_sender, stop_receiver) = tokio::sync::watch::channel(false); - let blob_store = ObjectStoreFactory::from_env().create_store().await; - let public_blob_store = ObjectStoreFactory::new(ObjectStoreConfig::public_from_env()) - .create_store() - .await; - - vlog::info!("Starting FRI proof generation"); - let pool = ConnectionPool::new(None, DbVariant::Prover).await; - let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env() - .get_circuit_ids_for_group_id(prover_config.specialized_group_id) - .unwrap_or(vec![]); - - let setup_load_mode = build_prover_setup_load_mode_using_config(&prover_config); - let prover = Prover::new( - blob_store, - public_blob_store, - 
FriProverConfig::from_env(), - pool, - setup_load_mode, - circuit_ids_for_round_to_be_proven, - ); - let tasks = vec![ - prometheus_exporter::run_prometheus_exporter(prometheus_config.listener_port, None), - tokio::spawn(prover.run(stop_receiver, None)), - ]; - - let particular_crypto_alerts = None; - let graceful_shutdown = None::<futures::future::Ready<()>>; - let tasks_allowed_to_finish = false; - tokio::select! { - _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, - _ = stop_signal_receiver => { - vlog::info!("Stop signal received, shutting down"); - }, - } - - stop_sender.send(true).ok(); -} - -fn build_prover_setup_load_mode_using_config(config: &FriProverConfig) -> SetupLoadMode { - match config.setup_load_mode { - zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, - zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { - let cache = load_setup_data_cache(config.specialized_group_id); - SetupLoadMode::FromMemory(cache) - } - } -} - -fn load_setup_data_cache( - specialized_group_id: u8, -) -> HashMap<ProverServiceDataKey, Arc<GoldilocksProverSetupData>> { - vlog::info!( - "Loading setup data cache for group {}", - specialized_group_id - ); - let prover_setup_metadata_list = FriProverGroupConfig::from_env() - .get_circuit_ids_for_group_id(specialized_group_id) - .expect( - "At least one circuit should be configured for group when running in FromMemory mode", - ); - vlog::info!( - "for group {} configured setup metadata are {:?}", - specialized_group_id, - prover_setup_metadata_list - ); - let mut cache = HashMap::new(); - for prover_setup_metadata in prover_setup_metadata_list { - let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); - let setup_data = get_setup_data_for_circuit_type(key.clone()); - cache.insert(key, Arc::new(setup_data)); - } - cache -} - -fn setup_metadata_to_setup_data_key(setup_metadata: &CircuitIdRoundTuple) -> ProverServiceDataKey { - ProverServiceDataKey { - circuit_id: setup_metadata.circuit_id, - round: setup_metadata.aggregation_round.into(), - } -}
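
The deleted main.rs above is also a compact example of the shutdown wiring used by these binaries: a oneshot channel fired from the Ctrl+C handler races the worker tasks inside tokio::select!, and a watch channel broadcasts the stop flag to every spawned task. A minimal, self-contained sketch of that pattern follows (assuming only the tokio (features = ["full"]), ctrlc, and futures crates; `worker` and all other names are illustrative, this is not the deleted binary):

use std::time::Duration;
use tokio::sync::{oneshot, watch};

// Toy worker: checks the broadcast stop flag between units of work.
async fn worker(stop: watch::Receiver<bool>, id: u32) {
    loop {
        let should_stop = *stop.borrow();
        if should_stop {
            println!("worker {id} stopping");
            return;
        }
        // ... one unit of work ...
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}

#[tokio::main]
async fn main() {
    // Ctrl+C fires a oneshot; take() makes a second Ctrl+C a no-op instead of a panic.
    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();
    let mut stop_signal_sender = Some(stop_signal_sender);
    ctrlc::set_handler(move || {
        if let Some(sender) = stop_signal_sender.take() {
            sender.send(()).ok();
        }
    })
    .expect("Error setting Ctrl+C handler");

    // The watch channel broadcasts the stop flag to all workers.
    let (stop_sender, stop_receiver) = watch::channel(false);
    let tasks = vec![
        tokio::spawn(worker(stop_receiver.clone(), 1)),
        tokio::spawn(worker(stop_receiver, 2)),
    ];

    // Whichever completes first wins: all tasks finished, or a stop signal.
    tokio::select! {
        _ = futures::future::join_all(tasks) => {},
        _ = stop_signal_receiver => {
            println!("Stop signal received, shutting down");
        }
    }
    // Flip the flag so any still-running worker exits its loop.
    stop_sender.send(true).ok();
}
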
diff --git a/core/bin/prover_fri/src/prover_job_processor.rs b/core/bin/prover_fri/src/prover_job_processor.rs deleted file mode 100644 index 58840a537207..000000000000 --- a/core/bin/prover_fri/src/prover_job_processor.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::collections::HashMap; -use std::{sync::Arc, time::Instant}; -use tokio::task::JoinHandle; - -use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; -use circuit_definitions::boojum::algebraic_props::sponge::GenericAlgebraicSponge; -use circuit_definitions::boojum::cs::implementations::pow::NoPow; -use circuit_definitions::boojum::implementations::poseidon2::Poseidon2Goldilocks; -use circuit_definitions::boojum::worker::Worker; -use circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof, -}; -use circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit, -}; -use circuit_definitions::{ - base_layer_proof_config, recursion_layer_proof_config, ZkSyncDefaultRoundFunction, -}; -use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; -use zkevm_test_harness::prover_utils::{ - prove_base_layer_circuit, prove_recursion_layer_circuit, verify_base_layer_proof, - verify_recursion_layer_proof, -}; -use zksync_config::configs::FriProverConfig; -use zksync_dal::ConnectionPool; -use zksync_object_store::{FriCircuitKey, ObjectStore}; -use zksync_queued_job_processor::{async_trait, JobProcessor}; -use zksync_types::L1BatchNumber; - -use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; -use zksync_vk_setup_data_server_fri::{ - get_setup_data_for_circuit_type, ProverServiceDataKey, ProverSetupData, -}; -use zksync_witness_utils::{ - get_base_layer_circuit_id_for_recursive_layer, CircuitWrapper, FriProofWrapper, -}; - -pub type GoldilocksProverSetupData = ProverSetupData< - GoldilocksField, - GoldilocksField, - GenericAlgebraicSponge< - GoldilocksField, - GoldilocksField, - 8, - 12, - 4, - Poseidon2Goldilocks, - AbsorptionModeOverwrite, - >, ->; - -pub enum SetupLoadMode { - FromMemory(HashMap<ProverServiceDataKey, Arc<GoldilocksProverSetupData>>), - FromDisk, -} - -pub struct Prover { - blob_store: Box<dyn ObjectStore>, - public_blob_store: Box<dyn ObjectStore>, - config: Arc<FriProverConfig>, - prover_connection_pool: ConnectionPool, - setup_load_mode: SetupLoadMode, - // Only pick jobs for the configured circuit id and aggregation rounds. - // Empty means all jobs are picked. - circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, -} - -impl Prover { - pub fn new( - blob_store: Box<dyn ObjectStore>, - public_blob_store: Box<dyn ObjectStore>, - config: FriProverConfig, - prover_connection_pool: ConnectionPool, - setup_load_mode: SetupLoadMode, - circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, - ) -> Self { - Prover { - blob_store, - public_blob_store, - config: Arc::new(config), - prover_connection_pool, - setup_load_mode, - circuit_ids_for_round_to_be_proven, - } - } - - fn get_setup_data(&self, key: ProverServiceDataKey) -> Arc<GoldilocksProverSetupData> { - match &self.setup_load_mode { - SetupLoadMode::FromMemory(cache) => cache - .get(&key) - .expect("Setup data not found in cache") - .clone(), - SetupLoadMode::FromDisk => { - let started_at = Instant::now(); - let artifact: GoldilocksProverSetupData = - get_setup_data_for_circuit_type(key.clone()); - metrics::histogram!( - "prover_fri.prover.setup_data_load_time", - started_at.elapsed(), - "circuit_type" => key.circuit_id.to_string(), - ); - Arc::new(artifact) - } - } - } - - fn prove( - job: ProverJob, - config: Arc<FriProverConfig>, - setup_data: Arc<GoldilocksProverSetupData>, - ) -> ProverArtifacts { - let proof = match job.circuit_wrapper { - CircuitWrapper::Base(base_circuit) => { - Self::prove_base_layer(job.job_id, base_circuit, config, setup_data) - } - CircuitWrapper::Recursive(recursive_circuit) => { - Self::prove_recursive_layer(job.job_id, recursive_circuit, config, setup_data) - } - }; - ProverArtifacts::new(job.block_number, proof) - } - - fn prove_recursive_layer( - job_id: u32, - circuit: ZkSyncRecursiveLayerCircuit, - config: Arc<FriProverConfig>, - artifact: Arc<GoldilocksProverSetupData>, - ) -> FriProofWrapper { - let worker = Worker::new(); - let circuit_id = circuit.numeric_circuit_type(); - let started_at = Instant::now(); - let proof = prove_recursion_layer_circuit::<NoPow>( - circuit.clone(), - &worker, - recursion_layer_proof_config(), - &artifact.setup_base, - &artifact.setup, - &artifact.setup_tree, - &artifact.vk, - &artifact.vars_hint, - &artifact.wits_hint, - &artifact.finalization_hint, - ); - metrics::histogram!( - "prover_fri.prover.proof_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "recursive", - ); - if config - .recursive_layer_circuit_ids_to_be_verified - .contains(&circuit_id) - { - let started_at = Instant::now(); - let is_valid = verify_recursion_layer_proof::<NoPow>(&circuit, &proof, &artifact.vk); - metrics::histogram!( - "prover_fri.prover.proof_verification_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "recursive", - ); - if !is_valid { - vlog::error!( - "Failed to verify recursive layer proof for job-id: {} circuit type: {}", - job_id, - circuit_id - ); - } - } - FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) - }
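
prove_recursive_layer above and prove_base_layer below it share one control-flow idea: proof generation is always timed, while the expensive verification pass runs only for circuit ids explicitly opted in through config, and a failed check is logged rather than returned. A sketch of just that skeleton, with the circuit machinery stubbed out (prove, verify, Config and every other name here are stand-ins for illustration, not the real APIs):

use std::time::Instant;

struct Config {
    circuit_ids_to_be_verified: Vec<u8>,
}

// Stubs standing in for the real proving/verification calls.
fn prove(circuit_id: u8) -> Vec<u8> { vec![circuit_id] }
fn verify(proof: &[u8]) -> bool { !proof.is_empty() }

fn prove_and_maybe_verify(job_id: u32, circuit_id: u8, config: &Config) -> Vec<u8> {
    // Proving is always timed.
    let started_at = Instant::now();
    let proof = prove(circuit_id);
    println!("proof_generation_time circuit={circuit_id}: {:?}", started_at.elapsed());

    // Verification is a spot check, gated by config, and only logged on failure.
    if config.circuit_ids_to_be_verified.contains(&circuit_id) {
        let started_at = Instant::now();
        let is_valid = verify(&proof);
        println!("proof_verification_time circuit={circuit_id}: {:?}", started_at.elapsed());
        if !is_valid {
            eprintln!("Failed to verify proof for job-id: {job_id} circuit type: {circuit_id}");
        }
    }
    proof
}

fn main() {
    let config = Config { circuit_ids_to_be_verified: vec![3] };
    prove_and_maybe_verify(1, 3, &config); // proved and spot-checked
    prove_and_maybe_verify(2, 5, &config); // proved only
}
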
- - fn prove_base_layer( - job_id: u32, - circuit: ZkSyncBaseLayerCircuit< - GoldilocksField, - VmWitnessOracle<GoldilocksField>, - ZkSyncDefaultRoundFunction, - >, - config: Arc<FriProverConfig>, - artifact: Arc<GoldilocksProverSetupData>, - ) -> FriProofWrapper { - let worker = Worker::new(); - let circuit_id = circuit.numeric_circuit_type(); - let started_at = Instant::now(); - let proof = prove_base_layer_circuit::<NoPow>( - circuit.clone(), - &worker, - base_layer_proof_config(), - &artifact.setup_base, - &artifact.setup, - &artifact.setup_tree, - &artifact.vk, - &artifact.vars_hint, - &artifact.wits_hint, - &artifact.finalization_hint, - ); - metrics::histogram!( - "prover_fri.prover.proof_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "base", - ); - if config - .base_layer_circuit_ids_to_be_verified - .contains(&circuit_id) - { - let started_at = Instant::now(); - let is_valid = verify_base_layer_proof::<NoPow>(&circuit, &proof, &artifact.vk); - metrics::histogram!( - "prover_fri.prover.proof_verification_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "base", - ); - if !is_valid { - vlog::error!( - "Failed to verify base layer proof for job-id: {} circuit_type {}", - job_id, - circuit_id - ); - } - } - FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) - } -} - -pub struct ProverJob { - block_number: L1BatchNumber, - job_id: u32, - circuit_wrapper: CircuitWrapper, - setup_data_key: ProverServiceDataKey, -} - -impl ProverJob { - fn new( - block_number: L1BatchNumber, - job_id: u32, - circuit_wrapper: CircuitWrapper, - setup_data_key: ProverServiceDataKey, - ) -> Self { - Self { - block_number, - job_id, - circuit_wrapper, - setup_data_key, - } - } -} - -pub struct ProverArtifacts { - block_number: L1BatchNumber, - proof_wrapper: FriProofWrapper, -} - -impl ProverArtifacts { - fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self { - Self { - block_number, - proof_wrapper, - } - } -}
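
ProverJob and ProverArtifacts are the input and output of the JobProcessor implementation that follows: the trait (from the zksync_queued_job_processor crate) polls for work, runs the CPU-bound proving on tokio's blocking pool via spawn_blocking so the async reactor stays responsive, then persists the result. A cut-down sketch of that shape, assuming only the tokio and async-trait crates (the trait below is a toy stand-in, not the real zksync_queued_job_processor definition):

use async_trait::async_trait;
use tokio::task::JoinHandle;

#[async_trait]
trait JobProcessor {
    type Job: Send + 'static;
    type JobArtifacts: Send + 'static;

    async fn get_next_job(&self) -> Option<Self::Job>;
    fn process_job(&self, job: Self::Job) -> JoinHandle<Self::JobArtifacts>;
    async fn save_result(&self, artifacts: Self::JobArtifacts);

    // Poll once: fetch, process off the async reactor, persist. Returns
    // whether a job was actually processed.
    async fn run_once(&self) -> bool {
        match self.get_next_job().await {
            Some(job) => {
                let artifacts = self.process_job(job).await.expect("job panicked");
                self.save_result(artifacts).await;
                true
            }
            None => false,
        }
    }
}

struct SquareProver;

#[async_trait]
impl JobProcessor for SquareProver {
    type Job = u64;
    type JobArtifacts = u64;

    async fn get_next_job(&self) -> Option<Self::Job> {
        Some(7) // a real impl would poll a queue or database here
    }

    fn process_job(&self, job: Self::Job) -> JoinHandle<Self::JobArtifacts> {
        // CPU-heavy work belongs on the blocking pool, like proving above.
        tokio::task::spawn_blocking(move || job * job)
    }

    async fn save_result(&self, artifacts: Self::JobArtifacts) {
        println!("saved artifact: {artifacts}");
    }
}

#[tokio::main]
async fn main() {
    SquareProver.run_once().await;
}
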
- -#[async_trait] -impl JobProcessor for Prover { - type Job = ProverJob; - type JobId = u32; - type JobArtifacts = ProverArtifacts; - const SERVICE_NAME: &'static str = "FriProver"; - - async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { - let mut storage = self.prover_connection_pool.access_storage().await; - let prover_job = match self.circuit_ids_for_round_to_be_proven.is_empty() { - false => { - // Specialized prover: proving subset of configured circuits. - storage - .fri_prover_jobs_dal() - .get_next_job_for_circuit_id_round(&self.circuit_ids_for_round_to_be_proven) - .await - } - true => { - // Generalized prover: proving all circuits. - storage.fri_prover_jobs_dal().get_next_job().await - } - }?; - vlog::info!("Started processing prover job: {:?}", prover_job); - - let circuit_key = FriCircuitKey { - block_number: prover_job.block_number, - sequence_number: prover_job.sequence_number, - circuit_id: prover_job.circuit_id, - aggregation_round: prover_job.aggregation_round, - depth: prover_job.depth, - }; - let started_at = Instant::now(); - let input = self - .blob_store - .get(circuit_key) - .await - .unwrap_or_else(|err| panic!("{err:?}")); - metrics::histogram!( - "prover_fri.prover.blob_fetch_time", - started_at.elapsed(), - "circuit_type" => prover_job.circuit_id.to_string(), - "aggregation_round" => format!("{:?}", prover_job.aggregation_round), - ); - let setup_data_key = ProverServiceDataKey { - circuit_id: prover_job.circuit_id, - round: prover_job.aggregation_round, - }; - - Some(( - prover_job.id, - ProverJob::new( - prover_job.block_number, - prover_job.id, - input, - setup_data_key, - ), - )) - } - - async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { - self.prover_connection_pool - .access_storage() - .await - .fri_prover_jobs_dal() - .save_proof_error(job_id, error) - .await; - } - - async fn process_job( - &self, - job: Self::Job, - _started_at: Instant, - ) -> JoinHandle<ProverArtifacts> { - let config = Arc::clone(&self.config); - let setup_data = self.get_setup_data(job.setup_data_key.clone()); - tokio::task::spawn_blocking(move || Self::prove(job, config, setup_data)) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) { - vlog::info!( - "Successfully proven job: {}, took: {:?}", - job_id, - started_at.elapsed() - ); - let proof = artifacts.proof_wrapper; - - // We save the scheduler proofs in public bucket, - // so that it can be verified independently while we're doing shadow proving - let circuit_type = match &proof { - FriProofWrapper::Base(base) => base.numeric_circuit_type(), - FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { - ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { - self.public_blob_store - .put(artifacts.block_number.0, &proof) - .await - .unwrap(); - recursive_circuit.numeric_circuit_type() - } - _ => recursive_circuit.numeric_circuit_type(), - }, - }; - - let blob_save_started_at = Instant::now(); - let blob_url = self.blob_store.put(job_id, &proof).await.unwrap(); - metrics::histogram!( - "prover_fri.prover.blob_save_time", - blob_save_started_at.elapsed(), - "circuit_type" => circuit_type.to_string(), - ); - - let mut prover_connection = self.prover_connection_pool.access_storage().await; - let mut transaction = prover_connection.start_transaction().await; - let job_metadata = transaction - .fri_prover_jobs_dal() - .save_proof(job_id, started_at.elapsed(), &blob_url) - .await; - if job_metadata.is_node_final_proof { - transaction - .fri_scheduler_dependency_tracker_dal() - .set_final_prover_job_id_for_l1_batch( - get_base_layer_circuit_id_for_recursive_layer(job_metadata.circuit_id), - job_id, - job_metadata.block_number, - ) - .await; - } - transaction.commit().await; - } -} diff --git a/core/bin/setup_key_generator_and_server/Cargo.lock b/core/bin/setup_key_generator_and_server/Cargo.lock deleted file mode 100644 index 662517186159..000000000000 --- a/core/bin/setup_key_generator_and_server/Cargo.lock +++ /dev/null @@ -1,5028 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing.
-version = 3 - -[[package]] -name = "addchain" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "addr2line" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher", -] - -[[package]] -name = "aes-ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7729c3cde54d67063be556aeac75a81330d802f0259500ca40cb52967f975763" -dependencies = [ - "aes-soft", - "aesni", - "cipher", - "ctr", -] - -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher", - "opaque-debug", -] - -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.71" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" - -[[package]] -name = "api" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "bellman_ce", - "cfg-if 1.0.0", - "gpu-prover", - "num_cpus", - "serde", -] - -[[package]] -name = "arr_macro" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a105bfda48707cf19220129e78fca01e9639433ffaef4163546ed8fb04120a5" -dependencies = [ - "arr_macro_impl", - "proc-macro-hack", -] - -[[package]] -name = "arr_macro_impl" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" -dependencies = [ - "proc-macro-hack", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite 0.2.9", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "async-trait" -version = "0.1.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backtrace" -version = "0.3.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = 
"0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "bellman_ce" -version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayvec 0.7.3", - "bit-vec", - "blake2s_const", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "bigdecimal" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc403c26e6b03005522e6e8053384c4e881dfe5b2bf041c0c2c49be33d64a539" -dependencies = [ - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap", - "env_logger", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.60", - "quote 1.0.28", - "regex", - "rustc-hash", - "shlex", - "which", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -dependencies = [ - "serde", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "blake2-rfc_bellman_edition" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc60350286c7c3db13b98e91dbe5c8b6830a6821bc20af5b0c310ce94d74915" -dependencies = [ - "arrayvec 0.4.12", - "byteorder", - "constant_time_eq", -] - -[[package]] -name = "blake2s_const" -version = "0.6.0" -source = 
"git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding", - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "bumpalo" -version = "3.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "rustc-serialize", - "serde", - "time 0.1.45", - 
"wasm-bindgen", - "winapi", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - -[[package]] -name = "circuit_testing" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#abd44b507840f836da6e084aaacb2ba8a7cb1df6" -dependencies = [ - "bellman_ce", -] - -[[package]] -name = "clang-sys" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" -dependencies = [ - "ethereum-types", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "handlebars", - "hex", - "paste", - "rescue_poseidon", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "codegen" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff61280aed771c3070e7dcc9e050c66f1eb1e3b96431ba66f9f74641d02fc41d" -dependencies = [ - "indexmap", -] - -[[package]] -name = "const-oid" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" - -[[package]] -name = "cpufeatures" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" -dependencies = [ - "libc", -] - -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", 
- "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", -] - -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.15", - "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "cs_derive" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.60", - "quote 1.0.28", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher", -] - -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.60", - "quote 1.0.28", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "debugid" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" -dependencies = [ - "serde", - "uuid", -] - -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "zeroize", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2 1.0.60", - "quote 1.0.28", - "rustc_version", - 
"syn 1.0.109", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", - "subtle", -] - -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", -] - -[[package]] -name = "either" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" - -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "digest 0.10.7", - "ff", - "generic-array", - "group", - "pkcs8", - "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", -] - -[[package]] -name = "encoding_rs" -version = "0.8.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "envy" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965" -dependencies = [ - "serde", -] - -[[package]] -name = "errno" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "ethabi" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5" -dependencies = [ - "ethereum-types", - "hex", - "serde", - "serde_json", - "sha3 0.9.1", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak 2.0.2", -] - -[[package]] -name = "ethereum-types" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - 
"primitive-types", - "uint", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "ff_derive_ce", - "hex", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "proc-macro2 1.0.60", - "quote 1.0.28", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "findshlibs" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" -dependencies = [ - "cc", - "lazy_static", - "libc", - "winapi", -] - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "franklin-crypto" -version = "0.0.5" -source = "git+ssh://git@github.com/matter-labs/franklin-crypto?branch=dev#5922873d25ecec827cd60420ca8cd84a188bb965" -dependencies = [ - "arr_macro", - "bellman_ce", - "bit-vec", - "blake2 0.9.2", - "blake2-rfc_bellman_edition", - "blake2s_simd", - "byteorder", - "digest 0.9.0", - "hex", - "indexmap", - "itertools", - "lazy_static", - "num-bigint 0.4.3", - "num-derive 0.2.5", - "num-integer", - "num-traits", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "sha3 0.9.1", - "smallvec", - "splitmut", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "franklin-crypto" -version = "0.0.5" -source = 
"git+https://github.com/matter-labs/franklin-crypto?branch=dev#5922873d25ecec827cd60420ca8cd84a188bb965" -dependencies = [ - "arr_macro", - "bellman_ce", - "bit-vec", - "blake2 0.9.2", - "blake2-rfc_bellman_edition", - "blake2s_simd", - "byteorder", - "digest 0.9.0", - "hex", - "indexmap", - "itertools", - "lazy_static", - "num-bigint 0.4.3", - "num-derive 0.2.5", - "num-integer", - "num-traits", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "sha3 0.9.1", - "smallvec", - "splitmut", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" - -[[package]] -name = "futures-executor" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" - -[[package]] -name = "futures-locks" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c4e684ddb2d8a4db5ca8a02b35156da129674ba4412b6f528698d58c594954" -dependencies = [ - "futures", - "tokio 0.2.25", -] - -[[package]] -name = "futures-macro" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "futures-sink" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" - -[[package]] -name = "futures-task" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - -[[package]] -name = "futures-util" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite 0.2.9", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.27.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" - -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - -[[package]] -name = "gpu-ffi" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "bindgen", - "crossbeam 0.7.3", - "derivative", - "futures", - "futures-locks", - "num_cpus", -] - -[[package]] -name = "gpu-prover" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "bit-vec", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "franklin-crypto 0.0.5 (git+ssh://git@github.com/matter-labs/franklin-crypto?branch=dev)", - "gpu-ffi", - "itertools", - "num_cpus", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "h2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" -dependencies = [ - "bytes 1.4.0", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.28.2", - "tokio-util 0.7.8", - "tracing", -] - -[[package]] -name = "handlebars" -version = "4.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "headers" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" -dependencies = [ 
- "base64 0.13.1", - "bitflags 1.3.2", - "bytes 1.4.0", - "headers-core", - "http", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http", -] - -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - -[[package]] -name = "http" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" -dependencies = [ - "bytes 1.4.0", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes 1.4.0", - "http", - "pin-project-lite 0.2.9", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version 
= "0.14.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" -dependencies = [ - "bytes 1.4.0", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite 0.2.9", - "socket2", - "tokio 1.28.2", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" -dependencies = [ - "http", - "hyper", - "rustls", - "tokio 1.28.2", - "tokio-rustls", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper", - "pin-project-lite 0.2.9", - "tokio 1.28.2", - "tokio-io-timeout", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes 1.4.0", - "hyper", - "native-tls", - "tokio 1.28.2", - "tokio-native-tls", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg 1.1.0", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.1", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "ipnet" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" - -[[package]] -name = "js-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sha2 0.10.6", -] - -[[package]] -name = "keccak" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" -dependencies = [ - "cpufeatures", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.146" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" - -[[package]] -name = "libloading" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if 1.0.0", - "winapi", -] - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "lock_api" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" -dependencies = [ - "autocfg 1.1.0", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" - -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash", - "metrics-macros", - "portable-atomic 0.3.20", -] - -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" -dependencies = [ - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - 
"windows-sys 0.48.0", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - -[[package]] -name = "num" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f" -dependencies = [ - "num-bigint 0.3.3", - "num-complex 0.3.1", - "num-integer", - "num-iter", - "num-rational 0.3.2", - "num-traits", -] - -[[package]] -name = "num" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" -dependencies = [ - "num-bigint 0.4.3", - "num-complex 0.4.3", - "num-integer", - "num-iter", - "num-rational 0.4.1", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-complex" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5" -dependencies = [ - "num-traits", - "serde", -] - -[[package]] -name = "num-complex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-derive" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2 1.0.60", - "quote 
1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg 1.1.0", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" -dependencies = [ - "autocfg 1.1.0", - "num-bigint 0.3.3", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "num-rational" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" -dependencies = [ - "autocfg 1.1.0", - "num-bigint 0.4.3", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "num_cpus" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" -dependencies = [ - "hermit-abi 0.2.6", - "libc", -] - -[[package]] -name = "object" -version = "0.30.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "openssl" -version = "0.10.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" -dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel 0.5.8", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", -] - -[[package]] -name = "opentelemetry-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449048140ee61e28f57abe6e9975eedc1f3a29855c7407bd6c12b18578863379" -dependencies = [ - "async-trait", - "bytes 1.4.0", - "http", - "opentelemetry", - "reqwest", -] - -[[package]] -name = "opentelemetry-otlp" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" -dependencies = [ - "async-trait", - "futures", - "futures-util", - "http", - "opentelemetry", - "opentelemetry-http", - "prost", - "prost-build", - "reqwest", - "thiserror", - "tokio 1.28.2", - "tonic", - "tonic-build", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" -dependencies = [ - "opentelemetry", -] - -[[package]] -name = "os_info" -version = "3.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" -dependencies = [ - "log", - "serde", - "winapi", -] - -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "pairing_ce" -version = "0.28.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007b21259660d025918e653508f03050bf23fb96a88601f9936329faadc597" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - -[[package]] -name = "parity-crypto" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b92ea9ddac0d6e1db7c49991e7d397d34a9fd814b4c93cda53788e8eef94e35" -dependencies = [ - "aes", - "aes-ctr", - "block-modes", - "digest 0.9.0", - "ethereum-types", - "hmac 0.10.1", - "lazy_static", - "pbkdf2 0.7.5", - "ripemd160", - "rustc-hex", - "scrypt", - "secp256k1 0.20.3", - "sha2 0.9.9", - "subtle", - "tiny-keccak 2.0.2", - "zeroize", -] - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.3", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "password-hash" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54986aa4bfc9b98c6a5f40184223658d187159d7b3c6af33f2b2aa25ae1db0fa" -dependencies = [ - "base64ct", - "rand_core 0.6.4", -] - -[[package]] -name = "paste" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" - -[[package]] -name = "pbkdf2" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b8c0d71734018084da0c0354193a5edfb81b20d2d57a92c5b154aefc554a4a" -dependencies = [ - "crypto-mac 0.10.1", -] - -[[package]] -name = "pbkdf2" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf916dd32dd26297907890d99dc2740e33f6bd9073965af4ccff2967962f5508" -dependencies = [ - "base64ct", - "crypto-mac 0.10.1", - "hmac 0.10.1", - "password-hash", - "sha2 0.9.9", -] - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "percent-encoding" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" - -[[package]] -name = "pest" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" -dependencies = [ - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "pest_meta" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.6", -] - -[[package]] -name = "petgraph" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "pin-project" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "pin-project-lite" 
-version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der", - "spki", -] - -[[package]] -name = "pkg-config" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" - -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.3", -] - -[[package]] -name = "portable-atomic" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "version_check", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "proc-macro2" -version = "1.0.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" -dependencies = [ - "unicode-ident", -] - 
-[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes 1.4.0", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" -dependencies = [ - "bytes 1.4.0", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" -dependencies = [ - "bytes 1.4.0", - "prost", -] - -[[package]] -name = "prover-service" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=v1.3.3#522dcebcc7cbaf5f9ba939b1ea229cadcb02065a" -dependencies = [ - "api", - "bincode", - "crossbeam-utils 0.8.16", - "log", - "num_cpus", - "rand 0.4.6", - "serde", - "serde_json", - "zkevm_test_harness", -] - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" -dependencies = [ - "proc-macro2 1.0.60", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.8", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.8", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rayon" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" -dependencies = [ - "crossbeam-channel 0.5.8", - "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.16", - "num_cpus", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "regex" -version = "1.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.7.2", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" - -[[package]] -name = "reqwest" -version = "0.11.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" -dependencies = [ - "base64 0.21.2", - "bytes 1.4.0", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.9", - "rustls", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 1.28.2", - "tokio-native-tls", - "tokio-rustls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots", - "winreg", -] - -[[package]] -name = "rescue_poseidon" -version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" -dependencies = [ - "addchain", - "arrayvec 0.7.3", - "blake2 0.10.6", - "byteorder", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "num-bigint 0.3.3", - "num-integer", - "num-iter", - "num-traits", - "rand 0.4.6", - "serde", - "sha3 0.9.1", - "smallvec", -] - -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint", - "hmac 0.12.1", - "zeroize", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes 1.4.0", - "rustc-hex", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.37.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustls" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" -dependencies = [ - "base64 0.21.2", -] - -[[package]] -name = "rustls-webpki" -version = "0.100.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" - -[[package]] -name = "ryu" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" - -[[package]] -name = "salsa20" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" -dependencies = [ - "cipher", -] - -[[package]] -name = "schannel" -version = "0.1.21" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" -dependencies = [ - "windows-sys 0.42.0", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scrypt" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" -dependencies = [ - "base64 0.13.1", - "hmac 0.10.1", - "pbkdf2 0.6.0", - "rand 0.7.3", - "rand_core 0.5.1", - "salsa20", - "sha2 0.9.9", - "subtle", -] - -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct", - "der", - "generic-array", - "pkcs8", - "subtle", - "zeroize", -] - -[[package]] -name = "secp256k1" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" -dependencies = [ - "rand 0.6.5", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - -[[package]] -name = "security-framework" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" - -[[package]] -name = "sentry" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" -dependencies = [ - "httpdate", - "native-tls", - "reqwest", - "sentry-backtrace", - "sentry-contexts", - "sentry-core", - "sentry-debug-images", - "sentry-panic", - "sentry-tracing", - "tokio 1.28.2", - "ureq", -] - -[[package]] -name = "sentry-backtrace" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" -dependencies = [ - "backtrace", - "once_cell", - "regex", - "sentry-core", -] - -[[package]] -name = "sentry-contexts" -version = "0.31.4" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" -dependencies = [ - "hostname", - "libc", - "os_info", - "rustc_version", - "sentry-core", - "uname", -] - -[[package]] -name = "sentry-core" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-debug-images" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" -dependencies = [ - "findshlibs", - "once_cell", - "sentry-core", -] - -[[package]] -name = "sentry-panic" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" -dependencies = [ - "sentry-backtrace", - "sentry-core", -] - -[[package]] -name = "sentry-tracing" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" -dependencies = [ - "sentry-backtrace", - "sentry-core", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-types" -version = "0.31.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" -dependencies = [ - "debugid", - "getrandom 0.2.10", - "hex", - "serde", - "serde_json", - "thiserror", - "time 0.3.22", - "url", - "uuid", -] - -[[package]] -name = "serde" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "serde_json" -version = "1.0.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "setup_key_generator_and_server" -version = "1.0.0" -dependencies = [ - "api", - "circuit_testing", - "itertools", - "prover-service", - "structopt", - "vlog", - "zkevm_test_harness", - 
"zksync_config", - "zksync_types", -] - -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", -] - -[[package]] -name = "sha3" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - -[[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "slab" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" -dependencies = [ - "autocfg 1.1.0", -] - -[[package]] -name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der", -] - -[[package]] -name = "splitmut" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.60", - "quote 1.0.28", - "rustversion", - "syn 1.0.109", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "unicode-ident", -] - -[[package]] -name = "sync_vm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#22d9d3a2018df8d4ac4bc0b0ada61c191d0cee30" -dependencies = [ - "arrayvec 0.7.3", - "cs_derive", - "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "hex", - "itertools", - "num-bigint 0.4.3", - "num-derive 0.3.3", - "num-integer", - "num-traits", - "once_cell", - "rand 0.4.6", - "rescue_poseidon", - "serde", - "sha2 0.10.6", - "sha3 0.10.6", - "smallvec", - "zk_evm", - "zkevm_opcode_defs", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 1.0.0", - "fastrand", - "redox_syscall", - "rustix", - "windows-sys 0.48.0", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "test-log" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "thread_local" -version = "1.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", -] - -[[package]] -name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" -dependencies = [ - "itoa", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" - -[[package]] -name = "time-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" -dependencies = [ - "time-core", -] - -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "pin-project-lite 0.1.12", - "slab", -] - -[[package]] -name = "tokio" -version = "1.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" -dependencies = [ - "autocfg 1.1.0", - "bytes 1.4.0", - "libc", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite 0.2.9", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes 1.4.0", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.9", - "tokio 1.28.2", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes 1.4.0", - "futures-core", - "futures-sink", - "pin-project-lite 0.2.9", - "tokio 1.28.2", - "tracing", -] - -[[package]] -name = "toml_datetime" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" - -[[package]] -name = "toml_edit" -version = "0.19.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow", -] 
- -[[package]] -name = "tonic" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" -dependencies = [ - "async-stream", - "async-trait", - "base64 0.13.1", - "bytes 1.4.0", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio 1.28.2", - "tokio-stream", - "tokio-util 0.6.10", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" -dependencies = [ - "proc-macro2 1.0.60", - "prost-build", - "quote 1.0.28", - "syn 1.0.109", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "pin-project", - "pin-project-lite 0.2.9", - "rand 0.8.5", - "slab", - "tokio 1.28.2", - "tokio-util 0.7.8", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.9", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "time 0.3.22", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "uname" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" -dependencies = [ - "libc", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "ureq" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" -dependencies = [ - "base64 0.21.2", - "log", - "native-tls", - "once_cell", - "url", -] - -[[package]] -name = "url" -version = "2.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna 0.4.0", - "percent-encoding", - "serde", -] - -[[package]] -name = "uuid" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" -dependencies = [ - "getrandom 0.2.10", - "serde", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "vlog" -version = "1.0.0" -dependencies = [ - "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "sentry", - "serde_json", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" -dependencies = [ - "quote 
1.0.28", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" -dependencies = [ - "proc-macro2 1.0.60", - "quote 1.0.28", - "syn 2.0.18", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "web-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" -dependencies = [ - "arrayvec 0.7.3", - "base64 0.13.1", - "bytes 1.4.0", - "derive_more", - "ethabi", - "ethereum-types", - "futures", - "futures-timer", - "headers", - "hex", - "idna 0.2.3", - "jsonrpc-core", - "log", - "once_cell", - "parking_lot", - "pin-project", - "reqwest", - "rlp", - "secp256k1 0.21.3", - "serde", - "serde_json", - "tiny-keccak 2.0.2", - "url", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] - -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" 
-dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" - -[[package]] -name = "winnow" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" -dependencies = [ - "memchr", -] - -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" - -[[package]] -name = "zk_evm" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#c08a8581421d2a0cf1fc8cbbdcd06c00da01fe0e" -dependencies = [ - "anyhow", - "lazy_static", - "num 0.4.0", - "serde", - "serde_json", - "static_assertions", - "zk_evm_abstractions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zk_evm_abstractions" -version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#973a1f661c045e0e8b9a287505f353659279b3b3" -dependencies = [ - "anyhow", - "serde", - "static_assertions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm-assembly" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#edc364e59a2eea9c4b1d4ce79f15d0b7c6b55b98" -dependencies = [ - "env_logger", - "hex", - "lazy_static", - "log", - "nom", - "num-bigint 0.4.3", - "num-traits", - "sha3 0.10.6", - "smallvec", - "structopt", - "thiserror", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#2f69c6975a272e8c31d2d82c136a4ea81df25115" -dependencies = [ - "bitflags 2.3.2", - "blake2 0.10.6", - "ethereum-types", - "k256", - "lazy_static", - "sha2 0.10.6", - "sha3 0.10.6", -] - -[[package]] -name = "zkevm_test_harness" -version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#6453eab3c9c8915f588ff4eceb48d7be9a695ecb" -dependencies = [ - "bincode", - "circuit_testing", - "codegen 0.2.0", - "crossbeam 0.8.2", - "derivative", - "env_logger", - "hex", - "num-bigint 0.4.3", - "num-integer", - "num-traits", - "rayon", - "serde", - "serde_json", - "smallvec", - "structopt", - "sync_vm", - "test-log", - "tracing", - "zk_evm", - "zkevm-assembly", -] - -[[package]] -name = "zksync_basic_types" -version = "1.0.0" -dependencies = [ - "serde", - "web3", -] - -[[package]] -name = "zksync_config" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "envy", - "num 0.3.1", - "once_cell", - "serde", - "serde_json", - "url", - "zksync_basic_types", - "zksync_contracts", - "zksync_utils", -] - -[[package]] -name = "zksync_contracts" -version = "1.0.0" -dependencies = [ - "ethabi", - "hex", - "once_cell", - "serde", - "serde_json", - "zksync_utils", -] - -[[package]] 
-name = "zksync_crypto" -version = "1.0.0" -dependencies = [ - "base64 0.13.1", - "blake2 0.10.6", - "hex", - "once_cell", - "rand 0.4.6", - "serde", - "sha2 0.9.9", - "thiserror", - "zksync_basic_types", -] - -[[package]] -name = "zksync_mini_merkle_tree" -version = "1.0.0" -dependencies = [ - "once_cell", - "zksync_basic_types", - "zksync_crypto", -] - -[[package]] -name = "zksync_types" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "blake2 0.10.6", - "chrono", - "codegen 0.1.0", - "metrics", - "num 0.3.1", - "once_cell", - "parity-crypto", - "rlp", - "serde", - "serde_json", - "serde_with", - "strum", - "thiserror", - "zk_evm", - "zkevm-assembly", - "zkevm_test_harness", - "zksync_basic_types", - "zksync_config", - "zksync_contracts", - "zksync_mini_merkle_tree", - "zksync_utils", -] - -[[package]] -name = "zksync_utils" -version = "1.0.0" -dependencies = [ - "anyhow", - "bigdecimal", - "envy", - "futures", - "hex", - "itertools", - "metrics", - "num 0.3.1", - "reqwest", - "serde", - "thiserror", - "tokio 1.28.2", - "vlog", - "zk_evm", - "zksync_basic_types", -] diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index 18b7ad8fd068..741d8e0500d6 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -129,8 +129,8 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.raw( vec![ - "//", - "//", + "H", + "A", ] .join("\n"), ); diff --git a/core/bin/test_node/Cargo.toml b/core/bin/test_node/Cargo.toml deleted file mode 100644 index 873e92c90cdc..000000000000 --- a/core/bin/test_node/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "zksync_test_node" -version = "1.0.0" -edition = "2018" -authors = ["The Matter Labs Team "] -homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" -license = "MIT OR Apache-2.0" -keywords = ["blockchain", "zksync"] -categories = ["cryptography"] -publish = false # We don't want to publish our binaries. 
-
-[dependencies]
-zksync_core = { path = "../zksync_core", version = "1.0" }
-zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" }
-vlog = { path = "../../lib/vlog", version = "1.0" }
-
-zksync_contracts = { path = "../../lib/contracts" }
-zksync_types = { path = "../../lib/types", version = "1.0" }
-zksync_utils = { path = "../../lib/utils", version = "1.0" }
-zksync_state = {path = "../../lib/state", version = "1.0" }
-vm = {path = "../../lib/vm", version = "0.1.0"}
-
-anyhow = "1.0"
-tokio = { version = "1", features = ["time", "rt"] }
-futures = { version = "0.3", features = ["compat"] }
-once_cell = "1.7"
-
-
-jsonrpc-http-server = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" }
-jsonrpc-core = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" }
-zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-features = false, features = [
-    "server", "client"
-] }
-clap = { version = "4.2.4", features = ["derive"] }
-reqwest = { version = "0.11", features = ["blocking"] }
-serde = { version = "1.0", features = ["derive"] }
-tracing = { version = "0.1.26", features = ["log"] }
-tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter", "time", "json"] }
-bigdecimal = { version = "0.2.0" }
diff --git a/core/bin/test_node/README.md b/core/bin/test_node/README.md
deleted file mode 100644
index 0f3efaf67268..000000000000
--- a/core/bin/test_node/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# In-memory node, with fork support
-
-This crate provides an in-memory node that supports forking the state from other networks.
-
-The goal of this crate is to offer a fast solution for integration testing, bootloader and system contract testing, and
-prototyping.
-
-Please note that this crate is still in the alpha stage, and not all functionality is fully supported. For final
-testing, it is highly recommended to use the 'local-node' or a testnet.
-
-Current limitations:
-
-- No communication between Layer 1 and Layer 2 (the local node operates only on Layer 2).
-- Many APIs are not yet implemented, but the basic set of APIs is supported.
-- No support for accessing historical data, such as the storage state at a specific block.
-- Only one transaction is allowed per Layer 1 batch.
-- Fixed values are returned for zk gas estimation.
-
-Current features:
-
-- Can fork the state of the mainnet, testnet, or a custom network at any given height.
-- Uses local bootloader and system contracts, making it suitable for testing new changes.
-- When running in non-fork mode, it operates deterministically (only one transaction per block, etc.), which simplifies
-  testing.
-- Starts up quickly and comes pre-configured with a few 'rich' accounts.
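Before the how-to below, here is a minimal sketch (not part of the deleted README) of talking to the node from Rust over JSON-RPC, using the same `zksync_web3_decl` client APIs this crate itself depends on. The default port 8011 comes from the CLI definition further down in this diff; everything else is illustrative:

```rust
use zksync_web3_decl::{
    jsonrpsee::http_client::HttpClientBuilder,
    namespaces::EthNamespaceClient,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // The test node listens on 8011 unless --port is overridden.
    let client = HttpClientBuilder::default().build("http://localhost:8011")?;

    // Any eth_* method the node implements can be called through the typed
    // namespace traits; here we just read the latest block number.
    let block = client.get_block_number().await?;
    println!("current block: {block}");
    Ok(())
}
```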
-
-## How to
-
-To start a node:
-
-```shell
-cargo run --release -p zksync_test_node run
-```
-
-This will run a node (with an empty state) and make it available on port 8011.
-
-To fork mainnet:
-
-```shell
-cargo run --release -p zksync_test_node fork mainnet
-```
-
-This will run the node, forked at the current head of mainnet.
-
-You can also specify a custom HTTP endpoint and a custom forking height:
-
-```shell
-cargo run --release -p zksync_test_node fork --fork-at 7000000 http://172.17.0.3:3060
-```
-
-## Forking network & sending calls
-
-You can use your favorite development tool (or tools like `curl`) or zksync-foundry:
-
-Check the testnet LINK balance:
-
-```shell
-$ cargo run --release -p zksync_test_node fork testnet
-
-$ zkcast call 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 "name()(string)" --rpc-url http://localhost:8011
-
-> ChainLink Token (goerli)
-
-
-$ zkcast call 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 "balanceOf(address)(uint256)" 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 --rpc-url http://localhost:8011
-> 28762283719732275444443116625665
-```
-
-Or mainnet USDT:
-
-```shell
-cargo run -p zksync_test_node fork mainnet
-
-zkcast call 0x493257fD37EDB34451f62EDf8D2a0C418852bA4C "name()(string)" --rpc-url http://localhost:8011
-
-> Tether USD
-```
-
-And you can also build & deploy your own contracts:
-
-```shell
-zkforge zkc src/Greeter.sol:Greeter --constructor-args "ZkSync and Foundry" --private-key 7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110 --rpc-url http://localhost:8011 --chain 270
-
-```
-
-## Testing bootloader & system contracts
-
-The in-memory node uses the currently compiled bootloader & system contracts, which makes it easy to test
-changes (and, together with forking, to see the effects of those changes on already deployed contracts).
-
-You can see the bootloader logs by setting the proper log level. In the example below, we recompile the bootloader and
-run it against a testnet fork.
-
-```shell
-
-cd etc/system-contracts
-yarn preprocess && yarn hardhat run ./scripts/compile-yul.ts
-cd -
-RUST_LOG=vm=trace cargo run -p zksync_test_node fork --fork-at 70000000 testnet
-```
-
-## Replaying another network's transaction locally
-
-Imagine that you have a testnet transaction that you'd like to replay locally (for example, to see more debug
-information).
-
-```shell
-cargo run --release -p zksync_test_node replay_tx testnet 0x7f039bcbb1490b855be37e74cf2400503ad57f51c84856362f99b0cbf1ef478a
-```
-
-### How does it work
-
-It utilizes an in-memory database to store the state information and employs simplified hashmaps to track blocks and
-transactions.
-
-In fork mode, it attempts to retrieve missing storage data from a remote source when it's not available locally.
diff --git a/core/bin/test_node/src/fork.rs b/core/bin/test_node/src/fork.rs
deleted file mode 100644
index fe12d7ae03b2..000000000000
--- a/core/bin/test_node/src/fork.rs
+++ /dev/null
@@ -1,301 +0,0 @@
-//! This file holds tools used for test-forking other networks.
-//!
-//! There is ForkStorage (a wrapper over InMemoryStorage)
-//! and ForkDetails, which parses the network address and fork height from arguments.
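To make the module header concrete, here is a hypothetical usage sketch of the two tools it describes; the `ForkDetails` and `ForkStorage` APIs are the ones defined just below, the imports mirror this file's own, and the storage key is a placeholder:

```rust
use zksync_basic_types::{AccountTreeId, H256};
use zksync_state::ReadStorage;
use zksync_types::StorageKey;

// Sketch only: assumes it runs inside a tokio runtime, like main.rs does.
async fn demo_fork_read() {
    // Snapshot chain metadata (L1 batch, miniblock, timestamp) at the
    // current head of the testnet.
    let fork = ForkDetails::from_network("testnet", None).await;

    // Storage that reads through to the remote node on local misses and
    // memoizes the answers in the caches defined below.
    let mut storage = ForkStorage::new(Some(fork));

    // Any storage slot can now be read as if it were local; a miss is
    // fetched via eth_getStorageAt pinned to the forked miniblock.
    let key = StorageKey::new(AccountTreeId::new(Default::default()), H256::zero());
    let _value = storage.read_value(&key);
}
```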
-
-use std::{
-    collections::HashMap,
-    convert::TryInto,
-    future::Future,
-    sync::{Arc, RwLock},
-};
-
-use tokio::runtime::Builder;
-use zksync_basic_types::{L1BatchNumber, L2ChainId, MiniblockNumber, H256, U64};
-
-use zksync_state::{InMemoryStorage, ReadStorage};
-use zksync_types::{
-    api::{BlockIdVariant, BlockNumber},
-    l2::L2Tx,
-    StorageKey,
-};
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
-
-use zksync_web3_decl::{jsonrpsee::http_client::HttpClient, namespaces::EthNamespaceClient};
-use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient};
-
-use crate::node::TEST_NODE_NETWORK_ID;
-
-fn block_on<F: Future + Send + 'static>(future: F) -> F::Output
-where
-    F::Output: Send,
-{
-    std::thread::spawn(move || {
-        let runtime = Builder::new_current_thread()
-            .enable_all()
-            .build()
-            .expect("tokio runtime creation failed");
-        runtime.block_on(future)
-    })
-    .join()
-    .unwrap()
-}
-
-/// In-memory storage that allows 'forking' from another network.
-/// If forking is enabled, it reads missing data from the remote location.
-#[derive(Debug)]
-pub struct ForkStorage {
-    pub inner: Arc<RwLock<ForkStorageInner>>,
-    pub chain_id: L2ChainId,
-}
-
-#[derive(Debug)]
-pub struct ForkStorageInner {
-    // Underlying local storage
-    pub raw_storage: InMemoryStorage,
-    // Cache of data that was read from the remote location.
-    pub value_read_cache: HashMap<StorageKey, H256>,
-    // Cache of factory deps that were read from the remote location.
-    pub factory_dep_cache: HashMap<H256, Option<Vec<u8>>>,
-    // If set - it holds the necessary information on where to fetch the data.
-    // If not set - it will simply read from the underlying storage.
-    pub fork: Option<ForkDetails>,
-}
-
-impl ForkStorage {
-    pub fn new(fork: Option<ForkDetails>) -> Self {
-        let chain_id = fork
-            .as_ref()
-            .and_then(|d| d.overwrite_chain_id)
-            .unwrap_or(L2ChainId(TEST_NODE_NETWORK_ID));
-        println!("Starting network with chain id: {:?}", chain_id);
-
-        ForkStorage {
-            inner: Arc::new(RwLock::new(ForkStorageInner {
-                raw_storage: InMemoryStorage::with_system_contracts_and_chain_id(
-                    chain_id,
-                    hash_bytecode,
-                ),
-                value_read_cache: Default::default(),
-                fork,
-                factory_dep_cache: Default::default(),
-            })),
-            chain_id,
-        }
-    }
-
-    fn read_value_internal(&self, key: &StorageKey) -> zksync_types::StorageValue {
-        let mut mutator = self.inner.write().unwrap();
-        let local_storage = mutator.raw_storage.read_value(key);
-
-        if let Some(fork) = &mutator.fork {
-            if !H256::is_zero(&local_storage) {
-                return local_storage;
-            }
-
-            if let Some(value) = mutator.value_read_cache.get(key) {
-                return *value;
-            }
-            let fork_ = (*fork).clone();
-            let key_ = *key;
-
-            let client = fork.create_client();
-
-            let result = block_on(async move {
-                client
-                    .get_storage_at(
-                        *key_.account().address(),
-                        h256_to_u256(*key_.key()),
-                        Some(BlockIdVariant::BlockNumber(BlockNumber::Number(U64::from(
-                            fork_.l2_miniblock,
-                        )))),
-                    )
-                    .await
-            })
-            .unwrap();
-
-            mutator.value_read_cache.insert(*key, result);
-            result
-        } else {
-            local_storage
-        }
-    }
-
-    pub fn load_factory_dep_internal(&self, hash: H256) -> Option<Vec<u8>> {
-        let mut mutator = self.inner.write().unwrap();
-        let local_storage = mutator.raw_storage.load_factory_dep(hash);
-        if let Some(fork) = &mutator.fork {
-            if local_storage.is_some() {
-                return local_storage;
-            }
-            if let Some(value) = mutator.factory_dep_cache.get(&hash) {
-                return value.clone();
-            }
-
-            let client = fork.create_client();
-            let result = block_on(async move { client.get_bytecode_by_hash(hash).await }).unwrap();
-            mutator.factory_dep_cache.insert(hash, result.clone());
-            result
-        } else {
-            local_storage
-        }
-    }
-}
-
-impl ReadStorage for ForkStorage {
-    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        (&*self).is_write_initial(key)
-    }
-
-    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
-        (&*self).load_factory_dep(hash)
-    }
-
-    fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue {
-        (&*self).read_value(key)
-    }
-}
-
-impl ReadStorage for &ForkStorage {
-    fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue {
-        self.read_value_internal(key)
-    }
-
-    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        let mut mutator = self.inner.write().unwrap();
-        mutator.raw_storage.is_write_initial(key)
-    }
-
-    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
-        self.load_factory_dep_internal(hash)
-    }
-}
-
-impl ForkStorage {
-    pub fn set_value(&mut self, key: StorageKey, value: zksync_types::StorageValue) {
-        let mut mutator = self.inner.write().unwrap();
-        mutator.raw_storage.set_value(key, value)
-    }
-    pub fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>) {
-        let mut mutator = self.inner.write().unwrap();
-        mutator.raw_storage.store_factory_dep(hash, bytecode)
-    }
-}
-
-/// Holds the information about the original chain.
-#[derive(Debug, Clone)]
-pub struct ForkDetails {
-    // URL to the server.
-    pub fork_url: String,
-    // Block number at which we forked (the next block to create is l1_block + 1).
-    pub l1_block: L1BatchNumber,
-    pub l2_miniblock: u64,
-    pub block_timestamp: u64,
-    pub overwrite_chain_id: Option<L2ChainId>,
-}
-
-impl ForkDetails {
-    pub async fn from_url_and_miniblock_and_chain(
-        url: &str,
-        client: HttpClient,
-        miniblock: u64,
-        chain_id: Option<L2ChainId>,
-    ) -> Self {
-        let block_details = client
-            .get_block_details(MiniblockNumber(miniblock as u32))
-            .await
-            .unwrap()
-            .unwrap_or_else(|| panic!("Could not find block {:?} in {:?}", miniblock, url));
-
-        let l1_batch_number = block_details.l1_batch_number;
-
-        println!(
-            "Creating fork from {:?} L1 block: {:?} L2 block: {:?} with timestamp {:?}",
-            url, l1_batch_number, miniblock, block_details.timestamp
-        );
-
-        ForkDetails {
-            fork_url: url.to_owned(),
-            l1_block: l1_batch_number,
-            block_timestamp: block_details.timestamp,
-            l2_miniblock: miniblock,
-            overwrite_chain_id: chain_id,
-        }
-    }
-
-    /// Create a fork from a given network at a given height.
-    pub async fn from_network(fork: &str, fork_at: Option<u64>) -> Self {
-        let (url, client) = Self::fork_to_url_and_client(fork);
-        let l2_miniblock = if let Some(fork_at) = fork_at {
-            fork_at
-        } else {
-            client.get_block_number().await.unwrap().as_u64()
-        };
-        Self::from_url_and_miniblock_and_chain(url, client, l2_miniblock, None).await
-    }
-
-    /// Create a fork from a given network, at a height BEFORE a transaction.
-    /// This will allow us to apply this transaction locally on top of this fork.
-    pub async fn from_network_tx(fork: &str, tx: H256) -> Self {
-        let (url, client) = Self::fork_to_url_and_client(fork);
-        let tx_details = client.get_transaction_by_hash(tx).await.unwrap().unwrap();
-        let overwrite_chain_id = Some(L2ChainId(tx_details.chain_id.as_u32() as u16));
-        let miniblock_number = MiniblockNumber(tx_details.block_number.unwrap().as_u32());
-        // We have to sync to the miniblock just before the one that contains the transaction.
-        let l2_miniblock = miniblock_number.saturating_sub(1) as u64;
-
-        Self::from_url_and_miniblock_and_chain(url, client, l2_miniblock, overwrite_chain_id).await
-    }
-
-    /// Returns the URL and HTTP client for a given fork name.
-    pub fn fork_to_url_and_client(fork: &str) -> (&str, HttpClient) {
-        let url = match fork {
-            "mainnet" => "https://mainnet.era.zksync.io:443",
-            "testnet" => "https://testnet.era.zksync.dev:443",
-            _ => fork,
-        };
-
-        let client = HttpClientBuilder::default()
-            .build(url)
-            .expect("Unable to create a client for fork");
-
-        (url, client)
-    }
-
-    /// Returns the transactions that are in the same L2 miniblock as replay_tx, but were executed before it.
-    pub async fn get_earlier_transactions_in_same_block(&self, replay_tx: H256) -> Vec<L2Tx> {
-        let client = self.create_client();
-
-        let tx_details = client
-            .get_transaction_by_hash(replay_tx)
-            .await
-            .unwrap()
-            .unwrap();
-        let miniblock = MiniblockNumber(tx_details.block_number.unwrap().as_u32());
-
-        // Fetch all the transactions from this miniblock.
-        let block_transactions: Vec<zksync_types::Transaction> =
-            client.get_raw_block_transactions(miniblock).await.unwrap();
-        let mut tx_to_apply = Vec::new();
-
-        for tx in block_transactions {
-            let h = tx.hash();
-            let l2_tx: L2Tx = tx.try_into().unwrap();
-            tx_to_apply.push(l2_tx);
-
-            if h == replay_tx {
-                return tx_to_apply;
-            }
-        }
-        panic!(
-            "Could not find tx {:?} in miniblock: {:?}",
-            replay_tx, miniblock
-        );
-    }
-
-    pub fn create_client(&self) -> HttpClient {
-        HttpClientBuilder::default()
-            .build(self.fork_url.clone())
-            .expect("Unable to create a client for fork")
-    }
-}
diff --git a/core/bin/test_node/src/main.rs b/core/bin/test_node/src/main.rs
deleted file mode 100644
index ba5d5be830dd..000000000000
--- a/core/bin/test_node/src/main.rs
+++ /dev/null
@@ -1,184 +0,0 @@
-use clap::{Parser, Subcommand};
-use fork::ForkDetails;
-use zks::ZkMockNamespaceImpl;
-
-mod fork;
-mod node;
-mod utils;
-mod zks;
-
-use node::InMemoryNode;
-
-use std::{
-    net::{IpAddr, Ipv4Addr, SocketAddr},
-    str::FromStr,
-};
-
-use tracing::Level;
-use tracing_subscriber::{EnvFilter, FmtSubscriber};
-
-use futures::{
-    channel::oneshot,
-    future::{self},
-    FutureExt,
-};
-use jsonrpc_core::IoHandler;
-use zksync_basic_types::{H160, H256};
-
-use zksync_core::api_server::web3::backend_jsonrpc::namespaces::{
-    eth::EthNamespaceT, zks::ZksNamespaceT,
-};
-
-/// List of wallets (address, private key) that we seed with tokens at start.
-pub const RICH_WALLETS: [(&str, &str); 4] = [
-    (
-        "0x36615Cf349d7F6344891B1e7CA7C72883F5dc049",
-        "0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110",
-    ),
-    (
-        "0xa61464658AfeAf65CccaaFD3a512b69A83B77618",
-        "0xac1e735be8536c6534bb4f17f06f6afc73b2b5ba84ac2cfb12f7461b20c0bbe3",
-    ),
-    (
-        "0x0D43eB5B8a47bA8900d84AA36656c92024e9772e",
-        "0xd293c684d884d56f8d6abd64fc76757d3664904e309a0645baf8522ab6366d9e",
-    ),
-    (
-        "0xA13c10C0D5bd6f79041B9835c63f91de35A15883",
-        "0x850683b40d4a740aa6e745f889a6fdc8327be76e122f5aba645a5b02d0248db8",
-    ),
-];
-
-async fn build_json_http(addr: SocketAddr, node: InMemoryNode) -> tokio::task::JoinHandle<()> {
-    let (sender, recv) = oneshot::channel::<()>();
-
-    let io_handler = {
-        let mut io = IoHandler::new();
-        io.extend_with(node.to_delegate());
-        io.extend_with(ZkMockNamespaceImpl.to_delegate());
-
-        io
-    };
-
-    std::thread::spawn(move || {
-        let runtime = tokio::runtime::Builder::new_multi_thread()
-            .enable_all()
-            .worker_threads(1)
-            .build()
-            .unwrap();
-
-        let server = jsonrpc_http_server::ServerBuilder::new(io_handler)
-            .threads(1)
-            .event_loop_executor(runtime.handle().clone())
-            .start_http(&addr)
-            .unwrap();
-
-        server.wait();
-        let _ = sender;
-    });
-
-    tokio::spawn(recv.map(drop))
-}
-
-#[derive(Debug, Parser)]
-#[command(author = "Matter Labs", version, about = "Test Node", long_about = None)]
-struct Cli {
-    #[command(subcommand)]
-    command: Command,
-    #[arg(long, default_value = "8011")]
-    /// Port to listen on - default: 8011
-    port: u16,
-}
-
-#[derive(Debug, Subcommand)]
-enum Command {
-    #[command(name = "run")]
-    Run,
-    #[command(name = "fork")]
-    Fork(ForkArgs),
-    #[command(name = "replay_tx")]
-    ReplayTx(ReplayArgs),
-}
-
-#[derive(Debug, Parser)]
-struct ForkArgs {
-    /// Whether to fork from an existing network.
-    /// If not set - will start a new network from genesis.
-    /// If set - will try to fork a remote network. Possible values:
-    ///  - mainnet
-    ///  - testnet
-    ///  - http://XXX:YY
-    network: String,
-    #[arg(long)]
-    // Fork at a given L2 miniblock height.
-    // If not set - will use the current finalized block from the network.
-    fork_at: Option<u64>,
-}
-#[derive(Debug, Parser)]
-struct ReplayArgs {
-    /// Whether to fork from an existing network.
-    /// If not set - will start a new network from genesis.
-    /// If set - will try to fork a remote network. Possible values:
-    ///  - mainnet
-    ///  - testnet
-    ///  - http://XXX:YY
-    network: String,
-    /// Transaction hash to replay.
- tx: H256, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let opt = Cli::parse(); - let filter = EnvFilter::from_default_env(); - - let subscriber = FmtSubscriber::builder() - .with_max_level(Level::TRACE) - .with_env_filter(filter) - .finish(); - - // Initialize the subscriber - tracing::subscriber::set_global_default(subscriber).expect("failed to set tracing subscriber"); - - let fork_details = match &opt.command { - Command::Run => None, - Command::Fork(fork) => Some(ForkDetails::from_network(&fork.network, fork.fork_at).await), - Command::ReplayTx(replay_tx) => { - Some(ForkDetails::from_network_tx(&replay_tx.network, replay_tx.tx).await) - } - }; - - // If we're replaying the transaction, we need to sync to the previous block - // and then replay all the transactions that happened in it before ours. - let transactions_to_replay = if let Command::ReplayTx(replay_tx) = &opt.command { - fork_details - .as_ref() - .unwrap() - .get_earlier_transactions_in_same_block(replay_tx.tx) - .await - } else { - vec![] - }; - - let node = InMemoryNode::new(fork_details); - - if !transactions_to_replay.is_empty() { - node.apply_txs(transactions_to_replay); - } - - println!("Setting Rich accounts:"); - for (address, private_key) in RICH_WALLETS.iter() { - node.set_rich_account(H160::from_str(address).unwrap()); - println!("Address: {:?} Key: {:?}", address, private_key) - } - - let threads = build_json_http( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), opt.port), - node, - ) - .await; - - future::select_all(vec![threads]).await.0.unwrap(); - - Ok(()) -} diff --git a/core/bin/test_node/src/node.rs b/core/bin/test_node/src/node.rs deleted file mode 100644 index 0a2777e70f09..000000000000 --- a/core/bin/test_node/src/node.rs +++ /dev/null @@ -1,657 +0,0 @@ -//! In-memory node, that supports forking other networks. -use crate::{ - fork::{ForkDetails, ForkStorage}, - utils::IntoBoxedFuture, -}; - -use std::{ - collections::HashMap, - convert::TryInto, - sync::{Arc, RwLock}, -}; - -use once_cell::sync::Lazy; - -use zksync_basic_types::{AccountTreeId, Bytes, H160, H256, U256, U64}; -use zksync_contracts::BaseSystemContracts; -use zksync_core::api_server::web3::backend_jsonrpc::namespaces::eth::EthNamespaceT; -use zksync_state::{ReadStorage, StorageView, WriteStorage}; -use zksync_types::{ - api::{TransactionReceipt, TransactionVariant}, - get_code_key, get_nonce_key, - l2::L2Tx, - transaction_request::{l2_tx_from_call_req, TransactionRequest}, - tx::tx_execution_info::TxExecutionStatus, - utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, - StorageKey, StorageLogQueryType, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS, - L2_ETH_TOKEN_ADDRESS, -}; -use zksync_utils::{h256_to_account_address, h256_to_u256, h256_to_u64, u256_to_h256}; - -use vm::{ - utils::{create_test_block_params, BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, ETH_CALL_GAS_LIMIT}, - vm::VmTxExecutionResult, - vm_with_bootloader::{ - init_vm_inner, push_transaction_to_bootloader_memory, BlockContextMode, BootloaderJobType, - TxExecutionMode, - }, - HistoryEnabled, OracleTools, -}; -use zksync_web3_decl::types::{Filter, FilterChanges}; - -pub const MAX_TX_SIZE: usize = 1000000; -// Timestamp of the first block (if not running in fork mode). -pub const NON_FORK_FIRST_BLOCK_TIMESTAMP: u64 = 1000; -/// Network ID we use for the test node. -pub const TEST_NODE_NETWORK_ID: u16 = 270; - -/// Basic information about the generated block (which is both an L1 batch and a miniblock). 
-/// Currently, this test node supports exactly one transaction per block. -pub struct BlockInfo { - pub batch_number: u32, - pub block_timestamp: u64, - /// Transaction included in this block. - pub tx_hash: H256, -} - -/// Information about the executed transaction. -pub struct TxExecutionInfo { - pub tx: L2Tx, - // Batch number where transaction was executed. - pub batch_number: u32, - pub miniblock_number: u64, - pub result: VmTxExecutionResult, -} - -/// Helper struct for InMemoryNode. -pub struct InMemoryNodeInner { - /// Timestamp, batch number and miniblock number that will be used by the next block. - pub current_timestamp: u64, - pub current_batch: u32, - pub current_miniblock: u64, - // Map from transaction to details about the execution. - pub tx_results: HashMap<H256, TxExecutionInfo>, - // Map from batch number to information about the block. - pub blocks: HashMap<u32, BlockInfo>, - // Underlying storage - pub fork_storage: ForkStorage, -} - -fn not_implemented<T: Send + 'static>() -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<T>> { - Err(jsonrpc_core::Error::method_not_found()).into_boxed_future() -} - -/// In-memory node, that can be used for local & unit testing. -/// It also supports the option of forking testnet/mainnet. -/// All contents are removed when object is destroyed. -pub struct InMemoryNode { - inner: Arc<RwLock<InMemoryNodeInner>>, -} - -pub static PLAYGROUND_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = - Lazy::new(BaseSystemContracts::playground); - -fn contract_address_from_tx_result(execution_result: &VmTxExecutionResult) -> Option<H160> { - for query in execution_result.result.logs.storage_logs.iter().rev() { - if query.log_type == StorageLogQueryType::InitialWrite - && query.log_query.address == ACCOUNT_CODE_STORAGE_ADDRESS - { - return Some(h256_to_account_address(&u256_to_h256(query.log_query.key))); - } - } - None -} - -impl InMemoryNode { - pub fn new(fork: Option<ForkDetails>) -> Self { - InMemoryNode { - inner: Arc::new(RwLock::new(InMemoryNodeInner { - current_timestamp: fork - .as_ref() - .map(|f| f.block_timestamp + 1) - .unwrap_or(NON_FORK_FIRST_BLOCK_TIMESTAMP), - current_batch: fork.as_ref().map(|f| f.l1_block.0 + 1).unwrap_or(1), - current_miniblock: fork.as_ref().map(|f| f.l2_miniblock + 1).unwrap_or(1), - tx_results: Default::default(), - blocks: Default::default(), - fork_storage: ForkStorage::new(fork), - })), - } - } - - /// Applies multiple transactions - but still one per L1 batch. - pub fn apply_txs(&self, txs: Vec<L2Tx>) { - println!("Running {:?} transactions (one per batch)", txs.len()); - - for tx in txs { - println!("Executing {:?}", tx.hash()); - self.run_l2_tx(tx, TxExecutionMode::VerifyExecute); - } - } - - /// Adds a lot of tokens to a given account. - pub fn set_rich_account(&self, address: H160) { - let key = storage_key_for_eth_balance(&address); - let mut inner = self.inner.write().unwrap(); - let keys = { - let mut storage_view = StorageView::new(&inner.fork_storage); - storage_view.set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); - storage_view.modified_storage_keys().clone() - }; - - for (key, value) in keys.iter() { - inner.fork_storage.set_value(*key, *value); - } - } - - /// Runs L2 'eth call' method - that doesn't commit to a block. 
- fn run_l2_call(&self, l2_tx: L2Tx) -> Vec<u8> { - let execution_mode = TxExecutionMode::EthCall { - missed_storage_invocation_limit: 1000000, - }; - let (mut block_context, block_properties) = create_test_block_params(); - - let inner = self.inner.write().unwrap(); - - let mut storage_view = StorageView::new(&inner.fork_storage); - - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); - - let bootloader_code = &PLAYGROUND_SYSTEM_CONTRACTS; - block_context.block_number = inner.current_batch; - block_context.block_timestamp = inner.current_timestamp; - - // init vm - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context.into(), Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - bootloader_code, - execution_mode, - ); - - let tx: Transaction = l2_tx.into(); - - push_transaction_to_bootloader_memory(&mut vm, &tx, execution_mode, None); - - let vm_block_result = - vm.execute_till_block_end_with_call_tracer(BootloaderJobType::TransactionExecution); - - match vm_block_result.full_result.revert_reason { - Some(result) => result.original_data, - None => vm_block_result - .full_result - .return_data - .into_iter() - .flat_map(|val| { - let bytes: [u8; 32] = val.into(); - bytes.to_vec() - }) - .collect::<Vec<u8>>(), - } - } - - fn run_l2_tx_inner( - &self, - l2_tx: L2Tx, - execution_mode: TxExecutionMode, - ) -> ( - HashMap<StorageKey, H256>, - VmTxExecutionResult, - BlockInfo, - HashMap<U256, Vec<U256>>, - ) { - let (mut block_context, block_properties) = create_test_block_params(); - - let inner = self.inner.write().unwrap(); - - let mut storage_view = StorageView::new(&inner.fork_storage); - - let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); - - let bootloader_code = if execution_mode == TxExecutionMode::VerifyExecute { - &BASE_SYSTEM_CONTRACTS - } else { - &PLAYGROUND_SYSTEM_CONTRACTS - }; - - block_context.block_number = inner.current_batch; - block_context.block_timestamp = inner.current_timestamp; - let block = BlockInfo { - batch_number: block_context.block_number, - block_timestamp: block_context.block_timestamp, - tx_hash: l2_tx.hash(), - }; - - // init vm - let mut vm = init_vm_inner( - &mut oracle_tools, - BlockContextMode::NewBlock(block_context.into(), Default::default()), - &block_properties, - BLOCK_GAS_LIMIT, - bootloader_code, - execution_mode, - ); - - let tx: Transaction = l2_tx.into(); - - push_transaction_to_bootloader_memory(&mut vm, &tx, execution_mode, None); - - let tx_result = vm.execute_next_tx(u32::MAX, true).unwrap(); - - println!( - "Tx Execution results: {:?} {:?}", - tx_result.status, tx_result.result.revert_reason - ); - - vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); - - let bytecodes = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - let modified_keys = storage_view.modified_storage_keys().clone(); - (modified_keys, tx_result, block, bytecodes) - } - - /// Runs L2 transaction and commits it to a new block. - fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) { - let tx_hash = l2_tx.hash(); - - let (keys, result, block, bytecodes) = self.run_l2_tx_inner(l2_tx.clone(), execution_mode); - - // Write all the mutated keys (storage slots). - let mut inner = self.inner.write().unwrap(); - for (key, value) in keys.iter() { - inner.fork_storage.set_value(*key, *value); - } - - // Write all the factory deps. 
- for (hash, code) in bytecodes.iter() { - inner.fork_storage.store_factory_dep( - u256_to_h256(*hash), - code.iter() - .flat_map(|entry| { - let mut bytes = vec![0u8; 32]; - entry.to_big_endian(&mut bytes); - bytes.to_vec() - }) - .collect(), - ) - } - let current_miniblock = inner.current_miniblock; - inner.tx_results.insert( - tx_hash, - TxExecutionInfo { - tx: l2_tx, - batch_number: block.batch_number, - miniblock_number: current_miniblock, - result, - }, - ); - inner.blocks.insert(block.batch_number, block); - { - inner.current_timestamp += 1; - inner.current_batch += 1; - inner.current_miniblock += 1; - } - } -} - -impl EthNamespaceT for InMemoryNode { - fn chain_id(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U64>> { - let inner = self.inner.read().unwrap(); - Ok(U64::from(inner.fork_storage.chain_id.0 as u64)).into_boxed_future() - } - - fn call( - &self, - req: zksync_types::transaction_request::CallRequest, - _block: Option<zksync_types::api::BlockIdVariant>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Bytes>> { - let mut tx = l2_tx_from_call_req(req, MAX_TX_SIZE).unwrap(); - tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); - let result = self.run_l2_call(tx); - - Ok(result.into()).into_boxed_future() - } - - fn get_balance( - &self, - address: zksync_basic_types::Address, - _block: Option<zksync_types::api::BlockIdVariant>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - let balance_key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - - let balance = self - .inner - .write() - .unwrap() - .fork_storage - .read_value(&balance_key); - - Ok(h256_to_u256(balance)).into_boxed_future() - } - - fn get_block_by_number( - &self, - block_number: zksync_types::api::BlockNumber, - _full_transactions: bool, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result< - Option<zksync_types::api::Block<TransactionVariant>>, - >, - > { - // Currently we support only the 'most recent' block. 
- let reader = self.inner.read().unwrap(); - match block_number { - zksync_types::api::BlockNumber::Committed - | zksync_types::api::BlockNumber::Finalized - | zksync_types::api::BlockNumber::Latest => {} - zksync_types::api::BlockNumber::Earliest - | zksync_types::api::BlockNumber::Pending - | zksync_types::api::BlockNumber::Number(_) => return not_implemented(), - } - - let txn: Vec<TransactionVariant> = vec![]; - - let block = zksync_types::api::Block { - transactions: txn, - hash: Default::default(), - parent_hash: Default::default(), - uncles_hash: Default::default(), - author: Default::default(), - state_root: Default::default(), - transactions_root: Default::default(), - receipts_root: Default::default(), - number: U64::from(reader.current_miniblock), - l1_batch_number: Some(U64::from(reader.current_batch)), - gas_used: Default::default(), - gas_limit: Default::default(), - base_fee_per_gas: Default::default(), - extra_data: Default::default(), - logs_bloom: Default::default(), - timestamp: Default::default(), - l1_batch_timestamp: Default::default(), - difficulty: Default::default(), - total_difficulty: Default::default(), - seal_fields: Default::default(), - uncles: Default::default(), - size: Default::default(), - mix_hash: Default::default(), - nonce: Default::default(), - }; - - Ok(Some(block)).into_boxed_future() - } - - fn get_code( - &self, - address: zksync_basic_types::Address, - _block: Option<zksync_types::api::BlockIdVariant>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Bytes>> { - let code_key = get_code_key(&address); - - let code_hash = self - .inner - .write() - .unwrap() - .fork_storage - .read_value(&code_key); - - Ok(Bytes::from(code_hash.as_bytes())).into_boxed_future() - } - - fn get_transaction_count( - &self, - address: zksync_basic_types::Address, - _block: Option<zksync_types::api::BlockIdVariant>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - let nonce_key = get_nonce_key(&address); - - let result = self - .inner - .write() - .unwrap() - .fork_storage - .read_value(&nonce_key); - Ok(h256_to_u64(result).into()).into_boxed_future() - } - - fn get_transaction_receipt( - &self, - hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<TransactionReceipt>>> - { - let reader = self.inner.read().unwrap(); - let tx_result = reader.tx_results.get(&hash); - - let receipt = tx_result.map(|info| { - let status = if info.result.status == TxExecutionStatus::Success { - U64::from(1) - } else { - U64::from(0) - }; - - TransactionReceipt { - transaction_hash: hash, - transaction_index: U64::from(1), - block_hash: None, - block_number: Some(U64::from(info.miniblock_number)), - l1_batch_tx_index: None, - l1_batch_number: Some(U64::from(info.batch_number as u64)), - from: Default::default(), - to: Some(info.tx.execute.contract_address), - cumulative_gas_used: Default::default(), - gas_used: Some(info.tx.common_data.fee.gas_limit - info.result.gas_refunded), - contract_address: contract_address_from_tx_result(&info.result), - logs: vec![], - l2_to_l1_logs: vec![], - status: Some(status), - root: None, - logs_bloom: Default::default(), - transaction_type: None, - effective_gas_price: Some(500.into()), - } - }); - - Ok(receipt).into_boxed_future() - } - - fn send_raw_transaction( - &self, - tx_bytes: zksync_basic_types::Bytes, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> { - let chain_id = TEST_NODE_NETWORK_ID; - let (tx_req, hash) = - TransactionRequest::from_bytes(&tx_bytes.0, chain_id, MAX_TX_SIZE).unwrap(); - - let mut l2_tx: L2Tx = tx_req.try_into().unwrap(); - l2_tx.set_input(tx_bytes.0, hash); - assert_eq!(hash, l2_tx.hash()); - - self.run_l2_tx(l2_tx, TxExecutionMode::VerifyExecute); - - 
Ok(hash).into_boxed_future() - } - - // Methods below are not currently implemented. - - fn get_block_number( - &self, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U64>> { - not_implemented() - } - - fn estimate_gas( - &self, - _req: zksync_types::transaction_request::CallRequest, - _block: Option<zksync_types::api::BlockNumber>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn gas_price(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn new_filter(&self, _filter: Filter) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn new_block_filter(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn uninstall_filter(&self, _idx: U256) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> { - not_implemented() - } - - fn new_pending_transaction_filter( - &self, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn get_logs( - &self, - _filter: Filter, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_types::api::Log>>> { - not_implemented() - } - - fn get_filter_logs( - &self, - _filter_index: U256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> { - not_implemented() - } - - fn get_filter_changes( - &self, - _filter_index: U256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> { - not_implemented() - } - - fn get_block_by_hash( - &self, - _hash: zksync_basic_types::H256, - _full_transactions: bool, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result< - Option<zksync_types::api::Block<TransactionVariant>>, - >, - > { - not_implemented() - } - - fn get_block_transaction_count_by_number( - &self, - _block_number: zksync_types::api::BlockNumber, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> { - not_implemented() - } - - fn get_block_transaction_count_by_hash( - &self, - _block_hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> { - not_implemented() - } - - fn get_storage( - &self, - _address: zksync_basic_types::Address, - _idx: U256, - _block: Option<zksync_types::api::BlockIdVariant>, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> { - not_implemented() - } - - fn get_transaction_by_hash( - &self, - _hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> { - not_implemented() - } - - fn get_transaction_by_block_hash_and_index( - &self, - _block_hash: zksync_basic_types::H256, - _index: zksync_basic_types::web3::types::Index, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> { - not_implemented() - } - - fn get_transaction_by_block_number_and_index( - &self, - _block_number: zksync_types::api::BlockNumber, - _index: zksync_basic_types::web3::types::Index, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> { - not_implemented() - } - - fn protocol_version(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<String>> { - not_implemented() - } - - fn syncing( - &self, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::web3::types::SyncState>> - { - not_implemented() - } - - fn accounts( - &self, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_basic_types::Address>>> { - not_implemented() - } - - fn coinbase( - &self, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Address>> { - not_implemented() - } - - fn compilers(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<String>>> { - not_implemented() - } - - fn hashrate(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> { - not_implemented() - } - - fn get_uncle_count_by_block_hash( - &self, - _hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> { - not_implemented() - } - - fn get_uncle_count_by_block_number( - &self, - _number: zksync_types::api::BlockNumber, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> { - not_implemented() - } - - fn mining(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> { - not_implemented() - } - - fn send_transaction( - &self, - _transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> { - not_implemented() - } -} diff --git a/core/bin/test_node/src/utils.rs b/core/bin/test_node/src/utils.rs deleted file mode 100644 index 76f5017eeab7..000000000000 --- 
a/core/bin/test_node/src/utils.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::pin::Pin; - -use futures::Future; - -pub(crate) trait IntoBoxedFuture: Sized + Send + 'static { - fn into_boxed_future(self) -> Pin + Send>> { - Box::pin(async { self }) - } -} - -impl IntoBoxedFuture for Result -where - T: Send + 'static, - U: Send + 'static, -{ -} diff --git a/core/bin/test_node/src/zks.rs b/core/bin/test_node/src/zks.rs deleted file mode 100644 index c1f446073a92..000000000000 --- a/core/bin/test_node/src/zks.rs +++ /dev/null @@ -1,173 +0,0 @@ -use bigdecimal::BigDecimal; -use zksync_basic_types::{MiniblockNumber, U256}; -use zksync_core::api_server::web3::backend_jsonrpc::namespaces::zks::ZksNamespaceT; -use zksync_types::api::BridgeAddresses; - -/// Mock implementation of ZksNamespace - used only in the test node. -pub struct ZkMockNamespaceImpl; - -macro_rules! not_implemented { - () => { - Box::pin(async move { Err(jsonrpc_core::Error::method_not_found()) }) - }; -} -impl ZksNamespaceT for ZkMockNamespaceImpl { - /// We have to support this method, as zksync foundry depends on it. - /// For now, returning a fake amount of gas. - fn estimate_fee( - &self, - _req: zksync_types::transaction_request::CallRequest, - ) -> jsonrpc_core::BoxFuture> { - Box::pin(async move { - Ok(zksync_types::fee::Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }) - }) - } - - fn get_raw_block_transactions( - &self, - _block_number: MiniblockNumber, - ) -> jsonrpc_core::BoxFuture>> { - not_implemented!() - } - - fn estimate_gas_l1_to_l2( - &self, - _req: zksync_types::transaction_request::CallRequest, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_main_contract( - &self, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_testnet_paymaster( - &self, - ) -> jsonrpc_core::BoxFuture>> { - not_implemented!() - } - - fn get_bridge_contracts( - &self, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn l1_chain_id( - &self, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_confirmed_tokens( - &self, - _from: u32, - _limit: u8, - ) -> jsonrpc_core::BoxFuture>> { - not_implemented!() - } - - fn get_token_price( - &self, - _token_address: zksync_basic_types::Address, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_all_account_balances( - &self, - _address: zksync_basic_types::Address, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result>, - > { - not_implemented!() - } - - fn get_l2_to_l1_msg_proof( - &self, - _block: zksync_basic_types::MiniblockNumber, - _sender: zksync_basic_types::Address, - _msg: zksync_basic_types::H256, - _l2_log_position: Option, - ) -> jsonrpc_core::BoxFuture>> - { - not_implemented!() - } - - fn get_l2_to_l1_log_proof( - &self, - _tx_hash: zksync_basic_types::H256, - _index: Option, - ) -> jsonrpc_core::BoxFuture>> - { - not_implemented!() - } - - fn get_l1_batch_number( - &self, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_block_details( - &self, - _block_number: zksync_basic_types::MiniblockNumber, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result>, - > { - not_implemented!() - } - - fn get_miniblock_range( - &self, - _batch: zksync_basic_types::L1BatchNumber, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result>, - > { - not_implemented!() - } - - fn set_known_bytecode( - &self, - _bytecode: zksync_basic_types::Bytes, - ) -> 
jsonrpc_core::BoxFuture> { - not_implemented!() - } - - fn get_transaction_details( - &self, - _hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>> - { - not_implemented!() - } - - fn get_l1_batch_details( - &self, - _batch: zksync_basic_types::L1BatchNumber, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result>, - > { - not_implemented!() - } - - fn get_bytecode_by_hash( - &self, - _hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>>> { - not_implemented!() - } - - fn get_l1_gas_price( - &self, - ) -> jsonrpc_core::BoxFuture> { - not_implemented!() - } -} diff --git a/core/bin/verification_key_generator_and_server/Cargo.toml b/core/bin/verification_key_generator_and_server/Cargo.toml index 2189bc408c62..837e52898765 100644 --- a/core/bin/verification_key_generator_and_server/Cargo.toml +++ b/core/bin/verification_key_generator_and_server/Cargo.toml @@ -21,6 +21,7 @@ path = "src/commitment_generator.rs" [dependencies] zksync_types = {path = "../../lib/types", version = "1.0" } +zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } circuit_testing = {git = "https://github.com/matter-labs/era-circuit_testing.git", branch = "main"} itertools = "0.10.5" @@ -30,4 +31,4 @@ serde_json = "1.0.85" hex = "0.4.3" structopt = "0.3.26" ff = { package = "ff_ce", version = "0.14.1" } -toml_edit = "0.14.4" +once_cell = "1.8.0" diff --git a/core/bin/verification_key_generator_and_server/README.md b/core/bin/verification_key_generator_and_server/README.md index a3f9d6f7c768..a3e5e3240c97 100644 --- a/core/bin/verification_key_generator_and_server/README.md +++ b/core/bin/verification_key_generator_and_server/README.md @@ -23,7 +23,7 @@ This is the library that can be used by other components to fetch the verificati The main binary that generates verification key for given circuits. Most of the heavy lifting is done by the `create_vk_for_padding_size_log_2` method from circuit_testing repo. -The results are writte to the `verification_XX_key.json` files in the current repository. +The results are written to the `verification_XX_key.json` files in the current repository. 
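For orientation, reading one of the generated `verification_XX_key.json` files back is a plain serde_json load. A minimal sketch, assuming the `ZKSYNC_HOME`-relative data directory used by the surrounding code, and leaving the key as raw JSON rather than the crate's concrete `VerificationKey` type:

```rust
use std::{env, fs};

/// Sketch: load a generated `verification_{N}_key.json` file as raw JSON.
/// The path layout mirrors where the generator writes its output; adjust it
/// if your checkout differs.
fn read_verification_key(circuit_type: u8) -> serde_json::Value {
    let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into());
    let path = format!(
        "{}/core/bin/verification_key_generator_and_server/data/verification_{}_key.json",
        zksync_home, circuit_type
    );
    let text = fs::read_to_string(&path)
        .unwrap_or_else(|_| panic!("no verification key at {}", path));
    serde_json::from_str(&text).expect("invalid verification key JSON")
}
```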
## zksync_json_to_binary_vk_converter diff --git a/core/bin/verification_key_generator_and_server/src/commitment_generator.rs b/core/bin/verification_key_generator_and_server/src/commitment_generator.rs index 6b107bbf6fc0..e9a94a7566f2 100644 --- a/core/bin/verification_key_generator_and_server/src/commitment_generator.rs +++ b/core/bin/verification_key_generator_and_server/src/commitment_generator.rs @@ -1,12 +1,7 @@ -use ff::to_hex; -use std::fs; -use toml_edit::{Document, Item, Value}; -use zksync_types::circuit::{LEAF_CIRCUIT_INDEX, NODE_CIRCUIT_INDEX}; -use zksync_types::zkevm_test_harness::witness; -use zksync_types::zkevm_test_harness::witness::recursive_aggregation::erase_vk_type; -use zksync_verification_key_server::{ - get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, +use zksync_prover_utils::vk_commitment_helper::{ + get_toml_formatted_value, read_contract_toml, write_contract_toml, }; +use zksync_verification_key_server::generate_commitments; fn main() { vlog::info!("Starting commitment generation!"); @@ -20,79 +15,16 @@ fn read_and_update_contract_toml() { leaf_aggregation_commitment_hex, node_aggregation_commitment_hex, ) = generate_commitments(); - contract_doc["contracts"]["VK_COMMITMENT_BASIC_CIRCUITS"] = + contract_doc["contracts"]["RECURSION_CIRCUITS_SET_VKS_HASH"] = get_toml_formatted_value(basic_circuit_commitment_hex); - contract_doc["contracts"]["VK_COMMITMENT_LEAF"] = + contract_doc["contracts"]["RECURSION_LEAF_LEVEL_VK_HASH"] = get_toml_formatted_value(leaf_aggregation_commitment_hex); - contract_doc["contracts"]["VK_COMMITMENT_NODE"] = + contract_doc["contracts"]["RECURSION_NODE_LEVEL_VK_HASH"] = get_toml_formatted_value(node_aggregation_commitment_hex); vlog::info!("Updated toml content: {:?}", contract_doc.to_string()); write_contract_toml(contract_doc); } -fn get_toml_formatted_value(string_value: String) -> Item { - let mut value = Value::from(string_value); - value.decor_mut().set_prefix(""); - Item::Value(value) -} - -fn write_contract_toml(contract_doc: Document) { - let path = get_contract_toml_path(); - fs::write(path, contract_doc.to_string()).expect("Failed writing to contract.toml file"); -} - -fn read_contract_toml() -> Document { - let path = get_contract_toml_path(); - let toml_data = std::fs::read_to_string(path.clone()) - .unwrap_or_else(|_| panic!("contract.toml file does not exist on path {}", path)); - toml_data.parse::().expect("invalid config file") -} - -fn get_contract_toml_path() -> String { - let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); - format!("{}/etc/env/base/contracts.toml", zksync_home) -} - -fn generate_commitments() -> (String, String, String) { - let (_, basic_circuit_commitment, _) = - witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( - get_vks_for_basic_circuits(), - )); - - let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); - let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); - - let (_, leaf_aggregation_vk_commitment) = - witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - leaf_aggregation_vk, - )); - - let (_, node_aggregation_vk_commitment) = - witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - node_aggregation_vk, - )); - let basic_circuit_commitment_hex = format!("0x{}", to_hex(&basic_circuit_commitment)); - let leaf_aggregation_commitment_hex = format!("0x{}", to_hex(&leaf_aggregation_vk_commitment)); - let 
node_aggregation_commitment_hex = format!("0x{}", to_hex(&node_aggregation_vk_commitment)); - vlog::info!( - "basic circuit commitment {:?}", - basic_circuit_commitment_hex - ); - vlog::info!( - "leaf aggregation commitment {:?}", - leaf_aggregation_commitment_hex - ); - vlog::info!( - "node aggregation commitment {:?}", - node_aggregation_commitment_hex - ); - ( - basic_circuit_commitment_hex, - leaf_aggregation_commitment_hex, - node_aggregation_commitment_hex, - ) -} - #[cfg(test)] mod test { use super::*; diff --git a/core/bin/verification_key_generator_and_server/src/lib.rs b/core/bin/verification_key_generator_and_server/src/lib.rs index ee888cdb2cf8..5ba186a422aa 100644 --- a/core/bin/verification_key_generator_and_server/src/lib.rs +++ b/core/bin/verification_key_generator_and_server/src/lib.rs @@ -1,21 +1,37 @@ +use ff::to_hex; +use once_cell::sync::Lazy; use std::collections::HashMap; use std::path::Path; +use std::str::FromStr; use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::setup::VerificationKey; use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; use itertools::Itertools; +use structopt::lazy_static::lazy_static; +use zksync_types::circuit::SCHEDULER_CIRCUIT_INDEX; use zksync_types::circuit::{ - GEOMETRY_CONFIG, LEAF_SPLITTING_FACTOR, NODE_SPLITTING_FACTOR, SCHEDULER_UPPER_BOUND, + GEOMETRY_CONFIG, LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, + NODE_SPLITTING_FACTOR, SCHEDULER_UPPER_BOUND, }; +use zksync_types::protocol_version::{L1VerifierConfig, VerifierParams}; +use zksync_types::vk_transform::generate_vk_commitment; +use zksync_types::zkevm_test_harness::witness; use zksync_types::zkevm_test_harness::witness::full_block_artifact::BlockBasicCircuits; -use zksync_types::zkevm_test_harness::witness::recursive_aggregation::padding_aggregations; +use zksync_types::zkevm_test_harness::witness::recursive_aggregation::{ + erase_vk_type, padding_aggregations, +}; use zksync_types::zkevm_test_harness::witness::vk_set_generator::circuits_for_vk_generation; +use zksync_types::H256; #[cfg(test)] mod tests; +lazy_static! 
{ + static ref COMMITMENTS: Lazy<L1VerifierConfig> = Lazy::new(|| { circuit_commitments() }); +} + pub fn get_vks_for_basic_circuits( ) -> HashMap<u8, VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>> { // 3-17 are the ids of basic circuits @@ -112,3 +128,61 @@ fn get_file_path(circuit_type: u8) -> String { zksync_home, circuit_type ) } + +pub fn generate_commitments() -> (String, String, String) { + let (_, basic_circuit_commitment, _) = + witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( + get_vks_for_basic_circuits(), + )); + + let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); + let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); + + let (_, leaf_aggregation_vk_commitment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + leaf_aggregation_vk, + )); + + let (_, node_aggregation_vk_commitment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + node_aggregation_vk, + )); + let basic_circuit_commitment_hex = format!("0x{}", to_hex(&basic_circuit_commitment)); + let leaf_aggregation_commitment_hex = format!("0x{}", to_hex(&leaf_aggregation_vk_commitment)); + let node_aggregation_commitment_hex = format!("0x{}", to_hex(&node_aggregation_vk_commitment)); + vlog::info!( + "basic circuit commitment {:?}", + basic_circuit_commitment_hex + ); + vlog::info!( + "leaf aggregation commitment {:?}", + leaf_aggregation_commitment_hex + ); + vlog::info!( + "node aggregation commitment {:?}", + node_aggregation_commitment_hex + ); + ( + basic_circuit_commitment_hex, + leaf_aggregation_commitment_hex, + node_aggregation_commitment_hex, + ) +} + +fn circuit_commitments() -> L1VerifierConfig { + let (basic, leaf, node) = generate_commitments(); + let scheduler = generate_vk_commitment(get_vk_for_circuit_type(SCHEDULER_CIRCUIT_INDEX)); + L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: H256::from_str(&node).expect("invalid node commitment"), + recursion_leaf_level_vk_hash: H256::from_str(&leaf).expect("invalid leaf commitment"), + recursion_circuits_set_vks_hash: H256::from_str(&basic) + .expect("invalid basic commitment"), + }, + recursion_scheduler_level_vk_hash: scheduler, + } +} + +pub fn get_cached_commitments() -> L1VerifierConfig { + **COMMITMENTS +} diff --git a/core/bin/vk_setup_data_generator_server_fri/README.md b/core/bin/vk_setup_data_generator_server_fri/README.md deleted file mode 100644 index dd095531ebe4..000000000000 --- a/core/bin/vk_setup_data_generator_server_fri/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Setup data - -## generating setup-data for specific circuit type - -`cargo +nightly-2023-05-31 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` diff --git a/core/bin/vk_setup_data_generator_server_fri/src/lib.rs b/core/bin/vk_setup_data_generator_server_fri/src/lib.rs deleted file mode 100644 index 106d4e21af68..000000000000 --- a/core/bin/vk_setup_data_generator_server_fri/src/lib.rs +++ /dev/null @@ -1,216 +0,0 @@ -#![feature(generic_const_exprs)] -use std::fs::File; -use std::io::Read; - -use circuit_definitions::boojum::cs::implementations::hints::{ - DenseVariablesCopyHint, DenseWitnessCopyHint, -}; -use circuit_definitions::boojum::cs::implementations::polynomial_storage::{ - SetupBaseStorage, SetupStorage, -}; -use circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver; -use circuit_definitions::boojum::cs::implementations::verifier::VerificationKey; -use 
circuit_definitions::boojum::cs::oracle::merkle_tree::MerkleTreeWithCap; -use circuit_definitions::boojum::cs::oracle::TreeHasher; -use circuit_definitions::boojum::field::{PrimeField, SmallField}; - -use circuit_definitions::boojum::field::traits::field_like::PrimeFieldLikeVectorized; - -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; -use circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, -}; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; -use zksync_config::configs::FriProverConfig; -use zksync_types::proofs::AggregationRound; - -pub mod in_memory_setup_data_source; -pub mod utils; - -#[derive(Debug, Serialize, Deserialize)] -#[serde( - bound = "F: serde::Serialize + serde::de::DeserializeOwned, P: serde::Serialize + serde::de::DeserializeOwned" -)] -pub struct ProverSetupData< - F: PrimeField + SmallField, - P: PrimeFieldLikeVectorized, - H: TreeHasher, -> { - pub setup_base: SetupBaseStorage, - pub setup: SetupStorage, - #[serde(bound( - serialize = "H::Output: serde::Serialize", - deserialize = "H::Output: serde::de::DeserializeOwned" - ))] - pub vk: VerificationKey, - #[serde(bound( - serialize = "H::Output: serde::Serialize", - deserialize = "H::Output: serde::de::DeserializeOwned" - ))] - pub setup_tree: MerkleTreeWithCap, - pub vars_hint: DenseVariablesCopyHint, - pub wits_hint: DenseWitnessCopyHint, - pub finalization_hint: FinalizationHintsForProver, -} - -enum ProverServiceDataType { - VerificationKey, - SetupData, -} - -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub struct ProverServiceDataKey { - pub circuit_id: u8, - pub round: AggregationRound, -} - -impl ProverServiceDataKey { - pub fn new(circuit_id: u8, round: AggregationRound) -> Self { - Self { circuit_id, round } - } -} - -pub fn get_base_path() -> String { - let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); - format!( - "{}/core/bin/vk_setup_data_generator_server_fri/data", - zksync_home - ) -} - -pub fn get_base_vk_path() -> String { - let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); - format!( - "{}/core/bin/vk_setup_data_generator_server_fri/data", - zksync_home - ) -} - -fn get_file_path(key: ProverServiceDataKey, service_data_type: ProverServiceDataType) -> String { - let name = match key.round { - AggregationRound::BasicCircuits => { - format!("basic_{}", key.circuit_id) - } - AggregationRound::LeafAggregation => { - format!("leaf_{}", key.circuit_id) - } - AggregationRound::NodeAggregation => "node".to_string(), - AggregationRound::Scheduler => "scheduler".to_string(), - }; - match service_data_type { - ProverServiceDataType::VerificationKey => { - format!("{}/verification_{}_key.json", get_base_vk_path(), name) - } - ProverServiceDataType::SetupData => { - format!( - "{}/setup_{}_data.bin", - FriProverConfig::from_env().setup_data_path, - name - ) - } - } -} - -pub fn get_base_layer_vk_for_circuit_type(circuit_type: u8) -> ZkSyncBaseLayerVerificationKey { - let filepath = get_file_path( - ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), - ProverServiceDataType::VerificationKey, - ); - vlog::info!("Fetching verification key from path: {}", filepath); - let text = std::fs::read_to_string(&filepath) - .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); - serde_json::from_str::(&text).unwrap_or_else(|_| { - panic!( - "Failed deserializing 
verification key from path: {}", - filepath - ) - }) -} - -pub fn get_recursive_layer_vk_for_circuit_type( - circuit_type: u8, -) -> ZkSyncRecursionLayerVerificationKey { - let round = get_round_for_recursive_circuit_type(circuit_type); - let filepath = get_file_path( - ProverServiceDataKey::new(circuit_type, round), - ProverServiceDataType::VerificationKey, - ); - vlog::info!("Fetching verification key from path: {}", filepath); - let text = std::fs::read_to_string(&filepath) - .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); - serde_json::from_str::(&text).unwrap_or_else(|_| { - panic!( - "Failed deserializing verification key from path: {}", - filepath - ) - }) -} - -pub fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound { - match circuit_type { - circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => { - AggregationRound::Scheduler - } - circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8 => { - AggregationRound::NodeAggregation - } - _ => AggregationRound::LeafAggregation, - } -} - -pub fn save_base_layer_vk(vk: ZkSyncBaseLayerVerificationKey) { - let circuit_type = vk.numeric_circuit_type(); - let filepath = get_file_path( - ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), - ProverServiceDataType::VerificationKey, - ); - vlog::info!("saving basic verification key to: {}", filepath); - std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); -} - -pub fn save_recursive_layer_vk(vk: ZkSyncRecursionLayerVerificationKey) { - let circuit_type = vk.numeric_circuit_type(); - let round = get_round_for_recursive_circuit_type(circuit_type); - let filepath = get_file_path( - ProverServiceDataKey::new(circuit_type, round), - ProverServiceDataType::VerificationKey, - ); - vlog::info!("saving recursive layer verification key to: {}", filepath); - std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); -} - -pub fn get_setup_data_for_circuit_type( - key: ProverServiceDataKey, -) -> ProverSetupData -where - F: PrimeField + SmallField + Serialize + DeserializeOwned, - P: PrimeFieldLikeVectorized + Serialize + DeserializeOwned, - H: TreeHasher, - >::Output: Serialize + DeserializeOwned, -{ - let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); - let mut file = File::open(filepath.clone()) - .unwrap_or_else(|_| panic!("Failed reading setup-data from path: {:?}", filepath)); - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer).unwrap_or_else(|_| { - panic!( - "Failed reading setup-data to buffer from path: {:?}", - filepath - ) - }); - vlog::info!("loading {:?} setup data from path: {}", key, filepath); - bincode::deserialize::>(&buffer).unwrap_or_else(|_| { - panic!( - "Failed deserializing setup-data at path: {:?} for circuit: {:?}", - filepath, key - ) - }) -} - -pub fn save_setup_data(key: ProverServiceDataKey, serialized_setup_data: &Vec) { - let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); - vlog::info!("saving {:?} setup data to: {}", key, filepath); - std::fs::write(filepath.clone(), serialized_setup_data) - .unwrap_or_else(|_| panic!("Failed saving setup-data at path: {:?}", filepath)); -} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/main.rs b/core/bin/vk_setup_data_generator_server_fri/src/main.rs deleted file mode 100644 index 589af0504cb2..000000000000 --- 
a/core/bin/vk_setup_data_generator_server_fri/src/main.rs +++ /dev/null @@ -1,54 +0,0 @@ -#![feature(generic_const_exprs)] - -use crate::in_memory_setup_data_source::InMemoryDataSource; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; -use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use zkevm_test_harness::compute_setups::{ - generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, -}; -use zkevm_test_harness::data_source::SetupDataSource; -use zksync_vk_setup_data_server_fri::{save_base_layer_vk, save_recursive_layer_vk}; - -mod in_memory_setup_data_source; -mod vk_generator; - -fn save_vks(source: &dyn SetupDataSource) { - for base_circuit_type in - (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) - { - let vk = source - .get_base_layer_vk(base_circuit_type) - .unwrap_or_else(|_| panic!("No vk exist for circuit type: {}", base_circuit_type)); - save_base_layer_vk(vk); - } - for leaf_circuit_type in (ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8) - ..=(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForL1MessagesHasher as u8) - { - let vk = source - .get_recursion_layer_vk(leaf_circuit_type) - .unwrap_or_else(|_| panic!("No vk exist for circuit type: {}", leaf_circuit_type)); - save_recursive_layer_vk(vk); - } - save_recursive_layer_vk( - source - .get_recursion_layer_node_vk() - .expect("No vk exist for node layer circuit"), - ); - save_recursive_layer_vk( - source - .get_recursion_layer_vk(ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8) - .expect("No vk exist for scheduler circuit"), - ); -} - -fn generate_vks() { - let mut in_memory_source = InMemoryDataSource::new(); - generate_base_layer_vks_and_proofs(&mut in_memory_source).expect("Failed generating base vk's"); - generate_recursive_layer_vks_and_proofs(&mut in_memory_source) - .expect("Failed generating recursive vk's"); - save_vks(&in_memory_source); -} - -fn main() { - generate_vks(); -} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs deleted file mode 100644 index d2e0858ea323..000000000000 --- a/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs +++ /dev/null @@ -1,137 +0,0 @@ -use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use circuit_definitions::boojum::worker::Worker; -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; -use circuit_definitions::{ - ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, -}; -use structopt::StructOpt; -use zkevm_test_harness::geometry_config::get_geometry_config; -use zkevm_test_harness::prover_utils::{ - create_base_layer_setup_data, create_recursive_layer_setup_data, -}; -use zksync_types::proofs::AggregationRound; -use zksync_vk_setup_data_server_fri::utils::{ - get_basic_circuits, get_leaf_circuits, get_node_circuit, get_scheduler_circuit, CYCLE_LIMIT, -}; -use zksync_vk_setup_data_server_fri::{ - get_round_for_recursive_circuit_type, save_setup_data, ProverServiceDataKey, ProverSetupData, -}; - -#[derive(Debug, StructOpt)] -#[structopt( - name = "Generate setup data for individual circuit", - about = "Tool for generating setup data for individual circuit" -)] 
-struct Opt { - /// Numeric circuit type valid value are - /// 1. for base layer [1-13]. - /// 2. for recursive layer [1-15]. - #[structopt(long)] - numeric_circuit: u8, - /// Boolean representing whether to generate for base layer or for recursive layer. - #[structopt(short = "b", long = "is_base_layer")] - is_base_layer: bool, -} - -fn main() { - let opt = Opt::from_args(); - match opt.is_base_layer { - true => { - let circuit = get_base_layer_circuit(opt.numeric_circuit); - generate_base_layer_setup_data(circuit); - } - false => { - let circuit = get_recursive_circuit(opt.numeric_circuit); - generate_recursive_layer_setup_data(circuit); - } - } -} - -fn get_base_layer_circuit( - id: u8, -) -> ZkSyncBaseLayerCircuit< - GoldilocksField, - VmWitnessOracle, - ZkSyncDefaultRoundFunction, -> { - get_basic_circuits(CYCLE_LIMIT, get_geometry_config()) - .into_iter() - .find(|circuit| id == circuit.numeric_circuit_type()) - .unwrap_or_else(|| panic!("No basic circuit found for id: {}", id)) -} - -fn get_recursive_circuit(id: u8) -> ZkSyncRecursiveLayerCircuit { - let mut recursive_circuits = get_leaf_circuits(); - recursive_circuits.push(get_node_circuit()); - recursive_circuits.push(get_scheduler_circuit()); - recursive_circuits - .into_iter() - .find(|circuit| id == circuit.numeric_circuit_type()) - .unwrap_or_else(|| panic!("No recursive circuit found for id: {}", id)) -} - -fn generate_recursive_layer_setup_data(circuit: ZkSyncRecursiveLayerCircuit) { - let circuit_type = circuit.numeric_circuit_type(); - vlog::info!( - "starting setup data generator for recursive layer circuit: {}.", - circuit_type - ); - let worker = Worker::new(); - let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = - create_recursive_layer_setup_data( - circuit.clone(), - &worker, - BASE_LAYER_FRI_LDE_FACTOR, - BASE_LAYER_CAP_SIZE, - ); - let prover_setup_data = ProverSetupData { - setup_base, - setup, - vk: vk.clone(), - setup_tree, - vars_hint, - wits_hint, - finalization_hint, - }; - let serialized = bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); - let round = get_round_for_recursive_circuit_type(circuit_type); - save_setup_data(ProverServiceDataKey::new(circuit_type, round), &serialized); -} - -fn generate_base_layer_setup_data( - circuit: ZkSyncBaseLayerCircuit< - GoldilocksField, - VmWitnessOracle, - ZkSyncDefaultRoundFunction, - >, -) { - let circuit_type = circuit.numeric_circuit_type(); - vlog::info!( - "starting setup data generator for base layer circuit: {}.", - circuit_type - ); - let worker = Worker::new(); - let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = - create_base_layer_setup_data( - circuit.clone(), - &worker, - BASE_LAYER_FRI_LDE_FACTOR, - BASE_LAYER_CAP_SIZE, - ); - let prover_setup_data = ProverSetupData { - setup_base, - setup, - vk: vk.clone(), - setup_tree, - vars_hint, - wits_hint, - finalization_hint, - }; - let serialized = bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); - save_setup_data( - ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), - &serialized, - ); -} diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index 4b4e12961046..226a77035cc6 100644 --- a/core/bin/zksync_core/Cargo.toml +++ b/core/bin/zksync_core/Cargo.toml @@ -36,6 +36,8 @@ zksync_object_store = { path = "../../lib/object_store", version = "1.0" } zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } vlog = { path = 
"../../lib/vlog", version = "1.0" } +multivm = { path = "../../lib/multivm", version = "0.1.0" } + clap = { version = "4.2.4", features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -60,13 +62,13 @@ jsonrpc-ws-server = { git = "https://github.com/matter-labs/jsonrpc.git", branch jsonrpc-derive = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" } jsonrpc-pubsub = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" } num = { version = "0.3.1", features = ["serde"] } -bigdecimal = { version = "=0.2.0", features = ["serde"] } +bigdecimal = { version = "0.2.2", features = ["serde"] } reqwest = { version = "0.11", features = ["blocking", "json"] } hex = "0.4" governor = "0.4.2" -hyper = "0.14.26" tower-http = { version = "0.4.1", features = ["full"] } tower = { version = "0.4.13", features = ["full"] } +axum = { version = "0.6.19", default-features = false, features = ["http1", "json", "tokio"] } actix-rt = "2.2.0" actix-cors = "0.6.0-beta.2" @@ -74,12 +76,12 @@ actix-web = "4.0.0-beta.8" tracing = "0.1.26" +[target.'cfg(not(target_env = "msvc"))'.dependencies] +tikv-jemallocator = "0.5" + [dev-dependencies] db_test_macro = { path = "../../lib/db_test_macro", version = "0.1.0" } assert_matches = "1.5" once_cell = "1.7" tempfile = "3.0.2" - -[features] -openzeppelin_tests = [] diff --git a/core/bin/zksync_core/src/api_server/contract_verification/api_decl.rs b/core/bin/zksync_core/src/api_server/contract_verification/api_decl.rs new file mode 100644 index 000000000000..1b7d07b42767 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/contract_verification/api_decl.rs @@ -0,0 +1,52 @@ +use actix_web::web; + +use zksync_dal::connection::ConnectionPool; + +#[derive(Debug, Clone)] +pub struct RestApi { + pub(super) master_connection_pool: ConnectionPool, + pub(super) replica_connection_pool: ConnectionPool, +} + +impl RestApi { + pub fn new( + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, + ) -> Self { + Self { + master_connection_pool, + replica_connection_pool, + } + } + + /// Creates an actix-web `Scope`, which can be mounted to the Http server. 
+ pub fn into_scope(self) -> actix_web::Scope { + web::scope("") + .app_data(web::Data::new(self)) + .route("/contract_verification", web::post().to(Self::verification)) + .route( + "/contract_verification/zksolc_versions", + web::get().to(Self::zksolc_versions), + ) + .route( + "/contract_verification/solc_versions", + web::get().to(Self::solc_versions), + ) + .route( + "/contract_verification/zkvyper_versions", + web::get().to(Self::zkvyper_versions), + ) + .route( + "/contract_verification/vyper_versions", + web::get().to(Self::vyper_versions), + ) + .route( + "/contract_verification/{id}", + web::get().to(Self::verification_request_status), + ) + .route( + "/contract_verification/info/{address}", + web::get().to(Self::verification_info), + ) + } +} diff --git a/core/bin/zksync_core/src/api_server/contract_verification/api_impl.rs b/core/bin/zksync_core/src/api_server/contract_verification/api_impl.rs new file mode 100644 index 000000000000..ef1f8ba897b6 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/contract_verification/api_impl.rs @@ -0,0 +1,184 @@ +use std::time::Instant; + +use actix_web::{ + web::{self, Json}, + HttpResponse, Result as ActixResult, +}; +use serde::Serialize; + +use zksync_types::{contract_verification_api::VerificationIncomingRequest, Address}; + +use super::api_decl::RestApi; + +fn ok_json(data: impl Serialize) -> ActixResult<HttpResponse> { + Ok(HttpResponse::Ok().json(data)) +} + +impl RestApi { + #[tracing::instrument(skip(query))] + fn validate_contract_verification_query( + query: &VerificationIncomingRequest, + ) -> Result<(), HttpResponse> { + if query.source_code_data.compiler_type() != query.compiler_versions.compiler_type() { + return Err(HttpResponse::BadRequest().body("incorrect compiler versions")); + } + + Ok(()) + } + + /// Add a contract verification job to the queue if the requested contract wasn't previously verified. 
+ #[tracing::instrument(skip(self_, request))] + pub async fn verification( + self_: web::Data<RestApi>, + Json(request): Json<VerificationIncomingRequest>, + ) -> ActixResult<HttpResponse> { + let start = Instant::now(); + if let Err(res) = Self::validate_contract_verification_query(&request) { + return Ok(res); + } + let mut storage = self_ + .master_connection_pool + .access_storage_tagged("api") + .await; + + if !storage + .storage_logs_dal() + .is_contract_deployed_at_address(request.contract_address) + .await + { + return Ok( + HttpResponse::BadRequest().body("There is no deployed contract on this address") + ); + } + if storage + .contract_verification_dal() + .is_contract_verified(request.contract_address) + .await + { + return Ok(HttpResponse::BadRequest().body("This contract is already verified")); + } + + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(request) + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification"); + ok_json(request_id) + } + + #[tracing::instrument(skip(self_))] + pub async fn verification_request_status( + self_: web::Data<RestApi>, + id: web::Path<usize>, + ) -> ActixResult<HttpResponse> { + let start = Instant::now(); + + let status = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_verification_request_status(*id) + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_request_status"); + match status { + Some(status) => ok_json(status), + None => Ok(HttpResponse::NotFound().finish()), + } + } + + #[tracing::instrument(skip(self_))] + pub async fn zksolc_versions(self_: web::Data<RestApi>) -> ActixResult<HttpResponse> { + let start = Instant::now(); + + let versions = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_zksolc_versions() + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_zksolc_versions"); + ok_json(versions) + } + + #[tracing::instrument(skip(self_))] + pub async fn solc_versions(self_: web::Data<RestApi>) -> ActixResult<HttpResponse> { + let start = Instant::now(); + + let versions = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_solc_versions() + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_solc_versions"); + ok_json(versions) + } + + #[tracing::instrument(skip(self_))] + pub async fn zkvyper_versions(self_: web::Data<RestApi>) -> ActixResult<HttpResponse> { + let start = Instant::now(); + + let versions = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_zkvyper_versions() + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_zkvyper_versions"); + ok_json(versions) + } + + #[tracing::instrument(skip(self_))] + pub async fn vyper_versions(self_: web::Data<RestApi>) -> ActixResult<HttpResponse> { + let start = Instant::now(); + + let versions = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_vyper_versions() + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_vyper_versions"); + ok_json(versions) + } + + #[tracing::instrument(skip(self_))] + pub async fn verification_info( + self_: 
web::Data, + address: web::Path
, + ) -> ActixResult { + let start = Instant::now(); + + let info = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await + .contract_verification_dal() + .get_contract_verification_info(*address) + .await + .unwrap(); + + metrics::histogram!("api.contract_verification.call", start.elapsed(), "method" => "contract_verification_info"); + match info { + Some(info) => ok_json(info), + None => Ok(HttpResponse::NotFound().finish()), + } + } +} diff --git a/core/bin/zksync_core/src/api_server/explorer/mod.rs b/core/bin/zksync_core/src/api_server/contract_verification/mod.rs similarity index 69% rename from core/bin/zksync_core/src/api_server/explorer/mod.rs rename to core/bin/zksync_core/src/api_server/contract_verification/mod.rs index ec35078fb013..e49d56a695e2 100644 --- a/core/bin/zksync_core/src/api_server/explorer/mod.rs +++ b/core/bin/zksync_core/src/api_server/contract_verification/mod.rs @@ -1,22 +1,20 @@ -use std::net::SocketAddr; -use std::time::Duration; +use std::{net::SocketAddr, time::Duration}; -use zksync_config::configs::api::ExplorerApiConfig; +use actix_cors::Cors; +use actix_web::{ + dev::Server, + {web, App, HttpResponse, HttpServer}, +}; +use tokio::{sync::watch, task::JoinHandle}; + +use zksync_config::configs::api::ContractVerificationApiConfig; use zksync_dal::connection::ConnectionPool; -use zksync_types::Address; use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; -use actix_cors::Cors; -use actix_web::dev::Server; -use actix_web::{web, App, HttpResponse, HttpServer}; -use tokio::sync::watch; -use tokio::task::JoinHandle; - use api_decl::RestApi; pub mod api_decl; pub mod api_impl; -pub mod network_stats; fn start_server(api: RestApi, bind_to: SocketAddr, threads: usize) -> Server { HttpServer::new(move || { @@ -43,46 +41,40 @@ fn start_server(api: RestApi, bind_to: SocketAddr, threads: usize) -> Server { .shutdown_timeout(60) .keep_alive(Duration::from_secs(10)) .client_request_timeout(Duration::from_secs(60)) + .disable_signals() .run() } /// Start HTTP REST API pub fn start_server_thread_detached( - api_config: ExplorerApiConfig, - l2_erc20_bridge_addr: Address, - fee_account_addr: Address, master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, + api_config: ContractVerificationApiConfig, mut stop_receiver: watch::Receiver, ) -> JoinHandle<()> { let (handler, panic_sender) = spawn_panic_handler(); std::thread::Builder::new() - .name("explorer-api".to_string()) + .name("contract-verification-api".to_string()) .spawn(move || { let _panic_sentinel = ThreadPanicNotify(panic_sender.clone()); actix_rt::System::new().block_on(async move { let bind_address = api_config.bind_addr(); let threads = api_config.threads_per_server as usize; - let api = RestApi::new( - master_connection_pool, - replica_connection_pool, - api_config, - l2_erc20_bridge_addr, - fee_account_addr, - ); - api.spawn_network_stats_updater(panic_sender, stop_receiver.clone()); + let api = RestApi::new(master_connection_pool, replica_connection_pool); let server = start_server(api, bind_address, threads); let close_handle = server.handle(); actix_rt::spawn(async move { if stop_receiver.changed().await.is_ok() { close_handle.stop(true).await; - vlog::info!("Stop signal received, explorer API is shutting down"); + vlog::info!( + "Stop signal received, contract verification API is shutting down" + ); } }); - server.await.expect("Explorer API crashed"); + server.await.expect("Contract verification API crashed"); }); }) .expect("Failed to spawn 
thread for REST API"); diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs index 458b7833d5f1..b6a299eb46ca 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -20,7 +20,7 @@ use vm::{ HistoryDisabled, VmInstance, }; use zksync_config::constants::ZKPORTER_IS_AVAILABLE; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; use zksync_state::{PostgresStorage, ReadStorage, StorageView, WriteStorage}; use zksync_types::{ api, get_nonce_key, @@ -29,12 +29,12 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; -use super::{vm_metrics, BlockArgs, TxExecutionArgs, TxSharedArgs}; +use super::{vm_metrics, BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit}; #[allow(clippy::too_many_arguments)] pub(super) fn apply_vm_in_sandbox( - rt_handle: tokio::runtime::Handle, - shared_args: &TxSharedArgs, + vm_permit: VmPermit, + shared_args: TxSharedArgs, execution_args: &TxExecutionArgs, connection_pool: &ConnectionPool, tx: Transaction, @@ -45,6 +45,7 @@ pub(super) fn apply_vm_in_sandbox( let stage_started_at = Instant::now(); let span = tracing::debug_span!("initialization").entered(); + let rt_handle = vm_permit.rt_handle(); let mut connection = rt_handle.block_on(connection_pool.access_storage_tagged("api")); let connection_acquire_time = stage_started_at.elapsed(); // We don't want to emit too many logs. @@ -56,8 +57,9 @@ pub(super) fn apply_vm_in_sandbox( } let resolve_started_at = Instant::now(); - let (state_block_number, vm_block_number) = - rt_handle.block_on(block_args.resolve_block_numbers(&mut connection)); + let (state_block_number, vm_block_number) = rt_handle + .block_on(block_args.resolve_block_numbers(&mut connection)) + .expect("Failed resolving block numbers"); let resolve_time = resolve_started_at.elapsed(); // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { @@ -67,10 +69,15 @@ pub(super) fn apply_vm_in_sandbox( ); } + if block_args.resolves_to_latest_sealed_miniblock() { + shared_args + .caches + .schedule_values_update(state_block_number); + } let block_timestamp = block_args.block_timestamp_seconds(); - let storage = PostgresStorage::new(rt_handle, connection, state_block_number, false) - .with_factory_deps_cache(shared_args.factory_deps_cache.clone()); + let storage = PostgresStorage::new(rt_handle.clone(), connection, state_block_number, false) + .with_caches(shared_args.caches); // Moving `storage_read_cache` to `storage_view`. It will be moved back once execution is finished and `storage_view` is not needed. let mut storage_view = StorageView::new_with_read_keys(storage, storage_read_cache); @@ -99,7 +106,7 @@ pub(super) fn apply_vm_in_sandbox( default_aa_code_hash: h256_to_u256(shared_args.base_system_contracts.default_aa.hash), zkporter_is_available: ZKPORTER_IS_AVAILABLE, }; - let &TxSharedArgs { + let TxSharedArgs { l1_gas_price, fair_l2_gas_price, .. @@ -150,6 +157,7 @@ pub(super) fn apply_vm_in_sandbox( vm_execution_took, storage_view.metrics(), ); + drop(vm_permit); // Ensure that the permit lives until this point // Move `read_storage_keys` from `storage_view` back to cache. 
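The reworked `apply_vm_in_sandbox` above runs on a blocking thread and reaches back into async storage code through the runtime handle carried by the permit. A minimal sketch of that pattern, assuming only Tokio; `load_value` is a hypothetical stand-in for the real storage queries:

```rust
use tokio::runtime::Handle;

// Hypothetical async data source standing in for the storage layer.
async fn load_value(key: u64) -> u64 {
    key * 2
}

// CPU-heavy work on a blocking thread that still awaits async storage calls
// through a captured runtime handle.
fn heavy_computation(rt_handle: Handle, key: u64) -> u64 {
    // `block_on` is fine here because this runs on a blocking thread,
    // not on a runtime worker thread.
    let value = rt_handle.block_on(load_value(key));
    value + 1 // stand-in for the actual VM execution
}

#[tokio::main]
async fn main() {
    let handle = Handle::current();
    let result = tokio::task::spawn_blocking(move || heavy_computation(handle, 20))
        .await
        .unwrap();
    assert_eq!(result, 41);
}
```

Keeping the VM run inside `spawn_blocking` avoids stalling runtime worker threads while still letting the sandbox issue async DB reads.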
(result, storage_view.into_read_storage_keys()) @@ -163,30 +171,37 @@ impl BlockArgs { ) } + fn resolves_to_latest_sealed_miniblock(&self) -> bool { + matches!( + self.block_id, + api::BlockId::Number( + api::BlockNumber::Pending | api::BlockNumber::Latest | api::BlockNumber::Committed + ) + ) + } + async fn resolve_block_numbers( &self, connection: &mut StorageProcessor<'_>, - ) -> (MiniblockNumber, L1BatchNumber) { - if self.is_pending_miniblock() { + ) -> Result<(MiniblockNumber, L1BatchNumber), SqlxError> { + Ok(if self.is_pending_miniblock() { let sealed_l1_batch_number = connection .blocks_web3_dal() .get_sealed_l1_batch_number() - .await - .unwrap(); + .await?; let sealed_miniblock_number = connection .blocks_web3_dal() .get_sealed_miniblock_number() - .await - .unwrap(); + .await?; (sealed_miniblock_number, sealed_l1_batch_number + 1) } else { let l1_batch_number = connection .storage_web3_dal() - .get_provisional_l1_batch_number_of_miniblock_unchecked(self.resolved_block_number) - .await - .unwrap(); + .resolve_l1_batch_number_of_miniblock(self.resolved_block_number) + .await? + .expected_l1_batch(); (self.resolved_block_number, l1_batch_number) - } + }) } fn block_timestamp_seconds(&self) -> u64 { diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs index 51d5807e6fb2..8f083a0022c3 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -63,6 +63,7 @@ impl TxExecutionArgs { let added_balance = match &tx.common_data { ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, ExecuteTransactionCommon::L1(_) => U256::zero(), + ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(), }; Self { @@ -76,9 +77,8 @@ impl TxExecutionArgs { } } -#[allow(clippy::too_many_arguments)] pub(crate) async fn execute_tx_eth_call( - vm_permit: &VmPermit<'_>, // Proof that permit was acquired. + vm_permit: VmPermit, shared_args: TxSharedArgs, connection_pool: ConnectionPool, mut tx: L2Tx, @@ -112,7 +112,7 @@ pub(crate) async fn execute_tx_eth_call( #[tracing::instrument(skip_all)] pub(crate) async fn execute_tx_with_pending_state( - vm_permit: &VmPermit<'_>, // Proof that permit was acquired. 
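The `&VmPermit<'_>` parameters being removed here (replaced with owned `VmPermit` values just below) could not be moved into `spawn_blocking` closures, because a borrowed `tokio::sync::SemaphorePermit<'_>` ties the permit to the async frame. A sketch of the owned, clonable permit idea, assuming plain Tokio types:

```rust
use std::sync::Arc;

use tokio::sync::{OwnedSemaphorePermit, Semaphore};

// Sketch: an owned, clonable permit that can move into `spawn_blocking`,
// unlike `SemaphorePermit<'_>`, which borrows the semaphore.
#[derive(Clone)]
struct Permit(Arc<OwnedSemaphorePermit>);

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(1));
    let permit = Permit(Arc::new(
        Arc::clone(&semaphore).acquire_owned().await.unwrap(),
    ));
    // `Permit` is `'static` and `Clone`, so it can cross thread boundaries.
    tokio::task::spawn_blocking(move || {
        let _held = permit; // released once the last clone is dropped
    })
    .await
    .unwrap();
    assert_eq!(semaphore.available_permits(), 1);
}
```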
+ vm_permit: VmPermit, mut shared_args: TxSharedArgs, execution_args: TxExecutionArgs, connection_pool: ConnectionPool, @@ -148,7 +148,7 @@ pub(crate) async fn execute_tx_with_pending_state( #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all)] async fn execute_tx_in_sandbox( - vm_permit: &VmPermit<'_>, + vm_permit: VmPermit, shared_args: TxSharedArgs, execution_args: TxExecutionArgs, connection_pool: ConnectionPool, @@ -167,14 +167,13 @@ async fn execute_tx_in_sandbox( .as_ref() .map_or(0, |deps| deps.len() as u16); - let rt_handle = vm_permit.rt_handle(); let moved_cache = mem::take(storage_read_cache); let (execution_result, moved_cache) = tokio::task::spawn_blocking(move || { let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); let execution_mode = execution_args.execution_mode; let result = apply::apply_vm_in_sandbox( - rt_handle, - &shared_args, + vm_permit, + shared_args, &execution_args, &connection_pool, tx, diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs index 38046a401bc8..2fe593c57e21 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -1,11 +1,15 @@ -use std::time::{Duration, Instant}; +use tokio::runtime::Handle; + +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; -use tokio::runtime::{Handle, Runtime}; use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; use zksync_config::constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; -use zksync_state::{FactoryDepsCache, PostgresStorage, ReadStorage, StorageView}; +use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; use zksync_types::{api, AccountTreeId, MiniblockNumber, U256}; use zksync_utils::bytecode::{compress_bytecode, hash_bytecode}; @@ -22,18 +26,58 @@ pub(super) use self::{ }; /// Permit to invoke VM code. +/// /// Any publicly-facing method that invokes VM is expected to accept a reference to this structure, /// as a proof that the caller obtained a token from `VmConcurrencyLimiter`, -#[derive(Debug)] -pub struct VmPermit<'a> { - _permit: tokio::sync::SemaphorePermit<'a>, +#[derive(Debug, Clone)] +pub struct VmPermit { /// A handle to the runtime that is used to query the VM storage. rt_handle: Handle, + _permit: Arc, } -impl<'a> VmPermit<'a> { - fn rt_handle(&self) -> Handle { - self.rt_handle.clone() +impl VmPermit { + fn rt_handle(&self) -> &Handle { + &self.rt_handle + } +} + +/// Barrier-like synchronization primitive allowing to close a [`VmConcurrencyLimiter`] it's attached to +/// so that it doesn't issue new permits, and to wait for all permits to drop. +#[derive(Debug, Clone)] +pub struct VmConcurrencyBarrier { + limiter: Arc, + max_concurrency: usize, +} + +impl VmConcurrencyBarrier { + /// Shuts down the related VM concurrency limiter so that it won't issue new permits. + pub fn close(&self) { + self.limiter.close(); + vlog::info!("VM concurrency limiter closed"); + } + + /// Waits until all permits issued by the VM concurrency limiter are dropped. 
+ pub async fn wait_until_stopped(self) { + const POLL_INTERVAL: Duration = Duration::from_millis(50); + + assert!( + self.limiter.is_closed(), + "Cannot wait on non-closed VM concurrency limiter" + ); + + loop { + let current_permits = self.limiter.available_permits(); + vlog::debug!( + "Waiting until all VM permits are dropped; currently remaining: {} / {}", + self.max_concurrency - current_permits, + self.max_concurrency + ); + if current_permits == self.max_concurrency { + return; + } + tokio::time::sleep(POLL_INTERVAL).await; + } } } @@ -49,55 +93,32 @@ impl<'a> VmPermit<'a> { #[derive(Debug)] pub struct VmConcurrencyLimiter { /// Semaphore that limits the number of concurrent VM executions. - limiter: tokio::sync::Semaphore, - /// A dedicated runtime used to query the VM storage in the API. - vm_runtime: RuntimeAccess, -} - -/// Either a dedicated runtime, or a handle to the externally creatd runtime. -#[derive(Debug)] -enum RuntimeAccess { - Owned(Runtime), - Handle(Handle), -} - -impl RuntimeAccess { - fn handle(&self) -> Handle { - match self { - RuntimeAccess::Owned(rt) => rt.handle().clone(), - RuntimeAccess::Handle(handle) => handle.clone(), - } - } + limiter: Arc, + rt_handle: Handle, } impl VmConcurrencyLimiter { - pub fn new(max_concurrency: Option) -> Self { - if let Some(max_concurrency) = max_concurrency { - vlog::info!("Initializing the VM concurrency limiter with a separate runtime. Max concurrency: {:?}", max_concurrency); - let vm_runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .expect("Failed to initialize VM runtime"); - Self { - limiter: tokio::sync::Semaphore::new(max_concurrency), - vm_runtime: RuntimeAccess::Owned(vm_runtime), - } - } else { - // Default concurrency is chosen to be beyond the number of connections in the pool / - // amount of blocking threads in the tokio threadpool. - // The real "concurrency limiter" will be represented by the lesser of these values. - const DEFAULT_CONCURRENCY_LIMIT: usize = 2048; - vlog::info!("Initializing the VM concurrency limiter with the default runtime"); - Self { - limiter: tokio::sync::Semaphore::new(DEFAULT_CONCURRENCY_LIMIT), - vm_runtime: RuntimeAccess::Handle(tokio::runtime::Handle::current()), - } - } + /// Creates a limiter together with a barrier allowing to control its shutdown. + pub fn new(max_concurrency: usize) -> (Self, VmConcurrencyBarrier) { + vlog::info!( + "Initializing the VM concurrency limiter with max concurrency {max_concurrency}" + ); + let limiter = Arc::new(tokio::sync::Semaphore::new(max_concurrency)); + + let this = Self { + limiter: Arc::clone(&limiter), + rt_handle: Handle::current(), + }; + let barrier = VmConcurrencyBarrier { + limiter, + max_concurrency, + }; + (this, barrier) } /// Waits until there is a free slot in the concurrency limiter. /// Returns a permit that should be dropped when the VM execution is finished. - pub async fn acquire(&self) -> VmPermit<'_> { + pub async fn acquire(&self) -> Option { let available_permits = self.limiter.available_permits(); metrics::histogram!( "api.web3.sandbox.semaphore.permits", @@ -105,11 +126,7 @@ impl VmConcurrencyLimiter { ); let start = Instant::now(); - let permit = self - .limiter - .acquire() - .await - .expect("Semaphore is never closed"); + let permit = Arc::clone(&self.limiter).acquire_owned().await.ok()?; let elapsed = start.elapsed(); // We don't want to emit too many logs. 
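The limiter/barrier pair added above boils down to a shared semaphore that is closed on shutdown and then drained. A self-contained sketch of the same close-then-drain protocol, assuming plain Tokio types; the names mirror the diff, but the implementation is illustrative only:

```rust
use std::{sync::Arc, time::Duration};

use tokio::sync::Semaphore;

#[derive(Clone)]
struct VmConcurrencyBarrier {
    limiter: Arc<Semaphore>,
    max_concurrency: usize,
}

impl VmConcurrencyBarrier {
    fn close(&self) {
        // After this, `acquire_owned` returns an error, which the API layer
        // can surface as a "server shutting down" response.
        self.limiter.close();
    }

    async fn wait_until_stopped(self) {
        // All permits are back once `available_permits` reaches the maximum.
        while self.limiter.available_permits() < self.max_concurrency {
            tokio::time::sleep(Duration::from_millis(50)).await;
        }
    }
}

#[tokio::main]
async fn main() {
    let max_concurrency = 4;
    let limiter = Arc::new(Semaphore::new(max_concurrency));
    let barrier = VmConcurrencyBarrier {
        limiter: Arc::clone(&limiter),
        max_concurrency,
    };

    let permit = Arc::clone(&limiter).acquire_owned().await.unwrap();
    barrier.close();
    // New acquisitions fail immediately instead of waiting.
    assert!(Arc::clone(&limiter).acquire_owned().await.is_err());

    drop(permit); // return the last outstanding permit
    barrier.wait_until_stopped().await; // now completes
}
```

Polling `available_permits` keeps the barrier independent of how permits are shared (here via `Arc`), at the cost of a small shutdown delay.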
if elapsed > Duration::from_millis(10) { @@ -118,10 +135,10 @@ impl VmConcurrencyLimiter { ); } metrics::histogram!("api.web3.sandbox", elapsed, "stage" => "vm_concurrency_limiter_acquire"); - VmPermit { - _permit: permit, - rt_handle: self.vm_runtime.handle(), - } + Some(VmPermit { + rt_handle: self.rt_handle.clone(), + _permit: Arc::new(permit), + }) } } @@ -162,9 +179,10 @@ async fn get_pending_state( /// Returns the number of the pubdata that the transaction will spend on factory deps. pub(super) async fn get_pubdata_for_factory_deps( + _vm_permit: &VmPermit, connection_pool: &ConnectionPool, factory_deps: &[Vec], - factory_deps_cache: FactoryDepsCache, + storage_caches: PostgresStorageCaches, ) -> u32 { if factory_deps.is_empty() { return 0; // Shortcut for the common case allowing to not acquire DB connections etc. @@ -180,7 +198,7 @@ pub(super) async fn get_pubdata_for_factory_deps( tokio::task::spawn_blocking(move || { let connection = rt_handle.block_on(connection_pool.access_storage_tagged("api")); let storage = PostgresStorage::new(rt_handle, connection, block_number, false) - .with_factory_deps_cache(factory_deps_cache); + .with_caches(storage_caches); let mut storage_view = StorageView::new(storage); let effective_lengths = factory_deps.iter().map(|bytecode| { @@ -208,7 +226,7 @@ pub(crate) struct TxSharedArgs { pub l1_gas_price: u64, pub fair_l2_gas_price: u64, pub base_system_contracts: BaseSystemContracts, - pub factory_deps_cache: FactoryDepsCache, + pub caches: PostgresStorageCaches, } /// Information about a block provided to VM. @@ -238,7 +256,9 @@ impl BlockArgs { .blocks_web3_dal() .resolve_block_id(block_id) .await?; - let Some(resolved_block_number) = resolved_block_number else { return Ok(None) }; + let Some(resolved_block_number) = resolved_block_number else { + return Ok(None); + }; let block_timestamp_s = connection .blocks_web3_dal() diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs index fe88cb42ce33..0b09d03cdbfe 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -15,7 +15,7 @@ use super::{ impl TxSharedArgs { pub async fn validate_tx_with_pending_state( mut self, - vm_permit: &VmPermit<'_>, // Proof that permit was acquired. 
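Note the comment on the removed parameter: the permit argument on these sandbox entry points is a type-level proof, never inspected at runtime. A minimal sketch of the proof-token pattern with a placeholder permit type:

```rust
// A permit that is never read at runtime; holding one is the point.
struct VmPermit;

// Callers cannot reach the VM without first presenting a permit.
fn run_in_vm(_permit: &VmPermit, program: &str) -> String {
    format!("executed: {program}")
}

fn main() {
    let permit = VmPermit; // in the real code this comes from the limiter
    println!("{}", run_in_vm(&permit, "transfer"));
}
```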
+ vm_permit: VmPermit, connection_pool: ConnectionPool, tx: L2Tx, computational_gas_limit: u32, @@ -26,7 +26,7 @@ impl TxSharedArgs { self.adjust_l1_gas_price(tx.common_data.fee.gas_per_pubdata_limit); self.validate_tx_in_sandbox( connection_pool, - vm_permit.rt_handle(), + vm_permit, tx, block_args, computational_gas_limit, @@ -47,7 +47,7 @@ impl TxSharedArgs { async fn validate_tx_in_sandbox( self, connection_pool: ConnectionPool, - rt_handle: tokio::runtime::Handle, + vm_permit: VmPermit, tx: L2Tx, block_args: BlockArgs, computational_gas_limit: u32, @@ -64,8 +64,8 @@ impl TxSharedArgs { let (validation_result, _) = tokio::task::spawn_blocking(move || { let span = tracing::debug_span!("validate_in_sandbox").entered(); let result = apply::apply_vm_in_sandbox( - rt_handle, - &self, + vm_permit, + self, &execution_args, &connection_pool, tx, diff --git a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs deleted file mode 100644 index cde6ef551eb1..000000000000 --- a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs +++ /dev/null @@ -1,97 +0,0 @@ -use zksync_config::configs::api::ExplorerApiConfig; -use zksync_dal::connection::ConnectionPool; -use zksync_types::Address; - -use actix_web::web; -use futures::channel::mpsc; -use tokio::sync::watch; - -use super::network_stats::SharedNetworkStats; - -#[derive(Debug, Clone)] -pub struct RestApi { - pub(super) master_connection_pool: ConnectionPool, - pub(super) replica_connection_pool: ConnectionPool, - pub(super) network_stats: SharedNetworkStats, - pub(super) api_config: ExplorerApiConfig, - pub(super) l2_erc20_bridge_addr: Address, - pub(super) fee_account_addr: Address, -} - -impl RestApi { - pub fn new( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - api_config: ExplorerApiConfig, - l2_erc20_bridge_addr: Address, - fee_account_addr: Address, - ) -> Self { - Self { - master_connection_pool, - replica_connection_pool, - network_stats: SharedNetworkStats::default(), - api_config, - l2_erc20_bridge_addr, - fee_account_addr, - } - } - - /// Creates an actix-web `Scope`, which can be mounted to the Http server. 
- pub fn into_scope(self) -> actix_web::Scope { - web::scope("") - .app_data(web::Data::new(self)) - .route("/network_stats", web::get().to(Self::network_stats)) - .route("/blocks", web::get().to(Self::block_pagination)) - .route("/block/{number}", web::get().to(Self::block_details)) - .route("/l1_batches", web::get().to(Self::l1_batch_pagination)) - .route("/l1_batch/{number}", web::get().to(Self::l1_batch_details)) - .route("/transactions", web::get().to(Self::transaction_pagination)) - .route( - "/transaction/{hash}", - web::get().to(Self::transaction_details), - ) - .route("/account/{address}", web::get().to(Self::account_details)) - .route("/contract/{address}", web::get().to(Self::contract_details)) - .route("/address/{address}", web::get().to(Self::address_details)) - .route("/token/{address}", web::get().to(Self::token_details)) - .route("/events", web::get().to(Self::events_pagination)) - .route( - "/contract_verification", - web::post().to(Self::contract_verification), - ) - .route( - "/contract_verification/zksolc_versions", - web::get().to(Self::contract_verification_zksolc_versions), - ) - .route( - "/contract_verification/solc_versions", - web::get().to(Self::contract_verification_solc_versions), - ) - .route( - "/contract_verification/zkvyper_versions", - web::get().to(Self::contract_verification_zkvyper_versions), - ) - .route( - "/contract_verification/vyper_versions", - web::get().to(Self::contract_verification_vyper_versions), - ) - .route( - "/contract_verification/{id}", - web::get().to(Self::contract_verification_request_status), - ) - } - - // Spawns future updating SharedNetworkStats in the current `actix::System` - pub fn spawn_network_stats_updater( - &self, - panic_notify: mpsc::Sender, - stop_receiver: watch::Receiver, - ) { - self.network_stats.clone().start_updater_detached( - panic_notify, - self.replica_connection_pool.clone(), - self.api_config.network_stats_interval(), - stop_receiver, - ); - } -} diff --git a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs deleted file mode 100644 index 9ac46c327fca..000000000000 --- a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs +++ /dev/null @@ -1,617 +0,0 @@ -use std::time::Instant; - -use actix_web::{ - web::{self, Json}, - HttpResponse, Result as ActixResult, -}; -use serde::Serialize; - -use zksync_types::{ - explorer_api::{ - AccountDetails, AccountType, AddressDetails, BlocksQuery, ContractDetails, EventsQuery, - L1BatchesQuery, PaginationQuery, TransactionsQuery, VerificationIncomingRequest, - }, - Address, L1BatchNumber, MiniblockNumber, H256, -}; - -use super::api_decl::RestApi; - -fn ok_json(data: impl Serialize) -> ActixResult { - Ok(HttpResponse::Ok().json(data)) -} - -impl RestApi { - #[tracing::instrument(skip(self_))] - pub async fn network_stats(self_: web::Data) -> ActixResult { - let start = Instant::now(); - - let stats = self_.network_stats.read().await; - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "network_stats"); - ok_json(stats) - } - - #[tracing::instrument(skip(self_))] - pub async fn address_details( - self_: web::Data, - address: web::Path
<Address>, - ) -> ActixResult<HttpResponse> { - let start = Instant::now(); - - let account_type = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .accounts_dal() - .get_account_type(*address) - .await - .unwrap(); - let response = match account_type { - AccountType::EOA => ok_json(AddressDetails::Account( - self_.account_details_inner(address).await, - )), - AccountType::Contract => { - // If account type is a contract, then `contract_details_inner` must return `Some`. - let contract_details = self_ - .contract_details_inner(address) - .await - .expect("Failed to get contract info"); - ok_json(AddressDetails::Contract(contract_details)) - } - }; - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "address_details"); - response - } - - async fn account_details_inner(&self, address: web::Path<Address>
) -> AccountDetails { - let mut storage = self - .replica_connection_pool - .access_storage_tagged("api") - .await; - - let balances = storage - .explorer() - .accounts_dal() - .get_balances_for_address(*address) - .await - .unwrap(); - let (sealed_nonce, verified_nonce) = storage - .explorer() - .accounts_dal() - .get_account_nonces(*address) - .await - .unwrap(); - - let account_type = storage - .explorer() - .accounts_dal() - .get_account_type(*address) - .await - .unwrap(); - - AccountDetails { - address: *address, - balances, - sealed_nonce, - verified_nonce, - account_type, - } - } - - #[tracing::instrument(skip(self_))] - pub async fn account_details( - self_: web::Data<RestApi>, - address: web::Path<Address>
, - ) -> ActixResult<HttpResponse> { - let start = Instant::now(); - let account_details = self_.account_details_inner(address).await; - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "account_details"); - ok_json(account_details) - } - - async fn contract_details_inner(&self, address: web::Path<Address>
) -> Option<ContractDetails> { - let mut storage = self - .replica_connection_pool - .access_storage_tagged("api") - .await; - let contract_info = storage - .explorer() - .misc_dal() - .get_contract_info(*address) - .await - .unwrap(); - if let Some(contract_info) = contract_info { - let contract_stats = storage - .explorer() - .misc_dal() - .get_contract_stats(*address) - .await - .unwrap(); - let balances = storage - .explorer() - .accounts_dal() - .get_balances_for_address(*address) - .await - .unwrap(); - Some(ContractDetails { - info: contract_info, - stats: contract_stats, - balances, - }) - } else { - None - } - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_details( - self_: web::Data<RestApi>, - address: web::Path<Address>
, - ) -> ActixResult { - let start = Instant::now(); - - let response = match self_.contract_details_inner(address).await { - Some(contract_details) => ok_json(contract_details), - None => Ok(HttpResponse::NotFound().finish()), - }; - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_details"); - response - } - - #[tracing::instrument] - fn validate_transactions_query(query: TransactionsQuery) -> Result<(), HttpResponse> { - if query.from_block_number.is_none() - && query.block_number.is_none() - && query.from_tx_index.is_some() - { - return Err(HttpResponse::BadRequest() - .body("Can't use `fromTxIndex` without `fromBlockNumber` or `blockNumber`")); - } - if query.account_address.is_some() && query.contract_address.is_some() { - return Err(HttpResponse::BadRequest() - .body("Can't use both `accountAddress` and `contractAddress`")); - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - fn validate_pagination_query(&self, pagination: PaginationQuery) -> Result<(), HttpResponse> { - if pagination.limit > self.api_config.req_entities_limit() { - return Err(HttpResponse::BadRequest().body(format!( - "Limit should not exceed {}", - self.api_config.req_entities_limit() - ))); - } - if pagination.offset + pagination.limit > self.api_config.offset_limit() { - return Err(HttpResponse::BadRequest().body(format!( - "(offset + limit) should not exceed {}", - self.api_config.offset_limit() - ))); - } - - Ok(()) - } - - #[tracing::instrument(skip(self_))] - pub async fn transaction_pagination( - self_: web::Data, - web::Query(mut query): web::Query, - ) -> ActixResult { - let start = Instant::now(); - if let Err(res) = Self::validate_transactions_query(query) { - return Ok(res); - } - if let Err(res) = self_.validate_pagination_query(query.pagination) { - return Ok(res); - } - - let mut storage = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await; - if let Some(address) = query.address { - match storage - .explorer() - .accounts_dal() - .get_account_type(address) - .await - .unwrap() - { - AccountType::EOA => query.account_address = Some(address), - AccountType::Contract => query.contract_address = Some(address), - } - } - - let response = if let Some(account_address) = query.account_address { - // If there is filter by account address - // we should query transactions from `events` table. - storage - .explorer() - .transactions_dal() - .get_account_transactions_page( - account_address, - query.tx_position(), - query.block_number, - query.pagination, - self_.api_config.offset_limit(), - self_.l2_erc20_bridge_addr, - ) - .await - .unwrap() - } else { - // If there is no filter by account address - // we can query transactions directly from `transactions` table. 
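The deleted `validate_pagination_query` above enforces two independent bounds: a per-request page size and a maximum reachable offset. A compact sketch with hypothetical limits; in the deleted code the real bounds come from `ExplorerApiConfig`:

```rust
// Hypothetical stand-ins for the config-driven pagination bounds.
struct Pagination {
    offset: u32,
    limit: u32,
}

fn validate_pagination(
    p: &Pagination,
    req_entities_limit: u32,
    offset_limit: u32,
) -> Result<(), String> {
    if p.limit > req_entities_limit {
        return Err(format!("Limit should not exceed {req_entities_limit}"));
    }
    if p.offset + p.limit > offset_limit {
        return Err(format!("(offset + limit) should not exceed {offset_limit}"));
    }
    Ok(())
}

fn main() {
    let page = Pagination { offset: 95, limit: 10 };
    // 95 + 10 exceeds the (hypothetical) offset limit of 100.
    assert!(validate_pagination(&page, 100, 100).is_err());
}
```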
- storage - .explorer() - .transactions_dal() - .get_transactions_page( - query.tx_position(), - query.block_number, - query.l1_batch_number, - query.contract_address, - query.pagination, - self_.api_config.offset_limit(), - self_.l2_erc20_bridge_addr, - ) - .await - .unwrap() - }; - - let query_type = if query.l1_batch_number.is_some() { - "l1_batch_txs" - } else if query.block_number.is_some() { - "block_txs" - } else if query.account_address.is_some() { - "account_txs" - } else if query.contract_address.is_some() { - "contract_txs" - } else { - "all_txs" - }; - let metric_endpoint_name = format!("transaction_pagination_{}", query_type); - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => metric_endpoint_name); - - ok_json(response) - } - - #[tracing::instrument(skip(self_))] - pub async fn transaction_details( - self_: web::Data, - hash: web::Path, - ) -> ActixResult { - let start = Instant::now(); - - let tx_details = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .transactions_dal() - .get_transaction_details(*hash, self_.l2_erc20_bridge_addr) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "transaction_details"); - match tx_details { - Some(tx_details) => ok_json(tx_details), - None => Ok(HttpResponse::NotFound().finish()), - } - } - - #[tracing::instrument(skip(self_))] - pub async fn block_pagination( - self_: web::Data, - web::Query(query): web::Query, - ) -> ActixResult { - let start = Instant::now(); - if let Err(res) = self_.validate_pagination_query(query.pagination) { - return Ok(res); - } - - let blocks = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .blocks_dal() - .get_blocks_page(query, self_.network_stats.read().await.last_verified) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "block_pagination"); - ok_json(blocks) - } - - #[tracing::instrument(skip(self_))] - pub async fn block_details( - self_: web::Data, - number: web::Path, - ) -> ActixResult { - let start = Instant::now(); - - let block_details = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .blocks_dal() - .get_block_details(MiniblockNumber(*number), self_.fee_account_addr) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "block_details"); - match block_details { - Some(block_details) => ok_json(block_details), - None => Ok(HttpResponse::NotFound().finish()), - } - } - - #[tracing::instrument(skip(self_))] - pub async fn l1_batch_pagination( - self_: web::Data, - web::Query(query): web::Query, - ) -> ActixResult { - let start = Instant::now(); - if let Err(res) = self_.validate_pagination_query(query.pagination) { - return Ok(res); - } - let last_verified_miniblock = self_.network_stats.read().await.last_verified; - let mut storage = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await; - - let last_verified_l1_batch = storage - .blocks_web3_dal() - .get_l1_batch_number_of_miniblock(last_verified_miniblock) - .await - .unwrap() - .expect("Verified miniblock must be included in l1 batch"); - - let l1_batches = storage - .explorer() - .blocks_dal() - .get_l1_batches_page(query, last_verified_l1_batch) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_pagination"); - ok_json(l1_batches) - } - - #[tracing::instrument(skip(self_))] - pub async fn 
l1_batch_details( - self_: web::Data<RestApi>, - number: web::Path<u32>, - ) -> ActixResult<HttpResponse> { - let start = Instant::now(); - - let l1_batch_details = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .blocks_dal() - .get_l1_batch_details(L1BatchNumber(*number)) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_details"); - match l1_batch_details { - Some(l1_batch_details) => ok_json(l1_batch_details), - None => Ok(HttpResponse::NotFound().finish()), - } - } - - #[tracing::instrument(skip(self_))] - pub async fn token_details( - self_: web::Data<RestApi>, - address: web::Path<Address>
, - ) -> ActixResult { - let start = Instant::now(); - - let token_details = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .misc_dal() - .get_token_details(*address) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "token_details"); - match token_details { - Some(token_details) => ok_json(token_details), - None => Ok(HttpResponse::NotFound().finish()), - } - } - - #[tracing::instrument(skip(query))] - fn validate_contract_verification_query( - query: &VerificationIncomingRequest, - ) -> Result<(), HttpResponse> { - if query.source_code_data.compiler_type() != query.compiler_versions.compiler_type() { - return Err(HttpResponse::BadRequest().body("incorrect compiler versions")); - } - - Ok(()) - } - - /// Add a contract verification job to the queue if the requested contract wasn't previously verified. - #[tracing::instrument(skip(self_, request))] - pub async fn contract_verification( - self_: web::Data, - Json(request): Json, - ) -> ActixResult { - let start = Instant::now(); - if let Err(res) = Self::validate_contract_verification_query(&request) { - return Ok(res); - } - let mut storage = self_ - .master_connection_pool - .access_storage_tagged("api") - .await; - - if !storage - .storage_logs_dal() - .is_contract_deployed_at_address(request.contract_address) - .await - { - return Ok( - HttpResponse::BadRequest().body("There is no deployed contract on this address") - ); - } - if storage - .explorer() - .contract_verification_dal() - .is_contract_verified(request.contract_address) - .await - { - return Ok(HttpResponse::BadRequest().body("This contract is already verified")); - } - - let request_id = storage - .explorer() - .contract_verification_dal() - .add_contract_verification_request(request) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification"); - ok_json(request_id) - } - - #[tracing::instrument(skip(self_))] - pub async fn events_pagination( - self_: web::Data, - web::Query(query): web::Query, - ) -> ActixResult { - let start = Instant::now(); - if let Err(res) = self_.validate_pagination_query(query.pagination) { - return Ok(res); - } - - let events = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .events_dal() - .get_events_page(query, self_.api_config.offset_limit()) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "events_pagination"); - - ok_json(events) - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_verification_request_status( - self_: web::Data, - id: web::Path, - ) -> ActixResult { - let start = Instant::now(); - - let status = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .contract_verification_dal() - .get_verification_request_status(*id) - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_request_status"); - match status { - Some(status) => ok_json(status), - None => Ok(HttpResponse::NotFound().finish()), - } - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_verification_zksolc_versions( - self_: web::Data, - ) -> ActixResult { - let start = Instant::now(); - - let versions = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .contract_verification_dal() - .get_zksolc_versions() - .await - .unwrap(); - - 
metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_zksolc_versions"); - ok_json(versions) - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_verification_solc_versions( - self_: web::Data, - ) -> ActixResult { - let start = Instant::now(); - - let versions = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .contract_verification_dal() - .get_solc_versions() - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_solc_versions"); - ok_json(versions) - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_verification_zkvyper_versions( - self_: web::Data, - ) -> ActixResult { - let start = Instant::now(); - - let versions = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .contract_verification_dal() - .get_zkvyper_versions() - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_zkvyper_versions"); - ok_json(versions) - } - - #[tracing::instrument(skip(self_))] - pub async fn contract_verification_vyper_versions( - self_: web::Data, - ) -> ActixResult { - let start = Instant::now(); - - let versions = self_ - .replica_connection_pool - .access_storage_tagged("api") - .await - .explorer() - .contract_verification_dal() - .get_vyper_versions() - .await - .unwrap(); - - metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_vyper_versions"); - ok_json(versions) - } -} diff --git a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs b/core/bin/zksync_core/src/api_server/explorer/network_stats.rs deleted file mode 100644 index dcda339b2703..000000000000 --- a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs +++ /dev/null @@ -1,87 +0,0 @@ -use futures::channel::mpsc; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::{watch, RwLock}; -use tokio::{runtime::Runtime, time}; -use zksync_dal::ConnectionPool; -use zksync_types::{api, MiniblockNumber}; -use zksync_utils::panic_notify::ThreadPanicNotify; - -#[derive(Default, Debug, Serialize, Deserialize, Clone)] -pub struct NetworkStats { - pub last_sealed: MiniblockNumber, - pub last_verified: MiniblockNumber, - pub total_transactions: usize, -} - -#[derive(Debug, Default, Clone)] -pub struct SharedNetworkStats(Arc>); - -impl SharedNetworkStats { - pub async fn read(&self) -> NetworkStats { - (*self.0.as_ref().read().await).clone() - } - - pub fn start_updater_detached( - self, - panic_notify: mpsc::Sender, - connection_pool: ConnectionPool, - polling_interval: Duration, - stop_receiver: watch::Receiver, - ) { - std::thread::Builder::new() - .name("explorer-stats-updater".to_string()) - .spawn(move || { - let _panic_sentinel = ThreadPanicNotify(panic_notify.clone()); - - let runtime = Runtime::new().expect("Failed to create tokio runtime"); - - let stats_update_task = async move { - let mut timer = time::interval(polling_interval); - loop { - if *stop_receiver.borrow() { - vlog::warn!( - "Stop signal received, explorer_stats_updater is shutting down" - ); - break; - } - - timer.tick().await; - - let mut storage = connection_pool.access_storage_tagged("api").await; - - let last_sealed = storage - .blocks_web3_dal() - .get_sealed_miniblock_number() - .await - .unwrap(); - let last_verified = storage - .blocks_web3_dal() - 
.resolve_block_id(api::BlockId::Number(api::BlockNumber::Finalized)) - .await - .unwrap() - .unwrap_or(MiniblockNumber(0)); - let prev_stats = self.read().await; - let new_transactions = storage - .explorer() - .transactions_dal() - .get_transactions_count_between(prev_stats.last_sealed + 1, last_sealed) - .await - .unwrap(); - - let stats = NetworkStats { - last_sealed, - last_verified, - total_transactions: prev_stats.total_transactions + new_transactions, - }; - - // save stats to state - *self.0.as_ref().write().await = stats; - } - }; - runtime.block_on(stats_update_task); - }) - .expect("Failed to start thread for network stats updating"); - } -} diff --git a/core/bin/zksync_core/src/api_server/healthcheck.rs b/core/bin/zksync_core/src/api_server/healthcheck.rs index 73d958d97c60..af3409f9f4e1 100644 --- a/core/bin/zksync_core/src/api_server/healthcheck.rs +++ b/core/bin/zksync_core/src/api_server/healthcheck.rs @@ -1,83 +1,90 @@ -use actix_web::dev::Server; -use actix_web::{get, web, App, HttpResponse, HttpServer, Responder}; -use serde::Serialize; -use std::{net::SocketAddr, sync::Arc}; +use axum::{extract::State, http::StatusCode, routing::get, Json, Router}; use tokio::sync::watch; -use zksync_health_check::{CheckHealth, CheckHealthStatus}; -use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; -#[derive(Serialize)] -pub struct Response { - pub message: String, +use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Duration}; + +use zksync_health_check::{AppHealth, CheckHealth}; + +type SharedHealthchecks = Arc<[Box]>; + +async fn check_health(health_checks: State) -> (StatusCode, Json) { + let response = AppHealth::new(&health_checks).await; + let response_code = if response.is_ready() { + StatusCode::OK + } else { + StatusCode::SERVICE_UNAVAILABLE + }; + (response_code, Json(response)) } -#[get("/health")] -async fn healthcheck(healthchecks: web::Data<[Box]>) -> impl Responder { - for healthcheck in healthchecks.iter() { - match healthcheck.check_health().await { - CheckHealthStatus::NotReady(message) => { - let response = Response { message }; - return HttpResponse::ServiceUnavailable().json(response); - } - CheckHealthStatus::Ready => (), +async fn run_server( + bind_address: &SocketAddr, + health_checks: Vec>, + mut stop_receiver: watch::Receiver, +) { + let mut health_check_names = HashSet::with_capacity(health_checks.len()); + for check in &health_checks { + let health_check_name = check.name(); + if !health_check_names.insert(health_check_name) { + vlog::warn!( + "Health check with name `{health_check_name}` is defined multiple times; only the last mention \ + will be present in `/health` endpoint output" + ); } } - let response = Response { - message: "Everything is working fine".to_string(), - }; - HttpResponse::Ok().json(response) -} + vlog::debug!( + "Starting healthcheck server with checks {health_check_names:?} on {bind_address}" + ); + + let health_checks = SharedHealthchecks::from(health_checks); + let app = Router::new() + .route("/health", get(check_health)) + .with_state(health_checks); -fn run_server(bind_address: SocketAddr, healthchecks: Vec>) -> Server { - let healthchecks: Arc<[Box]> = healthchecks.into(); - let data = web::Data::from(healthchecks); - HttpServer::new(move || App::new().service(healthcheck).app_data(data.clone())) - .workers(1) - .bind(bind_address) - .unwrap() - .run() + axum::Server::bind(bind_address) + .serve(app.into_make_service()) + .with_graceful_shutdown(async move { + if 
stop_receiver.changed().await.is_err() { + vlog::warn!("Stop signal sender for healthcheck server was dropped without sending a signal"); + } + vlog::info!("Stop signal received, healthcheck server is shutting down"); + }) + .await + .expect("Healthcheck server failed"); + vlog::info!("Healthcheck server shut down"); } +#[derive(Debug)] pub struct HealthCheckHandle { server: tokio::task::JoinHandle<()>, stop_sender: watch::Sender, } impl HealthCheckHandle { - pub async fn stop(self) { - self.stop_sender.send(true).ok(); - self.server.await.unwrap(); - } -} + pub fn spawn_server(addr: SocketAddr, healthchecks: Vec>) -> Self { + let (stop_sender, stop_receiver) = watch::channel(false); + let server = tokio::spawn(async move { + run_server(&addr, healthchecks, stop_receiver).await; + }); -/// Start HTTP healthcheck API -pub fn start_server_thread_detached( - addr: SocketAddr, - healthchecks: Vec>, -) -> HealthCheckHandle { - let (handler, panic_sender) = spawn_panic_handler(); - let (stop_sender, mut stop_receiver) = watch::channel(false); - std::thread::Builder::new() - .name("healthcheck".to_string()) - .spawn(move || { - let _panic_sentinel = ThreadPanicNotify(panic_sender.clone()); + Self { + server, + stop_sender, + } + } - actix_rt::System::new().block_on(async move { - let server = run_server(addr, healthchecks); - let close_handle = server.handle(); - actix_rt::spawn(async move { - if stop_receiver.changed().await.is_ok() { - close_handle.stop(true).await; - vlog::info!("Stop signal received, Health api is shutting down"); - } - }); - server.await.expect("Health api crashed"); - }); - }) - .expect("Failed to spawn thread for REST API"); + pub async fn stop(self) { + // Paradoxically, `hyper` server is quite slow to shut down if it isn't queried during shutdown: + // https://github.com/hyperium/hyper/issues/3188. It is thus recommended to set a timeout for shutdown. + const GRACEFUL_SHUTDOWN_WAIT: Duration = Duration::from_secs(10); - HealthCheckHandle { - server: handler, - stop_sender, + self.stop_sender.send(true).ok(); + let server_result = tokio::time::timeout(GRACEFUL_SHUTDOWN_WAIT, self.server).await; + if let Ok(server_result) = server_result { + // Propagate potential panics from the server task. + server_result.unwrap(); + } else { + vlog::debug!("Timed out {GRACEFUL_SHUTDOWN_WAIT:?} waiting for healthcheck server to gracefully shut down"); + } } } diff --git a/core/bin/zksync_core/src/api_server/mod.rs b/core/bin/zksync_core/src/api_server/mod.rs index 5476e2775325..a224c371502b 100644 --- a/core/bin/zksync_core/src/api_server/mod.rs +++ b/core/bin/zksync_core/src/api_server/mod.rs @@ -1,6 +1,6 @@ // Everywhere in this module the word "block" actually means "miniblock". +pub mod contract_verification; pub mod execution_sandbox; -pub mod explorer; pub mod healthcheck; pub mod tx_sender; pub mod web3; diff --git a/core/bin/zksync_core/src/api_server/tx_sender/error.rs b/core/bin/zksync_core/src/api_server/tx_sender/error.rs index e5045b1ee689..847a8206807c 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/error.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/error.rs @@ -22,6 +22,8 @@ pub enum SubmitTxError { Unexecutable(String), #[error("too many transactions")] RateLimitExceeded, + #[error("server shutting down")] + ServerShuttingDown, #[error("failed to include transaction in the system. reason: {0}")] BootloaderFailure(String), #[error("failed to validate the transaction. 
reason: {0}")] @@ -69,37 +71,36 @@ pub enum SubmitTxError { impl SubmitTxError { pub fn grafana_error_code(&self) -> &'static str { match self { - SubmitTxError::NonceIsTooHigh(_, _, _) => "nonce-is-too-high", - SubmitTxError::NonceIsTooLow(_, _, _) => "nonce-is-too-low", - SubmitTxError::IncorrectTx(_) => "incorrect-tx", - SubmitTxError::NotEnoughBalanceForFeeValue(_, _, _) => "not-enough-balance-for-fee", - SubmitTxError::ExecutionReverted(_, _) => "execution-reverted", - SubmitTxError::GasLimitIsTooBig => "gas-limit-is-too-big", - SubmitTxError::Unexecutable(_) => "unexecutable", - SubmitTxError::RateLimitExceeded => "rate-limit-exceeded", - SubmitTxError::BootloaderFailure(_) => "bootloader-failure", - SubmitTxError::ValidationFailed(_) => "validation-failed", - SubmitTxError::FailedToChargeFee(_) => "failed-too-charge-fee", - SubmitTxError::PaymasterValidationFailed(_) => "failed-paymaster-validation", - SubmitTxError::PrePaymasterPreparationFailed(_) => "failed-prepaymaster-preparation", - SubmitTxError::FromIsNotAnAccount => "from-is-not-an-account", - SubmitTxError::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", - SubmitTxError::MaxPriorityFeeGreaterThanMaxFee => { - "max-priority-fee-greater-than-max-fee" - } - SubmitTxError::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", - SubmitTxError::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", - SubmitTxError::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", - SubmitTxError::FeePerGasTooHigh => "gas-price-limit-too-high", - SubmitTxError::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", - SubmitTxError::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", - SubmitTxError::IntrinsicGas => "intrinsic-gas", - SubmitTxError::ProxyError(_) => "proxy-error", + Self::NonceIsTooHigh(_, _, _) => "nonce-is-too-high", + Self::NonceIsTooLow(_, _, _) => "nonce-is-too-low", + Self::IncorrectTx(_) => "incorrect-tx", + Self::NotEnoughBalanceForFeeValue(_, _, _) => "not-enough-balance-for-fee", + Self::ExecutionReverted(_, _) => "execution-reverted", + Self::GasLimitIsTooBig => "gas-limit-is-too-big", + Self::Unexecutable(_) => "unexecutable", + Self::RateLimitExceeded => "rate-limit-exceeded", + Self::ServerShuttingDown => "shutting-down", + Self::BootloaderFailure(_) => "bootloader-failure", + Self::ValidationFailed(_) => "validation-failed", + Self::FailedToChargeFee(_) => "failed-too-charge-fee", + Self::PaymasterValidationFailed(_) => "failed-paymaster-validation", + Self::PrePaymasterPreparationFailed(_) => "failed-prepaymaster-preparation", + Self::FromIsNotAnAccount => "from-is-not-an-account", + Self::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", + Self::MaxPriorityFeeGreaterThanMaxFee => "max-priority-fee-greater-than-max-fee", + Self::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", + Self::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", + Self::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", + Self::FeePerGasTooHigh => "gas-price-limit-too-high", + Self::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", + Self::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", + Self::IntrinsicGas => "intrinsic-gas", + Self::ProxyError(_) => "proxy-error", } } pub fn data(&self) -> Vec { - if let SubmitTxError::ExecutionReverted(_, data) = self { + if let Self::ExecutionReverted(_, data) = self { data.clone() } else { Vec::new() @@ -110,35 +111,29 @@ impl SubmitTxError { impl From for SubmitTxError { fn from(err: 
SandboxExecutionError) -> SubmitTxError { match err { - SandboxExecutionError::Revert(reason, data) => { - SubmitTxError::ExecutionReverted(reason, data) - } - SandboxExecutionError::BootloaderFailure(reason) => { - SubmitTxError::BootloaderFailure(reason) - } + SandboxExecutionError::Revert(reason, data) => Self::ExecutionReverted(reason, data), + SandboxExecutionError::BootloaderFailure(reason) => Self::BootloaderFailure(reason), SandboxExecutionError::AccountValidationFailed(reason) => { - SubmitTxError::ValidationFailed(reason) + Self::ValidationFailed(reason) } SandboxExecutionError::PaymasterValidationFailed(reason) => { - SubmitTxError::PaymasterValidationFailed(reason) + Self::PaymasterValidationFailed(reason) } SandboxExecutionError::PrePaymasterPreparationFailed(reason) => { - SubmitTxError::PrePaymasterPreparationFailed(reason) - } - SandboxExecutionError::FailedToChargeFee(reason) => { - SubmitTxError::FailedToChargeFee(reason) + Self::PrePaymasterPreparationFailed(reason) } - SandboxExecutionError::FromIsNotAnAccount => SubmitTxError::FromIsNotAnAccount, + SandboxExecutionError::FailedToChargeFee(reason) => Self::FailedToChargeFee(reason), + SandboxExecutionError::FromIsNotAnAccount => Self::FromIsNotAnAccount, SandboxExecutionError::InnerTxError => { - SubmitTxError::ExecutionReverted("Bootloader-based tx failed".to_owned(), vec![]) + Self::ExecutionReverted("Bootloader-based tx failed".to_owned(), vec![]) } SandboxExecutionError::UnexpectedVMBehavior(reason) => { - SubmitTxError::UnexpectedVMBehavior(reason) + Self::UnexpectedVMBehavior(reason) } SandboxExecutionError::FailedToPayForTransaction(reason) => { - SubmitTxError::FailedToChargeFee(reason) + Self::FailedToChargeFee(reason) } - SandboxExecutionError::Unexecutable(reason) => SubmitTxError::Unexecutable(reason), + SandboxExecutionError::Unexecutable(reason) => Self::Unexecutable(reason), } } } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs index 88c0ed8bee90..c462f0cf8423 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs @@ -19,12 +19,9 @@ use vm::{ VmExecutionResult, }; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; -use zksync_contracts::{ - BaseSystemContracts, SystemContractCode, ESTIMATE_FEE_BLOCK_CODE, - PLAYGROUND_BLOCK_BOOTLOADER_CODE, -}; +use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; -use zksync_state::FactoryDepsCache; +use zksync_state::PostgresStorageCaches; use zksync_types::{ fee::{Fee, TransactionExecutionMetrics}, get_code_key, get_intrinsic_constants, @@ -34,7 +31,7 @@ use zksync_types::{ AccountTreeId, Address, ExecuteTransactionCommon, Nonce, StorageKey, Transaction, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, }; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_utils::h256_to_u256; // Local uses use crate::api_server::execution_sandbox::{ @@ -54,6 +51,32 @@ pub(super) use self::{error::SubmitTxError, proxy::TxProxy}; type TxSenderRateLimiter = RateLimiter>; +/// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and +/// performing `eth_call` requests. +#[derive(Debug, Clone)] +pub struct ApiContracts { + /// Contracts to be used when estimating gas. + /// These contracts (mainly, bootloader) normally should be tuned to provide accurate + /// execution metrics. 
+ estimate_gas: BaseSystemContracts, + /// Contracts to be used when performing `eth_call` requests. + /// These contracts (mainly, bootloader) normally should be tuned to provide better UX + /// experience (e.g. revert messages). + eth_call: BaseSystemContracts, +} + +impl ApiContracts { + /// Loads the contracts from the local file system. + /// This method is *currently* preferred to be used in all contexts, + /// given that there is no way to fetch "playground" contracts from the main node. + pub fn load_from_disk() -> Self { + Self { + estimate_gas: BaseSystemContracts::estimate_gas(), + eth_call: BaseSystemContracts::playground(), + } + } +} + /// Builder for the `TxSender`. #[derive(Debug)] pub struct TxSenderBuilder { @@ -113,53 +136,26 @@ impl TxSenderBuilder { pub async fn build( self, l1_gas_price_source: Arc<G>, - default_aa_hash: H256, vm_concurrency_limiter: Arc<VmConcurrencyLimiter>, - factory_deps_cache: FactoryDepsCache, + api_contracts: ApiContracts, + storage_caches: PostgresStorageCaches, ) -> TxSender<G> { assert!( self.master_connection_pool.is_some() || self.proxy.is_some(), "Either master connection pool or proxy must be set" ); - let mut storage = self - .replica_connection_pool - .access_storage_tagged("api") - .await; - let default_aa_bytecode = storage - .storage_dal() - .get_factory_dep(default_aa_hash) - .await - .expect("Default AA hash must be present in the database"); - drop(storage); - - let default_aa_contract = SystemContractCode { - code: bytes_to_be_words(default_aa_bytecode), - hash: default_aa_hash, - }; - - let playground_base_system_contracts = BaseSystemContracts { - default_aa: default_aa_contract.clone(), - bootloader: PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(), - }; - - let estimate_fee_base_system_contracts = BaseSystemContracts { - default_aa: default_aa_contract, - bootloader: ESTIMATE_FEE_BLOCK_CODE.clone(), - }; - TxSender(Arc::new(TxSenderInner { sender_config: self.config, master_connection_pool: self.master_connection_pool, replica_connection_pool: self.replica_connection_pool, l1_gas_price_source, - playground_base_system_contracts, - estimate_fee_base_system_contracts, + api_contracts, rate_limiter: self.rate_limiter, proxy: self.proxy, state_keeper_config: self.state_keeper_config, vm_concurrency_limiter, - factory_deps_cache, + storage_caches, })) } } @@ -207,8 +203,7 @@ pub struct TxSenderInner<G> { pub replica_connection_pool: ConnectionPool, // Used to keep track of gas prices for the fee ticker. pub l1_gas_price_source: Arc<G>, - pub(super) playground_base_system_contracts: BaseSystemContracts, - estimate_fee_base_system_contracts: BaseSystemContracts, + pub(super) api_contracts: ApiContracts, /// Optional rate limiter that will limit the amount of transactions per second sent from a single entity. rate_limiter: Option<TxSenderRateLimiter>, /// Optional transaction proxy to be used for transaction submission. @@ -219,11 +214,11 @@ pub struct TxSenderInner<G> { state_keeper_config: Option<StateKeeperConfig>, /// Used to limit the amount of VMs that can be executed simultaneously. pub(super) vm_concurrency_limiter: Arc<VmConcurrencyLimiter>, - // Smart contract source code cache. - pub(super) factory_deps_cache: FactoryDepsCache, + // Caches used in VM execution.
+ storage_caches: PostgresStorageCaches, } -pub struct TxSender(pub Arc>); +pub struct TxSender(pub(super) Arc>); // Custom implementation is required due to generic param: // Even though it's under `Arc`, compiler doesn't generate the `Clone` implementation unless @@ -241,6 +236,14 @@ impl std::fmt::Debug for TxSender { } impl TxSender { + pub(crate) fn vm_concurrency_limiter(&self) -> Arc { + Arc::clone(&self.0.vm_concurrency_limiter) + } + + pub(crate) fn storage_caches(&self) -> PostgresStorageCaches { + self.0.storage_caches.clone() + } + #[tracing::instrument(skip(self, tx))] pub async fn submit_tx(&self, tx: L2Tx) -> Result { if let Some(rate_limiter) = &self.0.rate_limiter { @@ -256,8 +259,10 @@ impl TxSender { let shared_args = self.shared_args(); let vm_permit = self.0.vm_concurrency_limiter.acquire().await; + let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; + let (_, tx_metrics) = execute_tx_with_pending_state( - &vm_permit, + vm_permit.clone(), shared_args.clone(), TxExecutionArgs::for_validation(&tx), self.0.replica_connection_pool.clone(), @@ -277,13 +282,12 @@ impl TxSender { let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit; let validation_result = shared_args .validate_tx_with_pending_state( - &vm_permit, + vm_permit, self.0.replica_connection_pool.clone(), tx.clone(), computational_gas_limit, ) .await; - drop(vm_permit); // Unblock other VMs to enter. metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "3_verify_execute"); stage_started_at = Instant::now(); @@ -366,8 +370,8 @@ impl TxSender { operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), l1_gas_price: self.0.l1_gas_price_source.estimate_effective_gas_price(), fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, - base_system_contracts: self.0.playground_base_system_contracts.clone(), - factory_deps_cache: self.0.factory_deps_cache.clone(), + base_system_contracts: self.0.api_contracts.eth_call.clone(), + caches: self.storage_caches(), } } @@ -531,7 +535,7 @@ impl TxSender { #[allow(clippy::too_many_arguments)] async fn estimate_gas_step( &self, - vm_permit: &VmPermit<'_>, + vm_permit: VmPermit, mut tx: Transaction, gas_per_pubdata_byte: u64, tx_gas_limit: u32, @@ -557,6 +561,14 @@ impl TxSender { ExecuteTransactionCommon::L2(l2_common_data) => { l2_common_data.fee.gas_limit = gas_limit_with_overhead.into(); } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + common_data.gas_limit = gas_limit_with_overhead.into(); + + let required_funds = + common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value; + + common_data.to_mint = required_funds; + } } let shared_args = self.shared_args_for_gas_estimate(l1_gas_price); @@ -574,7 +586,9 @@ impl TxSender { .await; if let Err(err) = self.ensure_tx_executable(tx, &tx_metrics, false) { - let SubmitTxError::Unexecutable(message) = err else { unreachable!() }; + let SubmitTxError::Unexecutable(message) = err else { + unreachable!() + }; return Err(SandboxExecutionError::Unexecutable(message)); } exec_result @@ -585,8 +599,8 @@ impl TxSender { operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), l1_gas_price, fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, - base_system_contracts: self.0.estimate_fee_base_system_contracts.clone(), - factory_deps_cache: self.0.factory_deps_cache.clone(), + base_system_contracts: self.0.api_contracts.estimate_gas.clone(), + caches: self.storage_caches(), } } @@ -623,6 +637,9 
@@ impl TxSender { ExecuteTransactionCommon::L1(common_data) => { common_data.max_fee_per_gas = base_fee.into(); } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + common_data.max_fee_per_gas = base_fee.into(); + } } let hashed_key = get_code_key(&tx.initiator_account()); @@ -662,6 +679,10 @@ impl TxSender { l2_common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); } + // Acquire the vm token for the whole duration of the binary search. + let vm_permit = self.0.vm_concurrency_limiter.acquire().await; + let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; + // We already know how many gas is needed to cover for the publishing of the bytecodes. // For L1->L2 transactions all the bytecodes have been made available on L1, so no funds need to be // spent on re-publishing those. @@ -669,11 +690,13 @@ impl TxSender { 0 } else { let pubdata_for_factory_deps = get_pubdata_for_factory_deps( + &vm_permit, &self.0.replica_connection_pool, tx.execute.factory_deps.as_deref().unwrap_or_default(), - self.0.factory_deps_cache.clone(), + self.storage_caches(), ) .await; + if pubdata_for_factory_deps > MAX_PUBDATA_PER_BLOCK { return Err(SubmitTxError::Unexecutable( "exceeds limit for published pubdata".to_string(), @@ -700,8 +723,6 @@ impl TxSender { estimation_started_at.elapsed(), ); - // Acquire the vm token for the whole duration of the binary search. - let vm_permit = self.0.vm_concurrency_limiter.acquire().await; let mut number_of_iterations = 0usize; while lower_bound + acceptable_overestimation < upper_bound { let mid = (lower_bound + upper_bound) / 2; @@ -712,7 +733,7 @@ impl TxSender { let try_gas_limit = gas_for_bytecodes_pubdata + mid; let result = self .estimate_gas_step( - &vm_permit, + vm_permit.clone(), tx.clone(), gas_per_pubdata_byte, try_gas_limit, @@ -750,7 +771,7 @@ impl TxSender { let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata; let result = self .estimate_gas_step( - &vm_permit, + vm_permit, tx.clone(), gas_per_pubdata_byte, suggested_gas_limit, @@ -760,7 +781,6 @@ impl TxSender { ) .await; - drop(vm_permit); // Unblock other VMs to enter. match result { Err(err) => Err(err.into()), Ok(_) => { @@ -798,9 +818,11 @@ impl TxSender { tx: L2Tx, ) -> Result, SubmitTxError> { let vm_permit = self.0.vm_concurrency_limiter.acquire().await; + let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; + let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; let result = execute_tx_eth_call( - &vm_permit, + vm_permit, self.shared_args(), self.0.replica_connection_pool.clone(), tx, @@ -809,7 +831,6 @@ impl TxSender { false, ) .await?; - drop(vm_permit); // Unblock other VMs to enter. Ok(match result.revert_reason { Some(result) => result.original_data, diff --git a/core/bin/zksync_core/src/api_server/web3/api_health_check.rs b/core/bin/zksync_core/src/api_server/web3/api_health_check.rs deleted file mode 100644 index 08bd73ecd547..000000000000 --- a/core/bin/zksync_core/src/api_server/web3/api_health_check.rs +++ /dev/null @@ -1,26 +0,0 @@ -use async_trait::async_trait; -use tokio::sync::watch; -use zksync_health_check::{CheckHealth, CheckHealthStatus}; - -/// HealthCheck used to verify if the Api is ready. 
-/// Used in the /health endpoint -#[derive(Clone, Debug)] -pub struct ApiHealthCheck { - receiver: watch::Receiver, -} - -impl ApiHealthCheck { - pub(super) fn new(receiver: watch::Receiver) -> ApiHealthCheck { - ApiHealthCheck { receiver } - } -} - -#[async_trait] -impl CheckHealth for ApiHealthCheck { - async fn check_health(&self) -> CheckHealthStatus { - match *self.receiver.borrow() { - CheckHealthStatus::Ready => CheckHealthStatus::Ready, - CheckHealthStatus::NotReady(ref error) => CheckHealthStatus::NotReady(error.clone()), - } - } -} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs index 9cbe67e6c074..3775da78e41a 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs @@ -5,8 +5,9 @@ use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig, H256}, + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, transaction_request::CallRequest, + H256, }; #[rpc] diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs index db2190919c5f..00ba9379ae5f 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs @@ -11,10 +11,9 @@ use zksync_types::{ TransactionVariant, }, transaction_request::CallRequest, - web3::types::{Index, SyncState}, + web3::types::{FeeHistory, Index, SyncState}, Address, Bytes, H256, U256, U64, }; -use zksync_web3_decl::error::Web3Error; use zksync_web3_decl::types::{Block, Filter, FilterChanges, Log}; // Local uses @@ -168,11 +167,13 @@ pub trait EthNamespaceT { #[rpc(name = "eth_mining")] fn mining(&self) -> BoxFuture>; - #[rpc(name = "eth_sendTransaction")] - fn send_transaction( + #[rpc(name = "eth_feeHistory")] + fn fee_history( &self, - transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> BoxFuture>; + block_count: U64, + newest_block: BlockNumber, + reward_percentiles: Vec, + ) -> BoxFuture>; } impl EthNamespaceT for EthNamespace { @@ -492,21 +493,18 @@ impl EthNamespaceT for EthNamespa Box::pin(async move { Ok(self_.mining_impl()) }) } - fn send_transaction( + fn fee_history( &self, - _transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> BoxFuture> { - #[cfg(feature = "openzeppelin_tests")] + block_count: U64, + newest_block: BlockNumber, + reward_percentiles: Vec, + ) -> BoxFuture> { let self_ = self.clone(); Box::pin(async move { - #[cfg(feature = "openzeppelin_tests")] - return self_ - .send_transaction_impl(_transaction_request) + self_ + .fee_history_impl(block_count, newest_block, reward_percentiles) .await - .map_err(into_jsrpc_error); - - #[cfg(not(feature = "openzeppelin_tests"))] - Err(into_jsrpc_error(Web3Error::NotImplemented)) + .map_err(into_jsrpc_error) }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index fe0dc9a9d60b..765a85bea2d7 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -8,13 +8,14 @@ use 
jsonrpc_derive::rpc; // Workspace uses use zksync_types::{ - api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails}, - explorer_api::{BlockDetails, L1BatchDetails}, + api::{ + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + TransactionDetails, + }, fee::Fee, transaction_request::CallRequest, - Address, Bytes, L1BatchNumber, MiniblockNumber, H256, U256, U64, + Address, L1BatchNumber, MiniblockNumber, H256, U256, U64, }; -use zksync_web3_decl::error::Web3Error; use zksync_web3_decl::types::Token; // Local uses @@ -81,9 +82,6 @@ pub trait ZksNamespaceT { #[rpc(name = "zks_getL1BatchBlockRange")] fn get_miniblock_range(&self, batch: L1BatchNumber) -> BoxFuture>>; - #[rpc(name = "zks_setKnownBytecode")] - fn set_known_bytecode(&self, bytecode: Bytes) -> BoxFuture>; - #[rpc(name = "zks_getTransactionDetails")] fn get_transaction_details(&self, hash: H256) -> BoxFuture>>; @@ -104,6 +102,12 @@ pub trait ZksNamespaceT { #[rpc(name = "zks_getL1GasPrice")] fn get_l1_gas_price(&self) -> BoxFuture>; + + #[rpc(name = "zks_getProtocolVersion")] + fn get_protocol_version( + &self, + version_id: Option, + ) -> BoxFuture>>; } impl ZksNamespaceT for ZksNamespace { @@ -248,18 +252,6 @@ impl ZksNamespaceT for ZksNamespa }) } - fn set_known_bytecode(&self, _bytecode: Bytes) -> BoxFuture> { - #[cfg(feature = "openzeppelin_tests")] - let self_ = self.clone(); - Box::pin(async move { - #[cfg(feature = "openzeppelin_tests")] - return Ok(self_.set_known_bytecode_impl(_bytecode)); - - #[cfg(not(feature = "openzeppelin_tests"))] - Err(into_jsrpc_error(Web3Error::NotImplemented)) - }) - } - fn get_raw_block_transactions( &self, block_number: MiniblockNumber, @@ -295,4 +287,12 @@ impl ZksNamespaceT for ZksNamespa let self_ = self.clone(); Box::pin(async move { Ok(self_.get_l1_gas_price_impl()) }) } + + fn get_protocol_version( + &self, + version_id: Option, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_protocol_version_impl(version_id).await) }) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs index 290c1c770ccd..4766b1e8878a 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs @@ -4,7 +4,7 @@ use zksync_types::{ TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::types::{Index, SyncState}, + web3::types::{FeeHistory, Index, SyncState}, Address, Bytes, H256, U256, U64, }; use zksync_web3_decl::{ @@ -227,4 +227,15 @@ impl EthNamespaceServer for EthNa async fn mining(&self) -> RpcResult { Ok(self.mining_impl()) } + + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumber, + reward_percentiles: Vec, + ) -> RpcResult { + self.fee_history_impl(block_count, newest_block, reward_percentiles) + .await + .map_err(into_jsrpc_error) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index eef2575c21b4..61b95d723ea3 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -3,11 +3,13 @@ use bigdecimal::BigDecimal; use std::collections::HashMap; use zksync_types::{ - api::{BridgeAddresses, L2ToL1LogProof, 
TransactionDetails, U64}, - explorer_api::{BlockDetails, L1BatchDetails}, + api::{ + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + TransactionDetails, + }, fee::Fee, transaction_request::CallRequest, - Address, L1BatchNumber, MiniblockNumber, H256, U256, + Address, L1BatchNumber, MiniblockNumber, H256, U256, U64, }; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, @@ -143,4 +145,11 @@ impl ZksNamespaceServer for ZksNa async fn get_l1_gas_price(&self) -> RpcResult { Ok(self.get_l1_gas_price_impl()) } + + async fn get_protocol_version( + &self, + version_id: Option, + ) -> RpcResult> { + Ok(self.get_protocol_version_impl(version_id).await) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/mod.rs b/core/bin/zksync_core/src/api_server/web3/mod.rs index 5c46cfbbda8b..8cf1c69c8041 100644 --- a/core/bin/zksync_core/src/api_server/web3/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/mod.rs @@ -1,40 +1,47 @@ -// Built-in uses -use std::collections::HashMap; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; - // External uses -use futures::channel::oneshot; -use futures::FutureExt; -use jsonrpc_core::IoHandler; +use futures::future; +use jsonrpc_core::{IoHandler, MetaIoHandler}; use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; +use serde::Deserialize; use tokio::sync::{watch, RwLock}; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; +// Built-in uses +use std::{net::SocketAddr, sync::Arc, time::Duration}; + // Workspace uses use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner}; -use zksync_types::{api, Address, MiniblockNumber, H256}; +use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_types::{api, MiniblockNumber}; use zksync_web3_decl::{ error::Web3Error, - jsonrpsee::{server::ServerBuilder, RpcModule}, + jsonrpsee::{ + server::{BatchRequestConfig, ServerBuilder}, + RpcModule, + }, namespaces::{ DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, NetNamespaceServer, Web3NamespaceServer, ZksNamespaceServer, }, }; -use self::state::InternalApiConfig; -use crate::l1_gas_price::L1GasPriceProvider; -use crate::sync_layer::SyncState; - // Local uses -use super::tx_sender::TxSender; -use crate::api_server::web3::api_health_check::ApiHealthCheck; -use backend_jsonrpc::{ +use crate::{ + api_server::{execution_sandbox::VmConcurrencyBarrier, tx_sender::TxSender}, + l1_gas_price::L1GasPriceProvider, + sync_layer::SyncState, +}; + +pub mod backend_jsonrpc; +pub mod backend_jsonrpsee; +pub mod namespaces; +mod pubsub_notifier; +pub mod state; + +// Uses from submodules. 
+use self::backend_jsonrpc::{ error::internal_error, namespaces::{ debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, @@ -42,20 +49,15 @@ use backend_jsonrpc::{ }, pub_sub::Web3PubSub, }; -use namespaces::{ +use self::namespaces::{ DebugNamespace, EnNamespace, EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, ZksNamespace, }; -use pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; -use state::{Filters, RpcState}; -use zksync_health_check::CheckHealthStatus; +use self::pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; +use self::state::{Filters, InternalApiConfig, RpcState}; -pub mod api_health_check; -pub mod backend_jsonrpc; -pub mod backend_jsonrpsee; -pub mod namespaces; -mod pubsub_notifier; -pub mod state; +/// Timeout for graceful shutdown logic within API servers. +const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5); #[derive(Debug, Clone, Copy)] enum ApiBackend { @@ -69,6 +71,39 @@ enum ApiTransport { Http(SocketAddr), } +#[derive(Debug, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum Namespace { + Eth, + Net, + Web3, + Debug, + Zks, + En, + Pubsub, +} + +impl Namespace { + pub const ALL: &'static [Namespace] = &[ + Namespace::Eth, + Namespace::Net, + Namespace::Web3, + Namespace::Debug, + Namespace::Zks, + Namespace::En, + Namespace::Pubsub, + ]; + + pub const NON_DEBUG: &'static [Namespace] = &[ + Namespace::Eth, + Namespace::Net, + Namespace::Web3, + Namespace::Zks, + Namespace::En, + Namespace::Pubsub, + ]; +} + #[derive(Debug)] pub struct ApiBuilder { backend: ApiBackend, @@ -76,14 +111,16 @@ pub struct ApiBuilder { config: InternalApiConfig, transport: Option, tx_sender: Option>, + vm_barrier: Option, filters_limit: Option, subscriptions_limit: Option, + batch_request_size_limit: Option, + response_body_size_limit: Option, sync_state: Option, threads: Option, vm_concurrency_limit: Option, polling_interval: Option, - accounts: HashMap, - debug_namespace_config: Option<(BaseSystemContractsHashes, u64, Option)>, + namespaces: Option>, } impl ApiBuilder { @@ -94,13 +131,15 @@ impl ApiBuilder { pool, sync_state: None, tx_sender: None, + vm_barrier: None, filters_limit: None, subscriptions_limit: None, + batch_request_size_limit: None, + response_body_size_limit: None, threads: None, vm_concurrency_limit: None, polling_interval: None, - debug_namespace_config: None, - accounts: Default::default(), + namespaces: None, config, } } @@ -112,13 +151,15 @@ impl ApiBuilder { pool, sync_state: None, tx_sender: None, + vm_barrier: None, filters_limit: None, subscriptions_limit: None, + batch_request_size_limit: None, + response_body_size_limit: None, threads: None, vm_concurrency_limit: None, polling_interval: None, - debug_namespace_config: None, - accounts: Default::default(), + namespaces: None, config, } } @@ -133,8 +174,13 @@ impl ApiBuilder { self } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { + pub fn with_tx_sender( + mut self, + tx_sender: TxSender, + vm_barrier: VmConcurrencyBarrier, + ) -> Self { self.tx_sender = Some(tx_sender); + self.vm_barrier = Some(vm_barrier); self } @@ -148,6 +194,16 @@ impl ApiBuilder { self } + pub fn with_batch_request_size_limit(mut self, batch_request_size_limit: usize) -> Self { + self.batch_request_size_limit = Some(batch_request_size_limit); + self + } + + pub fn with_response_body_size_limit(mut self, response_body_size_limit: usize) -> Self { + self.response_body_size_limit = Some(response_body_size_limit); + self + } + pub fn 
with_sync_state(mut self, sync_state: SyncState) -> Self { self.sync_state = Some(sync_state); self @@ -168,34 +224,8 @@ impl ApiBuilder { self } - pub fn enable_debug_namespace( - mut self, - base_system_contract_hashes: BaseSystemContractsHashes, - fair_l2_gas_price: u64, - cache_misses_limit: Option, - ) -> Self { - self.debug_namespace_config = Some(( - base_system_contract_hashes, - fair_l2_gas_price, - cache_misses_limit, - )); - self - } - - pub fn enable_oz_tests(mut self, account_pks: Vec) -> Self { - if cfg!(feature = "openzeppelin_tests") { - self.accounts = account_pks - .into_iter() - .map(|pk| { - let signer = PrivateKeySigner::new(pk); - let address = futures::executor::block_on(signer.get_address()) - .expect("Failed to get address of a signer"); - (address, signer) - }) - .collect(); - } else { - vlog::info!("OpenZeppelin tests are not enabled, ignoring `enable_oz_tests` call"); - } + pub fn enable_api_namespaces(mut self, namespaces: Vec) -> Self { + self.namespaces = Some(namespaces); self } } @@ -210,42 +240,53 @@ impl ApiBuilder { tx_sender: self.tx_sender.clone().expect("TxSender is not provided"), sync_state: self.sync_state.clone(), api_config: self.config.clone(), - accounts: self.accounts.clone(), - #[cfg(feature = "openzeppelin_tests")] - known_bytecodes: Arc::new(RwLock::new(Default::default())), } } - async fn build_rpc_module(&self) -> RpcModule> { + async fn build_rpc_module(&self) -> RpcModule<()> { let zksync_network_id = self.config.l2_chain_id; let rpc_app = self.build_rpc_state(); - // Declare namespaces we have. - let eth = EthNamespace::new(rpc_app.clone()); - let net = NetNamespace::new(zksync_network_id); - let web3 = Web3Namespace; - let zks = ZksNamespace::new(rpc_app.clone()); - let en = EnNamespace::new(rpc_app.clone()); - // Collect all the methods into a single RPC module. 
- let mut rpc: RpcModule<_> = eth.into_rpc(); - rpc.merge(net.into_rpc()) - .expect("Can't merge net namespace"); - rpc.merge(web3.into_rpc()) - .expect("Can't merge web3 namespace"); - rpc.merge(zks.into_rpc()) - .expect("Can't merge zks namespace"); - rpc.merge(en.into_rpc()).expect("Can't merge en namespace"); - - if let Some((hashes, fair_l2_gas_price, cache_misses_limit)) = self.debug_namespace_config { + let namespaces = self.namespaces.as_ref().unwrap(); + let mut rpc = RpcModule::new(()); + if namespaces.contains(&Namespace::Eth) { + rpc.merge(EthNamespace::new(rpc_app.clone()).into_rpc()) + .expect("Can't merge eth namespace"); + } + if namespaces.contains(&Namespace::Net) { + rpc.merge(NetNamespace::new(zksync_network_id).into_rpc()) + .expect("Can't merge net namespace"); + } + if namespaces.contains(&Namespace::Web3) { + rpc.merge(Web3Namespace.into_rpc()) + .expect("Can't merge web3 namespace"); + } + if namespaces.contains(&Namespace::Zks) { + rpc.merge(ZksNamespace::new(rpc_app.clone()).into_rpc()) + .expect("Can't merge zks namespace"); + } + if namespaces.contains(&Namespace::En) { + rpc.merge(EnNamespace::new(rpc_app.clone()).into_rpc()) + .expect("Can't merge en namespace"); + } + if namespaces.contains(&Namespace::Debug) { + let hashes = BaseSystemContractsHashes { + default_aa: rpc_app.tx_sender.0.sender_config.default_aa, + bootloader: rpc_app.tx_sender.0.sender_config.bootloader, + }; rpc.merge( DebugNamespace::new( rpc_app.connection_pool, hashes, - fair_l2_gas_price, - cache_misses_limit, - rpc_app.tx_sender.0.vm_concurrency_limiter.clone(), - rpc_app.tx_sender.0.factory_deps_cache.clone(), + rpc_app.tx_sender.0.sender_config.fair_l2_gas_price, + rpc_app + .tx_sender + .0 + .sender_config + .vm_execution_cache_misses_limit, + rpc_app.tx_sender.vm_concurrency_limiter(), + rpc_app.tx_sender.storage_caches(), ) .await .into_rpc(), @@ -258,10 +299,26 @@ impl ApiBuilder { pub async fn build( mut self, stop_receiver: watch::Receiver, - ) -> (Vec>, ApiHealthCheck) { + ) -> (Vec>, ReactiveHealthCheck) { if self.filters_limit.is_none() { vlog::warn!("Filters limit is not set - unlimited filters are allowed"); } + + if self.namespaces.is_none() { + vlog::warn!("debug_ API namespace will be disabled by default in ApiBuilder"); + self.namespaces = Some(Namespace::NON_DEBUG.to_vec()); + } + + if self + .namespaces + .as_ref() + .unwrap() + .contains(&Namespace::Pubsub) + && matches!(&self.transport, Some(ApiTransport::Http(_))) + { + vlog::debug!("pubsub API is not supported for HTTP transport, ignoring"); + } + match (&self.transport, self.subscriptions_limit) { (Some(ApiTransport::WebSocket(_)), None) => { vlog::warn!( @@ -278,37 +335,38 @@ impl ApiBuilder { match (self.backend, self.transport.take()) { (ApiBackend::Jsonrpc, Some(ApiTransport::Http(addr))) => { - let (api_health_check, status_sender) = self.create_health_check(); + let (api_health_check, health_updater) = ReactiveHealthCheck::new("http_api"); ( vec![ - self.build_jsonrpc_http(addr, stop_receiver, status_sender) + self.build_jsonrpc_http(addr, stop_receiver, health_updater) .await, ], api_health_check, ) } (ApiBackend::Jsonrpc, Some(ApiTransport::WebSocket(addr))) => { - let (api_health_check, status_sender) = self.create_health_check(); + let (api_health_check, health_updater) = ReactiveHealthCheck::new("ws_api"); ( - self.build_jsonrpc_ws(addr, stop_receiver, status_sender), + self.build_jsonrpc_ws(addr, stop_receiver, health_updater) + .await, api_health_check, ) } (ApiBackend::Jsonrpsee, 
Some(ApiTransport::Http(addr))) => { - let (api_health_check, status_sender) = self.create_health_check(); + let (api_health_check, health_updater) = ReactiveHealthCheck::new("http_api"); ( vec![ - self.build_jsonrpsee_http(addr, stop_receiver, status_sender) + self.build_jsonrpsee_http(addr, stop_receiver, health_updater) .await, ], api_health_check, ) } (ApiBackend::Jsonrpsee, Some(ApiTransport::WebSocket(addr))) => { - let (api_health_check, status_sender) = self.create_health_check(); + let (api_health_check, health_updater) = ReactiveHealthCheck::new("ws_api"); ( vec![ - self.build_jsonrpsee_ws(addr, stop_receiver, status_sender) + self.build_jsonrpsee_ws(addr, stop_receiver, health_updater) .await, ], api_health_check, @@ -318,279 +376,355 @@ impl ApiBuilder { } } - fn create_health_check(&self) -> (ApiHealthCheck, watch::Sender) { - let (status_sender, receiver) = - watch::channel(CheckHealthStatus::NotReady("Api is not ready".into())); - (ApiHealthCheck::new(receiver), status_sender) - } - async fn build_jsonrpc_http( self, addr: SocketAddr, mut stop_receiver: watch::Receiver, - api_health_check: watch::Sender, + health_updater: HealthUpdater, ) -> tokio::task::JoinHandle<()> { - let io_handler = { - let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(); - let mut io = IoHandler::new(); - io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(EnNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(Web3Namespace.to_delegate()); - io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); - if let Some((hashes, fair_l2_gas_price, cache_misses_limit)) = - self.debug_namespace_config - { - io.extend_with( - DebugNamespace::new( - rpc_state.connection_pool, - hashes, - fair_l2_gas_price, - cache_misses_limit, - rpc_state.tx_sender.0.vm_concurrency_limiter.clone(), - rpc_state.tx_sender.0.factory_deps_cache.clone(), - ) - .await - .to_delegate(), - ); - } + if self.batch_request_size_limit.is_some() { + vlog::info!("`batch_request_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + } + if self.response_body_size_limit.is_some() { + vlog::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + } - io - }; + let mut io_handler = IoHandler::new(); + self.extend_jsonrpc_methods(&mut io_handler).await; + let vm_barrier = self.vm_barrier.unwrap(); - let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(self.threads.unwrap()) - .build() - .unwrap(); + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("jsonrpc-http-worker") + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + tokio::task::spawn_blocking(move || { let server = jsonrpc_http_server::ServerBuilder::new(io_handler) .threads(1) .event_loop_executor(runtime.handle().clone()) .start_http(&addr) .unwrap(); - let close_handler = server.close_handle(); - std::thread::spawn(move || { - let stop_signal = futures::executor::block_on(stop_receiver.changed()); - if stop_signal.is_ok() { - vlog::info!("Stop signal received, web3 HTTP JSON RPC API is shutting down"); - close_handler.close(); + let close_handle = server.close_handle(); + let closing_vm_barrier = vm_barrier.clone(); + runtime.handle().spawn(async move { + if 
stop_receiver.changed().await.is_ok() { + vlog::info!("Stop signal received, HTTP JSON-RPC server is shutting down"); + closing_vm_barrier.close(); + close_handle.close(); } }); - api_health_check.send(CheckHealthStatus::Ready).unwrap(); + + health_updater.update(HealthStatus::Ready.into()); server.wait(); - runtime.shutdown_timeout(Duration::from_secs(10)); + drop(health_updater); + vlog::info!("HTTP JSON-RPC server stopped"); + runtime.block_on(Self::wait_for_vm(vm_barrier, "HTTP")); + runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); + }) + } + + async fn wait_for_vm(vm_barrier: VmConcurrencyBarrier, transport: &str) { + let wait_for_vm = + tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, vm_barrier.wait_until_stopped()); + if wait_for_vm.await.is_err() { + vlog::warn!( + "VM execution on {transport} JSON-RPC server didn't stop after {GRACEFUL_SHUTDOWN_TIMEOUT:?}; \ + forcing shutdown anyway" + ); + } else { + vlog::info!("VM execution on {transport} JSON-RPC server stopped"); + } + } + + async fn extend_jsonrpc_methods(&self, io: &mut MetaIoHandler) + where + T: jsonrpc_core::Metadata, + { + let zksync_network_id = self.config.l2_chain_id; + let rpc_state = self.build_rpc_state(); + let namespaces = self.namespaces.as_ref().unwrap(); + if namespaces.contains(&Namespace::Eth) { + io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); + } + if namespaces.contains(&Namespace::Zks) { + io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); + } + if namespaces.contains(&Namespace::En) { + io.extend_with(EnNamespace::new(rpc_state.clone()).to_delegate()); + } + if namespaces.contains(&Namespace::Web3) { + io.extend_with(Web3Namespace.to_delegate()); + } + if namespaces.contains(&Namespace::Net) { + io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); + } + if namespaces.contains(&Namespace::Debug) { + let hashes = BaseSystemContractsHashes { + default_aa: rpc_state.tx_sender.0.sender_config.default_aa, + bootloader: rpc_state.tx_sender.0.sender_config.bootloader, + }; + let debug_ns = DebugNamespace::new( + rpc_state.connection_pool, + hashes, + rpc_state.tx_sender.0.sender_config.fair_l2_gas_price, + rpc_state + .tx_sender + .0 + .sender_config + .vm_execution_cache_misses_limit, + rpc_state.tx_sender.vm_concurrency_limiter(), + rpc_state.tx_sender.storage_caches(), + ) + .await; + io.extend_with(debug_ns.to_delegate()); + } + } + + async fn build_jsonrpc_ws( + self, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + health_updater: HealthUpdater, + ) -> Vec> { + if self.batch_request_size_limit.is_some() { + vlog::info!("`batch_request_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + } + if self.response_body_size_limit.is_some() { + vlog::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + } + + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("jsonrpc-ws-worker") + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + + let mut io_handler = PubSubHandler::default(); + let mut notify_handles = Vec::new(); + + if self + .namespaces + .as_ref() + .unwrap() + .contains(&Namespace::Pubsub) + { + let pub_sub = EthSubscribe::new(runtime.handle().clone()); + let polling_interval = self.polling_interval.expect("Polling interval is not set"); + notify_handles.extend([ + tokio::spawn(notify_blocks( + pub_sub.active_block_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + 
tokio::spawn(notify_txs( + pub_sub.active_tx_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + tokio::spawn(notify_logs( + pub_sub.active_log_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + ]); + io_handler.extend_with(pub_sub.to_delegate()); + } + + self.extend_jsonrpc_methods(&mut io_handler).await; - let _ = sender; + let max_connections = self.subscriptions_limit.unwrap_or(usize::MAX); + let vm_barrier = self.vm_barrier.unwrap(); + let server_handle = tokio::task::spawn_blocking(move || { + let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( + io_handler, + |context: &jsonrpc_ws_server::RequestContext| { + Arc::new(jsonrpc_pubsub::Session::new(context.sender())) + }, + ) + .event_loop_executor(runtime.handle().clone()) + .max_connections(max_connections) + .session_stats(TrackOpenWsConnections) + .start(&addr) + .unwrap(); + + let close_handle = server.close_handle(); + let closing_vm_barrier = vm_barrier.clone(); + runtime.handle().spawn(async move { + if stop_receiver.changed().await.is_ok() { + vlog::info!("Stop signal received, WS JSON-RPC server is shutting down"); + closing_vm_barrier.close(); + close_handle.close(); + } + }); + + health_updater.update(HealthStatus::Ready.into()); + server.wait().unwrap(); + drop(health_updater); + vlog::info!("WS JSON-RPC server stopped"); + runtime.block_on(Self::wait_for_vm(vm_barrier, "WS")); + runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); }); - tokio::spawn(recv.map(drop)) + notify_handles.push(server_handle); + notify_handles } async fn build_jsonrpsee_http( self, addr: SocketAddr, - mut stop_receiver: watch::Receiver, - api_health_check: watch::Sender, + stop_receiver: watch::Receiver, + health_updater: HealthUpdater, ) -> tokio::task::JoinHandle<()> { let rpc = self.build_rpc_module().await; + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("jsonrpsee-http-worker") + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + let vm_barrier = self.vm_barrier.unwrap(); + let batch_request_config = if let Some(limit) = self.batch_request_size_limit { + BatchRequestConfig::Limit(limit as u32) + } else { + BatchRequestConfig::Unlimited + }; + let response_body_size_limit = self + .response_body_size_limit + .map(|limit| limit as u32) + .unwrap_or(u32::MAX); // Start the server in a separate tokio runtime from a dedicated thread. - let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(self.threads.unwrap()) - .build() - .unwrap(); + tokio::task::spawn_blocking(move || { + runtime.block_on(Self::run_jsonrpsee_server( + true, + rpc, + addr, + stop_receiver, + health_updater, + vm_barrier, + batch_request_config, + response_body_size_limit, + )); + runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); + }) + } - // Setup CORS. - let cors = CorsLayer::new() + #[allow(clippy::too_many_arguments)] + async fn run_jsonrpsee_server( + is_http: bool, + rpc: RpcModule<()>, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + health_updater: HealthUpdater, + vm_barrier: VmConcurrencyBarrier, + batch_request_config: BatchRequestConfig, + response_body_size_limit: u32, + ) { + let transport = if is_http { "HTTP" } else { "WS" }; + // Setup CORS. 
+ let cors = is_http.then(|| { + CorsLayer::new() // Allow `POST` when accessing the resource .allow_methods([hyper::Method::POST]) // Allow requests from any origin .allow_origin(tower_http::cors::Any) - .allow_headers([hyper::header::CONTENT_TYPE]); - - // Setup metrics for the number of in-flight txs. - let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); - runtime.spawn(counter.run_emitter(Duration::from_secs(10), |count| async move { - metrics::histogram!("api.web3.in_flight_requests", count as f64, "scheme" => "http"); - })); - - // Prepare middleware. - let middleware = tower::ServiceBuilder::new() - .layer(in_flight_requests_layer) - .layer(cors); - - runtime.block_on(async move { - let server = ServerBuilder::default() - .http_only() - .max_connections(5000) - .set_middleware(middleware) - .build(addr) - .await - .expect("Can't start the HTTP JSON RPC server"); - - let server_handle = server - .start(rpc) - .expect("Failed to start HTTP JSON RPC application"); - - let close_handle = server_handle.clone(); - tokio::spawn(async move { - if stop_receiver.changed().await.is_ok() { - vlog::info!( - "Stop signal received, web3 HTTP JSON RPC API is shutting down" - ); - close_handle.stop().unwrap(); - } - }); - api_health_check.send(CheckHealthStatus::Ready).unwrap(); - server_handle.stopped().await; - vlog::info!("HTTP JSON RPC API stopped"); + .allow_headers([hyper::header::CONTENT_TYPE]) + }); + // Setup metrics for the number of in-flight requests. + let (in_flight_requests, counter) = InFlightRequestsLayer::pair(); + tokio::spawn(counter.run_emitter(Duration::from_secs(10), move |count| { + metrics::histogram!("api.web3.in_flight_requests", count as f64, "scheme" => transport); + future::ready(()) + })); + // Assemble server middleware. + let middleware = tower::ServiceBuilder::new() + .layer(in_flight_requests) + .option_layer(cors); + + let server_builder = if is_http { + ServerBuilder::default().http_only().max_connections(5_000) + } else { + ServerBuilder::default().ws_only() + }; + + let server = server_builder + .set_batch_request_config(batch_request_config) + .set_middleware(middleware) + .max_response_body_size(response_body_size_limit) + .build(addr) + .await + .unwrap_or_else(|err| { + panic!("Failed building {} JSON-RPC server: {}", transport, err); }); - runtime.shutdown_timeout(Duration::from_secs(10)); - sender.send(()).unwrap(); + let server_handle = server.start(rpc); + + let close_handle = server_handle.clone(); + let closing_vm_barrier = vm_barrier.clone(); + tokio::spawn(async move { + if stop_receiver.changed().await.is_ok() { + vlog::info!("Stop signal received, {transport} JSON-RPC server is shutting down"); + closing_vm_barrier.close(); + close_handle.stop().ok(); + } }); + health_updater.update(HealthStatus::Ready.into()); - // Notifier for the rest of application about the end of the task. - tokio::spawn(recv.map(drop)) + server_handle.stopped().await; + drop(health_updater); + vlog::info!("{transport} JSON-RPC server stopped"); + Self::wait_for_vm(vm_barrier, transport).await; } async fn build_jsonrpsee_ws( self, addr: SocketAddr, - mut stop_receiver: watch::Receiver, - api_health_check: watch::Sender, + stop_receiver: watch::Receiver, + health_updater: HealthUpdater, ) -> tokio::task::JoinHandle<()> { vlog::warn!( "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead" ); let rpc = self.build_rpc_module().await; - - // Start the server in a separate tokio runtime from a dedicated thread. 
- let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(self.threads.unwrap()) - .build() - .unwrap(); - - runtime.block_on(async move { - let server = ServerBuilder::default() - .ws_only() - .build(addr) - .await - .expect("Can't start the WS JSON RPC server"); - - let server_handle = server - .start(rpc) - .expect("Failed to start WS JSON RPC application"); - - api_health_check.send(CheckHealthStatus::Ready).unwrap(); - let close_handle = server_handle.clone(); - tokio::spawn(async move { - if stop_receiver.changed().await.is_ok() { - vlog::info!("Stop signal received, web3 WS JSON RPC API is shutting down"); - close_handle.stop().unwrap(); - } - }); - server_handle.stopped().await; - }); - runtime.shutdown_timeout(Duration::from_secs(10)); - sender.send(()).unwrap(); - }); - - // Notifier for the rest of application about the end of the task. - tokio::spawn(recv.map(drop)) - } - - fn build_jsonrpc_ws( - self, - addr: SocketAddr, - mut stop_receiver: watch::Receiver, - api_health_check: watch::Sender, - ) -> Vec> { - let jsonrpc_runtime = tokio::runtime::Builder::new_multi_thread() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() + .thread_name("jsonrpsee-ws-worker") .worker_threads(self.threads.unwrap()) .build() .unwrap(); + let vm_barrier = self.vm_barrier.unwrap(); - let pub_sub = EthSubscribe::new(jsonrpc_runtime.handle().clone()); - let polling_interval = self.polling_interval.expect("Polling interval is not set"); - - let mut notify_handles = vec![ - tokio::spawn(notify_blocks( - pub_sub.active_block_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_txs( - pub_sub.active_tx_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_logs( - pub_sub.active_log_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - ]; - - let (sender, recv) = oneshot::channel::<()>(); - let io = { - let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(); - let mut io = PubSubHandler::default(); - io.extend_with(pub_sub.to_delegate()); - io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(EnNamespace::new(rpc_state).to_delegate()); - io.extend_with(Web3Namespace.to_delegate()); - io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); - io + let batch_request_config = if let Some(limit) = self.batch_request_size_limit { + BatchRequestConfig::Limit(limit as u32) + } else { + BatchRequestConfig::Unlimited }; + let response_body_size_limit = self + .response_body_size_limit + .map(|limit| limit as u32) + .unwrap_or(u32::MAX); - std::thread::spawn(move || { - let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( - io, - |context: &jsonrpc_ws_server::RequestContext| { - Arc::new(jsonrpc_pubsub::Session::new(context.sender())) - }, - ) - .event_loop_executor(jsonrpc_runtime.handle().clone()) - .max_connections(self.subscriptions_limit.unwrap_or(usize::MAX)) - .session_stats(TrackOpenWsConnections) - .start(&addr) - .unwrap(); - let close_handler = server.close_handle(); - - std::thread::spawn(move || { - let stop_signal = futures::executor::block_on(stop_receiver.changed()); - if stop_signal.is_ok() { - close_handler.close(); - vlog::info!("Stop signal received, WS 
JSON RPC API is shutting down"); - } - }); - - api_health_check.send(CheckHealthStatus::Ready).unwrap(); - server.wait().unwrap(); - jsonrpc_runtime.shutdown_timeout(Duration::from_secs(10)); - let _ = sender; - }); - - notify_handles.push(tokio::spawn(recv.map(drop))); - notify_handles + // Start the server in a separate tokio runtime from a dedicated thread. + tokio::task::spawn_blocking(move || { + runtime.block_on(Self::run_jsonrpsee_server( + false, + rpc, + addr, + stop_receiver, + health_updater, + vm_barrier, + batch_request_config, + response_body_size_limit, + )); + runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); + }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs index bd5cdf2c3ee2..da1af8238315 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -4,10 +4,11 @@ use zksync_contracts::{ BaseSystemContracts, BaseSystemContractsHashes, PLAYGROUND_BLOCK_BOOTLOADER_CODE, }; use zksync_dal::ConnectionPool; -use zksync_state::FactoryDepsCache; +use zksync_state::PostgresStorageCaches; use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, - transaction_request::{l2_tx_from_call_req, CallRequest}, + l2::L2Tx, + transaction_request::CallRequest, vm_trace::{Call, VmTrace}, AccountTreeId, H256, USED_BOOTLOADER_MEMORY_BYTES, }; @@ -26,7 +27,7 @@ pub struct DebugNamespace { base_system_contracts: BaseSystemContracts, vm_execution_cache_misses_limit: Option, vm_concurrency_limiter: Arc, - factory_deps_cache: FactoryDepsCache, + storage_caches: PostgresStorageCaches, } impl DebugNamespace { @@ -36,7 +37,7 @@ impl DebugNamespace { fair_l2_gas_price: u64, vm_execution_cache_misses_limit: Option, vm_concurrency_limiter: Arc, - factory_deps_cache: FactoryDepsCache, + storage_caches: PostgresStorageCaches, ) -> Self { let mut storage = connection_pool.access_storage_tagged("api").await; @@ -57,28 +58,31 @@ impl DebugNamespace { base_system_contracts, vm_execution_cache_misses_limit, vm_concurrency_limiter, - factory_deps_cache, + storage_caches, } } #[tracing::instrument(skip(self))] pub async fn debug_trace_block_impl( &self, - block: BlockId, + block_id: BlockId, options: Option, ) -> Result, Web3Error> { const METHOD_NAME: &str = "debug_trace_block"; + let start = Instant::now(); let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); let mut connection = self.connection_pool.access_storage_tagged("api").await; - let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; + let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?; let call_trace = connection .blocks_web3_dal() .get_trace_for_miniblock(block_number) .await; + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block_id.extract_block_tag()); + Ok(call_trace .into_iter() .map(|call_trace| { @@ -116,19 +120,20 @@ impl DebugNamespace { }) } - #[tracing::instrument(skip(self, request, block))] + #[tracing::instrument(skip(self, request, block_id))] pub async fn debug_trace_call_impl( &self, request: CallRequest, - block: Option, + block_id: Option, options: Option, ) -> Result { + const METHOD_NAME: &str = "debug_trace_call"; let start = Instant::now(); let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let block = 
block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let mut connection = self.connection_pool.access_storage_tagged("api").await; let block_args = BlockArgs::new(&mut connection, block) .await @@ -136,13 +141,15 @@ impl DebugNamespace { .ok_or(Web3Error::NoBlock)?; drop(connection); - let tx = l2_tx_from_call_req(request, USED_BOOTLOADER_MEMORY_BYTES)?; + let tx = L2Tx::from_request(request.into(), USED_BOOTLOADER_MEMORY_BYTES)?; let shared_args = self.shared_args(); let vm_permit = self.vm_concurrency_limiter.acquire().await; + let vm_permit = vm_permit.ok_or(Web3Error::InternalError)?; + // We don't need properly trace if we only need top call let result = execute_tx_eth_call( - &vm_permit, + vm_permit, shared_args, self.connection_pool.clone(), tx.clone(), @@ -155,7 +162,6 @@ impl DebugNamespace { let submit_tx_error = SubmitTxError::from(err); Web3Error::SubmitTransactionError(submit_tx_error.to_string(), submit_tx_error.data()) })?; - drop(vm_permit); // Unblock other VMs to enter. let (output, revert_reason) = match result.revert_reason { Some(result) => (vec![], Some(result.revert_reason.to_string())), @@ -182,7 +188,8 @@ impl DebugNamespace { trace, ); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "debug_trace_call"); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag()); + Ok(call.into()) } @@ -192,7 +199,7 @@ impl DebugNamespace { l1_gas_price: 100_000, fair_l2_gas_price: self.fair_l2_gas_price, base_system_contracts: self.base_system_contracts.clone(), - factory_deps_cache: self.factory_deps_cache.clone(), + caches: self.storage_caches.clone(), } } } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs index 10c60dba0cce..16f9a57daad4 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,16 +1,22 @@ -use itertools::Itertools; - use std::time::Instant; +use crate::{ + api_server::{ + execution_sandbox::BlockArgs, + web3::{backend_jsonrpc::error::internal_error, resolve_block, state::RpcState}, + }, + l1_gas_price::L1GasPriceProvider, +}; use zksync_types::{ api::{ BlockId, BlockNumber, GetLogsFilter, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, l2::{L2Tx, TransactionType}, - transaction_request::{l2_tx_from_call_req, CallRequest}, + transaction_request::CallRequest, utils::decompose_full_nonce, - web3::types::{SyncInfo, SyncState}, + web3, + web3::types::{FeeHistory, SyncInfo, SyncState}, AccountTreeId, Bytes, MiniblockNumber, StorageKey, H256, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, U256, }; @@ -19,24 +25,6 @@ use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64}, }; -#[cfg(feature = "openzeppelin_tests")] -use { - zksync_eth_signer::EthereumSigner, - zksync_types::{ - api::TransactionRequest, storage::CONTRACT_DEPLOYER_ADDRESS, - transaction_request::Eip712Meta, web3::contract::tokens::Tokenizable, Eip712Domain, - EIP_712_TX_TYPE, - }, - zksync_utils::bytecode::hash_bytecode, -}; - -use crate::{ - api_server::{ - execution_sandbox::BlockArgs, - web3::{backend_jsonrpc::error::internal_error, resolve_block, state::RpcState}, - }, - l1_gas_price::L1GasPriceProvider, -}; pub const EVENT_TOPIC_NUMBER_LIMIT: usize = 4; pub const PROTOCOL_VERSION: &str = "zks/1"; 
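// Reviewer note (illustrative sketch, not part of the patch): throughout these
// namespaces the diff replaces the free function `l2_tx_from_call_req(request, max_tx_size)`
// with `L2Tx::from_request(request.into(), max_tx_size)`. A minimal sketch of the new
// call-site shape follows; the `CallRequest` -> `TransactionRequest` conversion via
// `.into()` is taken verbatim from the diff, while the `From`-based error conversion
// to `Web3Error` is an assumption implied by the `?` operators at the call sites.
use zksync_types::{l2::L2Tx, transaction_request::CallRequest};
use zksync_web3_decl::error::Web3Error;

fn l2_tx_from_call(request: CallRequest, max_tx_size: usize) -> Result<L2Tx, Web3Error> {
    // `request.into()` lifts the `CallRequest` into a `TransactionRequest`,
    // exactly as the updated call sites in this diff do.
    let tx = L2Tx::from_request(request.into(), max_tx_size)?;
    Ok(tx)
}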
@@ -79,15 +67,16 @@ impl EthNamespace { block_number } - #[tracing::instrument(skip(self, request, block))] + #[tracing::instrument(skip(self, request, block_id))] pub async fn call_impl( &self, request: CallRequest, - block: Option, + block_id: Option, ) -> Result { + const METHOD_NAME: &str = "call"; let start = Instant::now(); - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let mut connection = self .state .connection_pool @@ -99,33 +88,14 @@ impl EthNamespace { .ok_or(Web3Error::NoBlock)?; drop(connection); - let mut request_with_set_nonce = request.clone(); - self.state - .set_nonce_for_call_request(&mut request_with_set_nonce) - .await?; - - #[cfg(not(feature = "openzeppelin_tests"))] - let tx = l2_tx_from_call_req(request, self.state.api_config.max_tx_size)?; - #[cfg(feature = "openzeppelin_tests")] - let tx: L2Tx = self - .convert_evm_like_deploy_requests(tx_req_from_call_req( - request, - self.state.api_config.max_tx_size, - )?)? - .try_into()?; + let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; let call_result = self.state.tx_sender.eth_call(block_args, tx).await; - let mut res_bytes = call_result + let res_bytes = call_result .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; - if cfg!(feature = "openzeppelin_tests") - && res_bytes.len() >= 100 - && hex::encode(&res_bytes[96..100]).as_str() == "08c379a0" - { - res_bytes = res_bytes[96..].to_vec(); - } + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag()); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "call"); Ok(res_bytes.into()) } @@ -152,20 +122,11 @@ impl EthNamespace { .eip712_meta .is_some(); - #[cfg(not(feature = "openzeppelin_tests"))] - let mut tx: L2Tx = l2_tx_from_call_req( - request_with_gas_per_pubdata_overridden, + let mut tx: L2Tx = L2Tx::from_request( + request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, )?; - #[cfg(feature = "openzeppelin_tests")] - let mut tx: L2Tx = self - .convert_evm_like_deploy_requests(tx_req_from_call_req( - request_with_gas_per_pubdata_overridden, - self.state.api_config.max_tx_size, - )?)? - .try_into()?; - // The user may not include the proper transaction type during the estimation of // the gas fee. However, it is needed for the bootloader checks to pass properly. 
if is_eip712 { @@ -200,7 +161,9 @@ impl EthNamespace { let start = Instant::now(); let price = self.state.tx_sender.gas_price(); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(price.into()) } @@ -208,7 +171,7 @@ impl EthNamespace { pub async fn get_balance_impl( &self, address: Address, - block: Option, + block_id: Option, ) -> Result { const METHOD_NAME: &str = "get_balance"; @@ -218,7 +181,7 @@ impl EthNamespace { .connection_pool .access_storage_tagged("api") .await; - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; let balance = connection .storage_web3_dal() @@ -229,7 +192,9 @@ impl EthNamespace { ) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag()); + Ok(balance) } @@ -281,7 +246,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub async fn get_block_impl( &self, - block: BlockId, + block_id: BlockId, full_transactions: bool, ) -> Result>, Web3Error> { let start = Instant::now(); @@ -297,18 +262,23 @@ impl EthNamespace { .access_storage_tagged("api") .await .blocks_web3_dal() - .get_block_by_web3_block_id(block, full_transactions, self.state.api_config.l2_chain_id) + .get_block_by_web3_block_id( + block_id, + full_transactions, + self.state.api_config.l2_chain_id, + ) .await .map_err(|err| internal_error(method_name, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name, "block_id" => block_id.extract_block_tag()); + block } #[tracing::instrument(skip(self))] pub async fn get_block_transaction_count_impl( &self, - block: BlockId, + block_id: BlockId, ) -> Result, Web3Error> { const METHOD_NAME: &str = "get_block_transaction_count"; @@ -319,11 +289,12 @@ impl EthNamespace { .access_storage_tagged("api") .await .blocks_web3_dal() - .get_block_tx_count(block) + .get_block_tx_count(block_id) .await .map_err(|err| internal_error(METHOD_NAME, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block_id.extract_block_tag()); + tx_count } @@ -331,7 +302,7 @@ impl EthNamespace { pub async fn get_code_impl( &self, address: Address, - block: Option, + block_id: Option, ) -> Result { const METHOD_NAME: &str = "get_code"; @@ -341,7 +312,7 @@ impl EthNamespace { .connection_pool .access_storage_tagged("api") .await; - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; let contract_code = connection .storage_web3_dal() @@ -349,7 +320,8 @@ impl EthNamespace { .await .map_err(|err| internal_error(METHOD_NAME, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag()); + Ok(contract_code.unwrap_or_default().into()) } @@ -363,12 +335,12 @@ impl EthNamespace { &self, address: Address, idx: U256, - block: Option, + 
block_id: Option, ) -> Result { const METHOD_NAME: &str = "get_storage_at"; let start = Instant::now(); - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); let mut connection = self .state @@ -382,7 +354,8 @@ impl EthNamespace { .await .map_err(|err| internal_error(METHOD_NAME, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag()); + Ok(value) } @@ -391,15 +364,16 @@ impl EthNamespace { pub async fn get_transaction_count_impl( &self, address: Address, - block: Option, + block_id: Option, ) -> Result { let start = Instant::now(); - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); let method_name = match block { BlockId::Number(BlockNumber::Pending) => "get_pending_transaction_count", _ => "get_historical_transaction_count", }; + let mut connection = self .state .connection_pool @@ -424,7 +398,8 @@ impl EthNamespace { let account_nonce = full_nonce.map(|nonce| decompose_full_nonce(nonce).0); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name, "block_id" => block.extract_block_tag()); + account_nonce } @@ -657,7 +632,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn accounts_impl(&self) -> Vec
{ - self.state.accounts.keys().cloned().sorted().collect() + Vec::new() } #[tracing::instrument(skip(self))] @@ -679,6 +654,58 @@ impl EthNamespace { } } + #[tracing::instrument(skip(self))] + pub async fn fee_history_impl( + &self, + block_count: U64, + newest_block: BlockNumber, + _reward_percentiles: Vec, + ) -> Result { + const METHOD_NAME: &str = "fee_history"; + + let start = Instant::now(); + + // Limit `block_count`. + let block_count = block_count + .as_u64() + .min(self.state.api_config.fee_history_limit) + .max(1); + + let mut connection = self + .state + .connection_pool + .access_storage_tagged("api") + .await; + let newest_miniblock = + resolve_block(&mut connection, BlockId::Number(newest_block), METHOD_NAME).await?; + + let mut base_fee_per_gas = connection + .blocks_web3_dal() + .get_fee_history(newest_miniblock, block_count) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; + // The DAL method returns fees in DESC order while we need ASC. + base_fee_per_gas.reverse(); + + let oldest_block = newest_miniblock.0 + 1 - base_fee_per_gas.len() as u32; + // We do not store the gas used ratio for blocks, so we return an array of zeroes as a placeholder. + let gas_used_ratio = vec![0.0; base_fee_per_gas.len()]; + // The effective priority gas price is currently 0, so we return `reward: null` as a placeholder. + let reward = None; + + // `base_fee_per_gas` for the next miniblock cannot be calculated, so we append the last known fee as a placeholder. + base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => newest_block.to_string()); + + Ok(FeeHistory { + oldest_block: web3::types::BlockNumber::Number(oldest_block.into()), + base_fee_per_gas, + gas_used_ratio, + reward, + }) + } + #[tracing::instrument(skip(self, typed_filter))] async fn filter_changes( &self, @@ -795,129 +822,6 @@ impl EthNamespace { Ok(res) } - - #[cfg(feature = "openzeppelin_tests")] - pub fn send_transaction_impl( - &self, - transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> Result { - let nonce = if let Some(nonce) = transaction_request.nonce { - nonce - } else { - self.state - .connection_pool - .access_storage_tagged("api") - .await - .transactions_web3_dal() - .next_nonce_by_initiator_account(transaction_request.from) - .map_err(|err| internal_error("send_transaction", err))?
- }; - let mut eip712_meta = Eip712Meta::default(); - eip712_meta.gas_per_pubdata = U256::from(MAX_GAS_PER_PUBDATA_BYTE); - let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; - let transaction_request = TransactionRequest { - nonce, - from: Some(transaction_request.from), - to: transaction_request.to, - value: transaction_request.value.unwrap_or(U256::from(0)), - gas_price: U256::from(fair_l2_gas_price), - gas: transaction_request.gas.unwrap(), - max_priority_fee_per_gas: Some(U256::from(fair_l2_gas_price)), - input: transaction_request.data.unwrap_or_default(), - v: None, - r: None, - s: None, - raw: None, - transaction_type: Some(EIP_712_TX_TYPE.into()), - access_list: None, - eip712_meta: Some(eip712_meta), - chain_id: None, - }; - let transaction_request = self.convert_evm_like_deploy_requests(transaction_request)?; - - let bytes = if let Some(signer) = transaction_request - .from - .and_then(|from| self.state.accounts.get(&from).cloned()) - { - let chain_id = self.state.api_config.l2_chain_id; - let domain = Eip712Domain::new(chain_id); - let signature = signer - .sign_typed_data(&domain, &transaction_request) - .await - .map_err(|err| internal_error("send_transaction", err))?; - - let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id); - Bytes(encoded_tx) - } else { - return Err(internal_error("send_transaction", "Account not found")); - }; - - self.send_raw_transaction_impl(bytes) - } - - #[cfg(feature = "openzeppelin_tests")] - /// Converts EVM-like transaction requests of deploying contracts to zkEVM format. - /// These feature is needed to run openzeppelin tests - /// because they use `truffle` which uses `web3.js` to generate transaction requests. - /// Note, that we can remove this method when ZkSync support - /// will be added for `truffle`. 
- fn convert_evm_like_deploy_requests( - &self, - mut transaction_request: TransactionRequest, - ) -> Result { - if transaction_request.to.unwrap_or(Address::zero()) == Address::zero() { - transaction_request.to = Some(CONTRACT_DEPLOYER_ADDRESS); - transaction_request.transaction_type = Some(EIP_712_TX_TYPE.into()); - - const BYTECODE_CHUNK_LEN: usize = 32; - - let data = transaction_request.input.0; - let (bytecode, constructor_calldata) = - data.split_at(data.len() / BYTECODE_CHUNK_LEN * BYTECODE_CHUNK_LEN); - let mut bytecode = bytecode.to_vec(); - let mut constructor_calldata = constructor_calldata.to_vec(); - let lock = self.state.known_bytecodes.read().unwrap(); - while !lock.contains(&bytecode) { - if bytecode.len() < BYTECODE_CHUNK_LEN { - return Err(internal_error( - "convert_evm_like_deploy_requests", - "Bytecode not found", - )); - } - let (new_bytecode, new_constructor_part) = - bytecode.split_at(bytecode.len() - BYTECODE_CHUNK_LEN); - constructor_calldata = new_constructor_part - .iter() - .chain(constructor_calldata.iter()) - .cloned() - .collect(); - bytecode = new_bytecode.to_vec(); - } - drop(lock); - - let mut eip712_meta = Eip712Meta::default(); - eip712_meta.gas_per_pubdata = U256::from(MAX_GAS_PER_PUBDATA_BYTE); - eip712_meta.factory_deps = Some(vec![bytecode.clone()]); - transaction_request.eip712_meta = Some(eip712_meta); - - let salt = H256::zero(); - let bytecode_hash = hash_bytecode(&bytecode); - - let deployer = zksync_contracts::deployer_contract(); - transaction_request.input = Bytes( - deployer - .function("create") - .unwrap() - .encode_input(&[ - salt.into_token(), - bytecode_hash.into_token(), - constructor_calldata.into_token(), - ]) - .unwrap(), - ); - } - Ok(transaction_request) - } } // Bogus methods. diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs index 4cade2ff1953..b5a71432967d 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs @@ -49,13 +49,14 @@ impl EthSubscribe { } } + /// Assigns ID for the subscriber if the connection is open, returns error otherwise. 
fn assign_id( subscriber: typed::Subscriber, - ) -> (typed::Sink, SubscriptionId) { + ) -> Result<(typed::Sink, SubscriptionId), ()> { let id = H128::random(); let sub_id = SubscriptionId::String(format!("0x{}", hex::encode(id.0))); - let sink = subscriber.assign_id(sub_id.clone()).unwrap(); - (sink, sub_id) + let sink = subscriber.assign_id(sub_id.clone())?; + Ok((sink, sub_id)) } fn reject(subscriber: typed::Subscriber) { @@ -78,13 +79,17 @@ impl EthSubscribe { let sub_type = match sub_type.as_str() { "newHeads" => { let mut block_subs = self.active_block_subs.write().await; - let (sink, id) = Self::assign_id(subscriber); + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; block_subs.insert(id, sink); Some(SubscriptionType::Blocks) } "newPendingTransactions" => { let mut tx_subs = self.active_tx_subs.write().await; - let (sink, id) = Self::assign_id(subscriber); + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; tx_subs.insert(id, sink); Some(SubscriptionType::Txs) } @@ -104,7 +109,9 @@ impl EthSubscribe { None } else { let mut log_subs = self.active_log_subs.write().await; - let (sink, id) = Self::assign_id(subscriber); + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; log_subs.insert(id, (sink, filter)); Some(SubscriptionType::Logs) } @@ -116,7 +123,9 @@ impl EthSubscribe { } } "syncing" => { - let (sink, _) = Self::assign_id(subscriber); + let Ok((sink, _id)) = Self::assign_id(subscriber) else { + return; + }; let _ = sink.notify(Ok(PubSubResult::Syncing(false))); None } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs index f6117c48dba0..0c8905489017 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -5,19 +5,20 @@ use bigdecimal::{BigDecimal, Zero}; use zksync_mini_merkle_tree::MiniMerkleTree; -#[cfg(feature = "openzeppelin_tests")] -use zksync_types::Bytes; +use zksync_types::l2::L2Tx; use zksync_types::{ - api::{BridgeAddresses, GetLogsFilter, L2ToL1LogProof, TransactionDetails, U64}, + api::{ + BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, + ProtocolVersion, TransactionDetails, + }, commitment::SerializeCommitment, - explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, l1::L1Tx, l2_to_l1_log::L2ToL1Log, tokens::ETHEREUM_ADDRESS, - transaction_request::{l2_tx_from_call_req, CallRequest}, + transaction_request::CallRequest, L1BatchNumber, MiniblockNumber, Transaction, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, - MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, + MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, }; use zksync_utils::address_to_h256; use zksync_web3_decl::{ @@ -61,8 +62,8 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); } - let mut tx = l2_tx_from_call_req( - request_with_gas_per_pubdata_overridden, + let mut tx = L2Tx::from_request( + request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, )?; @@ -214,17 +215,16 @@ impl ZksNamespace { .connection_pool .access_storage_tagged("api") .await - .explorer() .accounts_dal() .get_balances_for_address(address) .await .map_err(|err| internal_error(METHOD_NAME, err))? 
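// Ether balances are tracked internally under `L2_ETH_TOKEN_ADDRESS`; the map
// below re-keys them to the conventional `ETHEREUM_ADDRESS` that API clients
// expect, leaving all other token balances untouched.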
.into_iter() - .map(|(address, balance_item)| { + .map(|(address, balance)| { if address == L2_ETH_TOKEN_ADDRESS { - (ETHEREUM_ADDRESS, balance_item.balance) + (ETHEREUM_ADDRESS, balance) } else { - (address, balance_item.balance) + (address, balance) } }) .collect(); @@ -265,13 +265,13 @@ impl ZksNamespace { .map_err(|err| internal_error(METHOD_NAME, err))? .expect("L1 batch should contain at least one miniblock"); - let all_l1_logs_in_block = storage + let all_l1_logs_in_batch = storage .blocks_web3_dal() .get_l2_to_l1_logs(l1_batch_number) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - // Position of l1 log in block relative to logs with identical data + // Position of l1 log in L1 batch relative to logs with identical data let l1_log_relative_position = if let Some(l2_log_position) = l2_log_position { let pos = storage .events_web3_dal() @@ -301,7 +301,7 @@ impl ZksNamespace { 0 }; - let l1_log_index = match all_l1_logs_in_block + let l1_log_index = match all_l1_logs_in_batch .iter() .enumerate() .filter(|(_, log)| { @@ -317,8 +317,8 @@ impl ZksNamespace { } }; - let merkle_tree_leaves = all_l1_logs_in_block.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_BLOCK) + let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); + let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) .merkle_root_and_path(l1_log_index); let msg_proof = L2ToL1LogProof { proof, @@ -353,13 +353,13 @@ impl ZksNamespace { None => return Ok(None), }; - let all_l1_logs_in_block = storage + let all_l1_logs_in_batch = storage .blocks_web3_dal() .get_l2_to_l1_logs(l1_batch_number) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - let l1_log_index = match all_l1_logs_in_block + let l1_log_index = match all_l1_logs_in_batch .iter() .enumerate() .filter(|(_, log)| log.tx_number_in_block == l1_batch_tx_index) @@ -371,8 +371,8 @@ impl ZksNamespace { } }; - let merkle_tree_leaves = all_l1_logs_in_block.iter().map(L2ToL1Log::to_bytes); - let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_BLOCK) + let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); + let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH) .merkle_root_and_path(l1_log_index); let msg_proof = L2ToL1LogProof { proof, @@ -440,8 +440,7 @@ impl ZksNamespace { .connection_pool .access_storage_tagged("api") .await - .explorer() - .blocks_dal() + .blocks_web3_dal() .get_block_details( block_number, self.state.tx_sender.0.sender_config.fee_account_addr, @@ -522,8 +521,7 @@ impl ZksNamespace { .connection_pool .access_storage_tagged("api") .await - .explorer() - .blocks_dal() + .blocks_web3_dal() .get_l1_batch_details(batch_number) .await .map_err(|err| internal_error(METHOD_NAME, err)); @@ -566,11 +564,36 @@ impl ZksNamespace { gas_price.into() } - #[cfg(feature = "openzeppelin_tests")] - /// Saves contract bytecode to memory. 
- pub fn set_known_bytecode_impl(&self, bytecode: Bytes) -> bool { - let mut lock = self.state.known_bytecodes.write().unwrap(); - lock.insert(bytecode.0.clone()); - true + #[tracing::instrument(skip(self))] + pub async fn get_protocol_version_impl( + &self, + version_id: Option, + ) -> Option { + let start = Instant::now(); + const METHOD_NAME: &str = "get_protocol_version"; + + let protocol_version = match version_id { + Some(id) => { + self.state + .connection_pool + .access_storage() + .await + .protocol_versions_web3_dal() + .get_protocol_version_by_id(id) + .await + } + None => Some( + self.state + .connection_pool + .access_storage() + .await + .protocol_versions_web3_dal() + .get_latest_protocol_version() + .await, + ), + }; + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + + protocol_version } } diff --git a/core/bin/zksync_core/src/api_server/web3/state.rs b/core/bin/zksync_core/src/api_server/web3/state.rs index c2c3003e5d51..63d28621968f 100644 --- a/core/bin/zksync_core/src/api_server/web3/state.rs +++ b/core/bin/zksync_core/src/api_server/web3/state.rs @@ -1,7 +1,4 @@ use std::collections::HashMap; -#[cfg(feature = "openzeppelin_tests")] -use std::collections::HashSet; -use std::convert::TryInto; use std::sync::Arc; use tokio::sync::RwLock; @@ -12,7 +9,6 @@ use crate::api_server::web3::{backend_jsonrpc::error::internal_error, resolve_bl use crate::sync_layer::SyncState; use zksync_dal::ConnectionPool; -use zksync_eth_signer::PrivateKeySigner; use zksync_types::{ api, l2::L2Tx, transaction_request::CallRequest, Address, L1ChainId, L2ChainId, @@ -38,6 +34,7 @@ pub struct InternalApiConfig { pub diamond_proxy_addr: Address, pub l2_testnet_paymaster_addr: Option
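<Address>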
, pub req_entities_limit: usize, + pub fee_history_limit: u64, } impl InternalApiConfig { @@ -62,6 +59,7 @@ impl InternalApiConfig { diamond_proxy_addr: contracts_config.diamond_proxy_addr, l2_testnet_paymaster_addr: contracts_config.l2_testnet_paymaster_addr, req_entities_limit: web3_config.req_entities_limit(), + fee_history_limit: web3_config.fee_history_limit(), } } } @@ -74,9 +72,6 @@ pub struct RpcState { pub tx_sender: TxSender, pub sync_state: Option, pub(super) api_config: InternalApiConfig, - pub accounts: HashMap, - #[cfg(feature = "openzeppelin_tests")] - pub known_bytecodes: Arc>>>, } // Custom implementation is required due to generic param: @@ -90,9 +85,6 @@ impl Clone for RpcState { tx_sender: self.tx_sender.clone(), sync_state: self.sync_state.clone(), api_config: self.api_config.clone(), - accounts: self.accounts.clone(), - #[cfg(feature = "openzeppelin_tests")] - known_bytecodes: self.known_bytecodes.clone(), } } } @@ -100,10 +92,12 @@ impl Clone for RpcState { impl RpcState { pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; - let (tx_request, hash) = - api::TransactionRequest::from_bytes(bytes, chain_id.0, self.api_config.max_tx_size)?; + let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id.0)?; - Ok((tx_request.try_into()?, hash)) + Ok(( + L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, + hash, + )) } pub fn u64_to_block_number(n: U64) -> MiniblockNumber { diff --git a/core/bin/zksync_core/src/bin/block_reverter.rs b/core/bin/zksync_core/src/bin/block_reverter.rs index 95f527d17f4d..fcfa5ec7e1ed 100644 --- a/core/bin/zksync_core/src/bin/block_reverter.rs +++ b/core/bin/zksync_core/src/bin/block_reverter.rs @@ -75,9 +75,10 @@ async fn main() -> anyhow::Result<()> { let contracts = ContractsConfig::from_env(); let config = BlockReverterEthConfig::new(eth_sender, contracts, eth_client.web3_url.clone()); - let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; + let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; let block_reverter = BlockReverter::new( - db_config, + db_config.state_keeper_db_path, + db_config.merkle_tree.path, Some(config), connection_pool, L1ExecutedBatchesRevert::Disallowed, diff --git a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs index f81df1fbddb0..97a7a417fe5c 100644 --- a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs +++ b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs @@ -1,10 +1,11 @@ use clap::Parser; -use std::{num::NonZeroU32, time::Instant}; +use std::time::Instant; use zksync_config::DBConfig; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_storage::RocksDB; +use zksync_types::L1BatchNumber; #[derive(Debug, Parser)] #[command( @@ -14,28 +15,34 @@ use zksync_storage::RocksDB; long_about = None )] struct Cli { - /// Specifies the version of the tree to be checked, expressed as a non-zero number - /// of blocks applied to it. By default, the latest tree version is checked. - #[arg(long = "blocks")] - blocks: Option, + /// Specifies the version of the tree to be checked, expressed as a 0-based L1 batch number + /// applied to it last. If not specified, the latest tree version is checked. 
+ #[arg(long = "l1-batch")] + l1_batch: Option, } impl Cli { fn run(self, config: &DBConfig) { - let db_path = &config.new_merkle_tree_ssd_path; + let db_path = &config.merkle_tree.path; vlog::info!("Verifying consistency of Merkle tree at {db_path}"); let start = Instant::now(); let db = RocksDB::new(db_path, true); let tree = ZkSyncTree::new_lightweight(db); - let block_number = self.blocks.or_else(|| NonZeroU32::new(tree.block_number())); - if let Some(block_number) = block_number { - vlog::info!("Block number to check: {block_number}"); - tree.verify_consistency(block_number); - vlog::info!("Merkle tree verified in {:?}", start.elapsed()); + let l1_batch_number = if let Some(number) = self.l1_batch { + L1BatchNumber(number) } else { - vlog::info!("Merkle tree is empty, skipping"); - } + let next_number = tree.next_l1_batch_number(); + if next_number == L1BatchNumber(0) { + vlog::info!("Merkle tree is empty, skipping"); + return; + } + next_number - 1 + }; + + vlog::info!("L1 batch number to check: {l1_batch_number}"); + tree.verify_consistency(l1_batch_number); + vlog::info!("Merkle tree verified in {:?}", start.elapsed()); } } diff --git a/core/bin/zksync_core/src/bin/rocksdb_util.rs b/core/bin/zksync_core/src/bin/rocksdb_util.rs index a799934606e5..71899380fee8 100644 --- a/core/bin/zksync_core/src/bin/rocksdb_util.rs +++ b/core/bin/zksync_core/src/bin/rocksdb_util.rs @@ -1,7 +1,10 @@ use clap::{Parser, Subcommand}; + use zksync_config::DBConfig; -use zksync_storage::rocksdb::backup::{BackupEngine, BackupEngineOptions, RestoreOptions}; -use zksync_storage::rocksdb::{Error, Options, DB}; +use zksync_storage::rocksdb::{ + backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, + Env, Error, Options, DB, +}; #[derive(Debug, Parser)] #[command(author = "Matter Labs", version, about = "RocksDB management utility", long_about = None)] @@ -22,21 +25,21 @@ enum Command { fn create_backup(config: &DBConfig) -> Result<(), Error> { let mut engine = BackupEngine::open( - &BackupEngineOptions::default(), - config.merkle_tree_backup_path(), + &BackupEngineOptions::new(&config.merkle_tree.backup_path)?, + &Env::new()?, )?; - let db_dir = &config.new_merkle_tree_ssd_path; + let db_dir = &config.merkle_tree.path; let db = DB::open_for_read_only(&Options::default(), db_dir, false)?; engine.create_new_backup(&db)?; - engine.purge_old_backups(config.backup_count()) + engine.purge_old_backups(config.backup_count) } fn restore_from_latest_backup(config: &DBConfig) -> Result<(), Error> { let mut engine = BackupEngine::open( - &BackupEngineOptions::default(), - config.merkle_tree_backup_path(), + &BackupEngineOptions::new(&config.merkle_tree.backup_path)?, + &Env::new()?, )?; - let db_dir = &config.new_merkle_tree_ssd_path; + let db_dir = &config.merkle_tree.path; engine.restore_from_latest_backup(db_dir, db_dir, &RestoreOptions::default()) } @@ -57,12 +60,10 @@ mod tests { fn backup_restore_workflow() { let backup_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); - let db_config = DBConfig { - new_merkle_tree_ssd_path: temp_dir.path().to_str().unwrap().to_string(), - merkle_tree_backup_path: backup_dir.path().to_str().unwrap().to_string(), - ..Default::default() - }; - let db_dir = &db_config.new_merkle_tree_ssd_path; + let mut db_config = DBConfig::from_env(); + db_config.merkle_tree.path = temp_dir.path().to_str().unwrap().to_string(); + db_config.merkle_tree.backup_path = 
backup_dir.path().to_str().unwrap().to_string(); + let db_dir = &db_config.merkle_tree.path; let mut options = Options::default(); options.create_if_missing(true); diff --git a/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs b/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs new file mode 100644 index 000000000000..2a771f1bcbf5 --- /dev/null +++ b/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs @@ -0,0 +1,64 @@ +use zksync_config::DBConfig; +use zksync_dal::{connection::DbVariant, ConnectionPool}; +use zksync_merkle_tree::domain::ZkSyncTree; +use zksync_storage::RocksDB; +use zksync_types::{L1BatchNumber, H256, U256}; + +pub fn u256_to_h256_rev(num: U256) -> H256 { + let mut bytes = [0u8; 32]; + num.to_little_endian(&mut bytes); + H256::from_slice(&bytes) +} + +#[tokio::main] +async fn main() { + vlog::init(); + let db_path = DBConfig::from_env().merkle_tree.path; + vlog::info!("Verifying consistency of slot indices"); + + let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; + let mut storage = pool.access_storage().await; + + let db = RocksDB::new(db_path, true); + let tree = ZkSyncTree::new_lightweight(db); + + let next_number = tree.next_l1_batch_number(); + if next_number == L1BatchNumber(0) { + vlog::info!("Merkle tree is empty, skipping"); + return; + } + let tree_l1_batch_number = next_number - 1; + let pg_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await; + + let check_up_to_l1_batch_number = tree_l1_batch_number.min(pg_l1_batch_number); + + for l1_batch_number in 0..=check_up_to_l1_batch_number.0 { + vlog::info!("Checking indices for L1 batch {l1_batch_number}"); + let pg_keys: Vec<_> = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(l1_batch_number.into()) + .await + .into_iter() + .map(|(key, index)| { + ( + key, + index.expect("Missing index in database, migration should be run beforehand"), + ) + }) + .collect(); + let keys_u256: Vec<_> = pg_keys + .iter() + .map(|(key, _)| U256::from_little_endian(key.as_bytes())) + .collect(); + + let tree_keys: Vec<_> = tree + .read_leaves(l1_batch_number.into(), &keys_u256) + .into_iter() + .zip(keys_u256) + .map(|(leaf_data, key)| (u256_to_h256_rev(key), leaf_data.unwrap().leaf_index)) + .collect(); + assert_eq!(pg_keys, tree_keys); + + vlog::info!("Indices are consistent for L1 batch {l1_batch_number}"); + } +} diff --git a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs index 8184baec29fd..4c31eac7a59a 100644 --- a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs +++ b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs @@ -1,13 +1,12 @@ use std::io::Write; use zksync_dal::{connection::DbVariant, ConnectionPool}; -use zksync_types::explorer_api::SourceCodeData; +use zksync_types::contract_verification_api::SourceCodeData; #[tokio::main] async fn main() { - let pool = ConnectionPool::new(Some(1), DbVariant::Replica).await; + let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; let mut storage = pool.access_storage().await; let reqs = storage - .explorer() .contract_verification_dal() .get_all_successful_requests() .await diff --git a/core/bin/zksync_core/src/bin/zksync_server.rs b/core/bin/zksync_core/src/bin/zksync_server.rs index f1c07360adeb..a160b398aebc 100644 --- a/core/bin/zksync_core/src/bin/zksync_server.rs +++ b/core/bin/zksync_core/src/bin/zksync_server.rs @@ -3,7 +3,7 @@ use clap::Parser; use std::{env, str::FromStr, 
time::Duration}; use zksync_config::configs::chain::NetworkConfig; -use zksync_config::ETHSenderConfig; +use zksync_config::{ContractsConfig, ETHSenderConfig}; use zksync_core::{ genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, Component, Components, @@ -11,6 +11,10 @@ use zksync_core::{ use zksync_storage::RocksDB; use zksync_utils::wait_for_tasks::wait_for_tasks; +#[cfg(not(target_env = "msvc"))] +#[global_allocator] +static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + #[derive(Debug, Parser)] #[structopt(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)] struct Cli { @@ -53,7 +57,8 @@ async fn main() -> anyhow::Result<()> { if opt.genesis || is_genesis_needed().await { let network = NetworkConfig::from_env(); let eth_sender = ETHSenderConfig::from_env(); - genesis_init(ð_sender, &network).await; + let contracts = ContractsConfig::from_env(); + genesis_init(ð_sender, &network, &contracts).await; if opt.genesis { return Ok(()); } @@ -105,9 +110,12 @@ async fn main() -> anyhow::Result<()> { vlog::warn!("Circuit breaker received, shutting down. Reason: {}", error_msg); } }, - }; + } + stop_sender.send(true).ok(); - RocksDB::await_rocksdb_termination(); + tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination) + .await + .unwrap(); // Sleep for some time to let some components gracefully stop. tokio::time::sleep(Duration::from_secs(5)).await; health_check_handle.stop().await; diff --git a/core/bin/zksync_core/src/block_reverter/mod.rs b/core/bin/zksync_core/src/block_reverter/mod.rs index df7fa8f1a622..283254bafac1 100644 --- a/core/bin/zksync_core/src/block_reverter/mod.rs +++ b/core/bin/zksync_core/src/block_reverter/mod.rs @@ -5,7 +5,7 @@ use tokio::time::sleep; use std::path::Path; use std::time::Duration; -use zksync_config::{ContractsConfig, DBConfig, ETHSenderConfig}; +use zksync_config::{ContractsConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; use zksync_dal::ConnectionPool; use zksync_merkle_tree::domain::ZkSyncTree; @@ -88,7 +88,8 @@ impl BlockReverterEthConfig { /// - State of the Ethereum contract (if the block was committed) #[derive(Debug)] pub struct BlockReverter { - db_config: DBConfig, + state_keeper_cache_path: String, + merkle_tree_path: String, eth_config: Option, connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, @@ -96,14 +97,16 @@ pub struct BlockReverter { impl BlockReverter { pub fn new( - db_config: DBConfig, + state_keeper_cache_path: String, + merkle_tree_path: String, eth_config: Option, connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, ) -> Self { Self { + state_keeper_cache_path, + merkle_tree_path, eth_config, - db_config, connection_pool, executed_batches_revert_mode, } @@ -126,12 +129,12 @@ impl BlockReverter { let mut storage = self.connection_pool.access_storage().await; let last_executed_l1_batch = storage .blocks_dal() - .get_number_of_last_block_executed_on_eth() + .get_number_of_last_l1_batch_executed_on_eth() .await - .expect("failed to get last executed L1 block"); + .expect("failed to get last executed L1 batch"); assert!( last_l1_batch_to_keep >= last_executed_l1_batch, - "Attempt to revert already executed blocks" + "Attempt to revert already executed L1 batches" ); } @@ -155,27 +158,26 @@ impl BlockReverter { .access_storage() .await .blocks_dal() - .get_block_state_root(last_l1_batch_to_keep) + .get_l1_batch_state_root(last_l1_batch_to_keep) .await - 
.expect("failed to fetch root hash for target block"); + .expect("failed to fetch root hash for target L1 batch"); // Rolling back Merkle tree - let new_lightweight_tree_path = &self.db_config.new_merkle_tree_ssd_path; - if Path::new(new_lightweight_tree_path).exists() { - vlog::info!("Rolling back new lightweight tree..."); + if Path::new(&self.merkle_tree_path).exists() { + vlog::info!("Rolling back Merkle tree..."); Self::rollback_new_tree( last_l1_batch_to_keep, - new_lightweight_tree_path, + &self.merkle_tree_path, storage_root_hash, ); } else { - vlog::info!("New lightweight tree not found; skipping"); + vlog::info!("Merkle tree not found; skipping"); } } if rollback_sk_cache { assert!( - Path::new(self.db_config.state_keeper_db_path()).exists(), + Path::new(&self.state_keeper_cache_path).exists(), "Path with state keeper cache DB doesn't exist" ); self.rollback_state_keeper_cache(last_l1_batch_to_keep) @@ -185,14 +187,14 @@ impl BlockReverter { fn rollback_new_tree( last_l1_batch_to_keep: L1BatchNumber, - path: impl AsRef, + path: &str, storage_root_hash: H256, ) { let db = RocksDB::new(path, true); let mut tree = ZkSyncTree::new_lightweight(db); - if tree.block_number() <= last_l1_batch_to_keep.0 { - vlog::info!("Tree is behind the block to revert to; skipping"); + if tree.next_l1_batch_number() <= last_l1_batch_to_keep { + vlog::info!("Tree is behind the L1 batch to revert to; skipping"); return; } tree.revert_logs(last_l1_batch_to_keep); @@ -206,8 +208,7 @@ impl BlockReverter { /// Reverts blocks in the state keeper cache. async fn rollback_state_keeper_cache(&self, last_l1_batch_to_keep: L1BatchNumber) { vlog::info!("opening DB with state keeper cache..."); - let path = self.db_config.state_keeper_db_path().as_ref(); - let mut sk_cache = RocksdbStorage::new(path); + let mut sk_cache = RocksdbStorage::new(self.state_keeper_cache_path.as_ref()); if sk_cache.l1_batch_number() > last_l1_batch_to_keep + 1 { let mut storage = self.connection_pool.access_storage().await; @@ -343,9 +344,9 @@ impl BlockReverter { async fn get_l1_batch_number_from_contract(&self, op: AggregatedActionType) -> L1BatchNumber { let function_name = match op { - AggregatedActionType::CommitBlocks => "getTotalBlocksCommitted", - AggregatedActionType::PublishProofBlocksOnchain => "getTotalBlocksVerified", - AggregatedActionType::ExecuteBlocks => "getTotalBlocksExecuted", + AggregatedActionType::Commit => "getTotalBlocksCommitted", + AggregatedActionType::PublishProofOnchain => "getTotalBlocksVerified", + AggregatedActionType::Execute => "getTotalBlocksExecuted", }; let eth_config = self .eth_config @@ -370,19 +371,17 @@ impl BlockReverter { /// Returns suggested values for rollback. 
pub async fn suggested_values(&self) -> SuggestedRollbackValues { let last_committed_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::CommitBlocks) + .get_l1_batch_number_from_contract(AggregatedActionType::Commit) .await; let last_verified_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::PublishProofBlocksOnchain) + .get_l1_batch_number_from_contract(AggregatedActionType::PublishProofOnchain) .await; let last_executed_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) + .get_l1_batch_number_from_contract(AggregatedActionType::Execute) .await; vlog::info!( - "Last L1 batch numbers on contract: committed {}, verified {}, executed {}", - last_committed_l1_batch_number, - last_verified_l1_batch_number, - last_executed_l1_batch_number + "Last L1 batch numbers on contract: committed {last_committed_l1_batch_number}, \ + verified {last_verified_l1_batch_number}, executed {last_executed_l1_batch_number}" ); let eth_config = self diff --git a/core/bin/zksync_core/src/consistency_checker/mod.rs b/core/bin/zksync_core/src/consistency_checker/mod.rs index 345d07caf872..2949596ba362 100644 --- a/core/bin/zksync_core/src/consistency_checker/mod.rs +++ b/core/bin/zksync_core/src/consistency_checker/mod.rs @@ -30,24 +30,24 @@ impl ConsistencyChecker { async fn check_commitments(&self, batch_number: L1BatchNumber) -> Result { let mut storage = self.db.access_storage().await; - let storage_block = storage + let storage_l1_batch = storage .blocks_dal() - .get_storage_block(batch_number) + .get_storage_l1_batch(batch_number) .await - .unwrap_or_else(|| panic!("Block {} not found in the database", batch_number)); + .unwrap_or_else(|| panic!("L1 batch #{} not found in the database", batch_number)); - let commit_tx_id = storage_block + let commit_tx_id = storage_l1_batch .eth_commit_tx_id - .unwrap_or_else(|| panic!("Block commit tx not found for block {}", batch_number)) + .unwrap_or_else(|| panic!("Commit tx not found for L1 batch #{}", batch_number)) as u32; let block_metadata = storage .blocks_dal() - .get_block_with_metadata(storage_block) + .get_l1_batch_with_metadata(storage_l1_batch) .await .unwrap_or_else(|| { panic!( - "Block metadata for block {} not found in the database", + "Metadata for L1 batch #{} not found in the database", batch_number ) }); @@ -118,7 +118,7 @@ impl ConsistencyChecker { .access_storage() .await .blocks_dal() - .get_number_of_last_block_committed_on_eth() + .get_number_of_last_l1_batch_committed_on_eth() .await .unwrap_or(L1BatchNumber(0)) } @@ -145,7 +145,7 @@ impl ConsistencyChecker { .access_storage() .await .blocks_dal() - .get_block_metadata(batch_number) + .get_l1_batch_metadata(batch_number) .await .is_some(); diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/coinmarketcap.rs b/core/bin/zksync_core/src/data_fetchers/token_price/coinmarketcap.rs deleted file mode 100644 index b233508f1434..000000000000 --- a/core/bin/zksync_core/src/data_fetchers/token_price/coinmarketcap.rs +++ /dev/null @@ -1,193 +0,0 @@ -// use std::{collections::HashMap, str::FromStr}; -// -// use async_trait::async_trait; -// use chrono::{DateTime, Utc}; -// use itertools::Itertools; -// use num::{rational::Ratio, BigUint}; -// use reqwest::{Client, Url}; -// use serde::{Deserialize, Serialize}; -// -// use zksync_config::FetcherConfig; -// use zksync_storage::{db_view::DBView, tokens::TokensSchema}; -// use zksync_types::{tokens::TokenPrice, Address}; -// use 
zksync_utils::UnsignedRatioSerializeAsDecimal; -// -// use crate::data_fetchers::error::ApiFetchError; -// -// use super::FetcherImpl; -// -// #[derive(Debug, Clone)] -// pub struct CoinMarketCapFetcher { -// client: Client, -// addr: Url, -// } -// -// impl CoinMarketCapFetcher { -// pub fn new(config: &FetcherConfig) -> Self { -// Self { -// client: Client::new(), -// addr: Url::from_str(&config.token_list.url).expect("failed parse One Inch URL"), -// } -// } -// } -// -// #[async_trait] -// impl FetcherImpl for CoinMarketCapFetcher { -// async fn fetch_token_price( -// &self, -// token_addrs: &[Address], -// ) -> Result, ApiFetchError> { -// let token_addrs = token_addrs.to_vec(); -// -// let tokens = DBView::with_snapshot(move |snap| { -// let tokens_list = TokensSchema::new(&*snap).token_list(); -// -// token_addrs -// .iter() -// .cloned() -// .filter_map(|token_addr| { -// if let Some(token_symbol) = tokens_list.token_symbol(&token_addr) { -// Some((token_addr, token_symbol)) -// } else { -// vlog::warn!( -// "Error getting token symbol: token address: {:#x}", -// token_addr, -// ); -// None -// } -// }) -// .collect::>() -// }) -// .await; -// -// if tokens.is_empty() { -// return Err(ApiFetchError::Other( -// "Failed to identify symbols of tokens by their addresses".to_string(), -// )); -// } -// -// let comma_separated_token_symbols = tokens -// .iter() -// .map(|(_, token_symbol)| token_symbol) -// .join(","); -// -// let request_url = self -// .addr -// .join("/v1/cryptocurrency/quotes/latest") -// .expect("failed to join URL path"); -// -// let mut api_response = self -// .client -// .get(request_url.clone()) -// .query(&[("symbol", comma_separated_token_symbols)]) -// .send() -// .await -// .map_err(|err| { -// ApiFetchError::Other(format!("Coinmarketcap API request failed: {}", err)) -// })? 
-// .json::() -// .await -// .map_err(|err| ApiFetchError::UnexpectedJsonFormat(err.to_string()))?; -// -// let result = tokens -// .into_iter() -// .filter_map(|(token_addr, token_symbol)| { -// let token_info = api_response.data.remove(&token_symbol); -// let usd_quote = token_info.and_then(|mut token_info| token_info.quote.remove("USD")); -// -// if let Some(usd_quote) = usd_quote { -// Some(( -// token_addr, -// TokenPrice { -// usd_price: usd_quote.price, -// last_updated: usd_quote.last_updated, -// }, -// )) -// } else { -// vlog::warn!( -// "Error getting token price from CoinMarketCap: token address: {:#x}, token symbol: {}", -// token_addr, -// token_symbol, -// ); -// None -// } -// }) -// .collect(); -// -// Ok(result) -// } -// } -// -// #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -// pub struct CoinMarketCapQuote { -// #[serde(with = "UnsignedRatioSerializeAsDecimal")] -// pub price: Ratio, -// pub last_updated: DateTime, -// } -// -// #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -// pub struct CoinMarketCapTokenInfo { -// pub quote: HashMap, -// } -// -// #[derive(Debug, Clone, Serialize, Deserialize)] -// pub struct CoinMarketCapResponse { -// pub data: HashMap, -// } -// -// #[test] -// fn parse_coin_market_cap_response() { -// let example = r#"{ -// "status": { -// "timestamp": "2020-04-17T04:51:12.012Z", -// "error_code": 0, -// "error_message": null, -// "elapsed": 9, -// "credit_count": 1, -// "notice": null -// }, -// "data": { -// "ETH": { -// "id": 1027, -// "name": "Ethereum", -// "symbol": "ETH", -// "slug": "ethereum", -// "num_market_pairs": 5153, -// "date_added": "2015-08-07T00:00:00.000Z", -// "tags": [ -// "mineable" -// ], -// "max_supply": null, -// "circulating_supply": 110550929.1865, -// "total_supply": 110550929.1865, -// "platform": null, -// "cmc_rank": 2, -// "last_updated": "2020-04-17T04:50:41.000Z", -// "quote": { -// "USD": { -// "price": 170.692214992, -// "volume_24h": 22515583743.3856, -// "percent_change_1h": -0.380817, -// "percent_change_24h": 11.5718, -// "percent_change_7d": 3.6317, -// "market_cap": 18870182972.267426, -// "last_updated": "2020-04-17T04:50:41.000Z" -// } -// } -// } -// } -// }"#; -// -// let resp = -// serde_json::from_str::(example).expect("serialization failed"); -// let token_data = resp.data.get("ETH").expect("ETH data not found"); -// let quote = token_data.quote.get("USD").expect("USD not found"); -// assert_eq!( -// quote.price, -// UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot("170.692214992").unwrap() -// ); -// assert_eq!( -// quote.last_updated, -// DateTime::::from_str("2020-04-17T04:50:41.000Z").unwrap() -// ); -// } diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs index 67ff9236b113..0f2868090f34 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs @@ -14,7 +14,6 @@ use num::{rational::Ratio, BigUint}; use tokio::sync::watch; pub mod coingecko; -// pub mod coinmarketcap; pub mod mock; #[async_trait] diff --git a/core/bin/zksync_core/src/eth_sender/aggregator.rs b/core/bin/zksync_core/src/eth_sender/aggregator.rs index 0c643c0763c4..c19a8885f680 100644 --- a/core/bin/zksync_core/src/eth_sender/aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/aggregator.rs @@ -3,60 +3,61 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; use zksync_types::{ 
aggregated_operations::{ - AggregatedActionType, AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, - BlocksProofOperation, + AggregatedActionType, AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, + L1BatchProofOperation, }, - commitment::BlockWithMetadata, + commitment::L1BatchWithMetadata, helpers::unix_timestamp_ms, - L1BatchNumber, + protocol_version::L1VerifierConfig, + L1BatchNumber, ProtocolVersionId, }; -use crate::eth_sender::block_publish_criterion::{ - BlockNumberCriterion, BlockPublishCriterion, DataSizeCriterion, GasCriterion, +use super::publish_criterion::{ + DataSizeCriterion, GasCriterion, L1BatchPublishCriterion, NumberCriterion, TimestampDeadlineCriterion, }; #[derive(Debug)] pub struct Aggregator { - commit_criterion: Vec>, - proof_criterion: Vec>, - execute_criterion: Vec>, + commit_criteria: Vec>, + proof_criteria: Vec>, + execute_criteria: Vec>, config: SenderConfig, } impl Aggregator { pub fn new(config: SenderConfig) -> Self { Self { - commit_criterion: vec![ - Box::from(BlockNumberCriterion { - op: AggregatedActionType::CommitBlocks, + commit_criteria: vec![ + Box::from(NumberCriterion { + op: AggregatedActionType::Commit, limit: config.max_aggregated_blocks_to_commit, }), Box::from(GasCriterion::new( - AggregatedActionType::CommitBlocks, + AggregatedActionType::Commit, config.max_aggregated_tx_gas, )), Box::from(DataSizeCriterion { - op: AggregatedActionType::CommitBlocks, + op: AggregatedActionType::Commit, data_limit: config.max_eth_tx_data_size, }), Box::from(TimestampDeadlineCriterion { - op: AggregatedActionType::CommitBlocks, + op: AggregatedActionType::Commit, deadline_seconds: config.aggregated_block_commit_deadline, max_allowed_lag: Some(config.timestamp_criteria_max_allowed_lag), }), ], - proof_criterion: vec![ - Box::from(BlockNumberCriterion { - op: AggregatedActionType::PublishProofBlocksOnchain, + proof_criteria: vec![ + Box::from(NumberCriterion { + op: AggregatedActionType::PublishProofOnchain, limit: *config.aggregated_proof_sizes.iter().max().unwrap() as u32, }), Box::from(GasCriterion::new( - AggregatedActionType::PublishProofBlocksOnchain, + AggregatedActionType::PublishProofOnchain, config.max_aggregated_tx_gas, )), Box::from(TimestampDeadlineCriterion { - op: AggregatedActionType::PublishProofBlocksOnchain, + op: AggregatedActionType::PublishProofOnchain, deadline_seconds: config.aggregated_block_prove_deadline, // Currently, we can't use this functionality for proof criterion // since we don't send dummy and real proofs in the same range, @@ -64,17 +65,17 @@ impl Aggregator { max_allowed_lag: None, }), ], - execute_criterion: vec![ - Box::from(BlockNumberCriterion { - op: AggregatedActionType::ExecuteBlocks, + execute_criteria: vec![ + Box::from(NumberCriterion { + op: AggregatedActionType::Execute, limit: config.max_aggregated_blocks_to_execute, }), Box::from(GasCriterion::new( - AggregatedActionType::ExecuteBlocks, + AggregatedActionType::Execute, config.max_aggregated_tx_gas, )), Box::from(TimestampDeadlineCriterion { - op: AggregatedActionType::ExecuteBlocks, + op: AggregatedActionType::Execute, deadline_seconds: config.aggregated_block_execute_deadline, max_allowed_lag: Some(config.timestamp_criteria_max_allowed_lag), }), @@ -88,36 +89,40 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, prover_storage: &mut StorageProcessor<'_>, base_system_contracts_hashes: BaseSystemContractsHashes, + protocol_version_id: ProtocolVersionId, + l1_verifier_config: L1VerifierConfig, ) -> Option { - let 
last_sealed_block_number = storage.blocks_dal().get_sealed_block_number().await; + let last_sealed_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await; if let Some(op) = self .get_execute_operations( storage, self.config.max_aggregated_blocks_to_execute as usize, - last_sealed_block_number, + last_sealed_l1_batch_number, ) .await { - Some(AggregatedOperation::ExecuteBlocks(op)) + Some(AggregatedOperation::Execute(op)) } else if let Some(op) = self .get_proof_operation( storage, prover_storage, *self.config.aggregated_proof_sizes.iter().max().unwrap(), - last_sealed_block_number, + last_sealed_l1_batch_number, + l1_verifier_config, ) .await { - Some(AggregatedOperation::PublishProofBlocksOnchain(op)) + Some(AggregatedOperation::PublishProofOnchain(op)) } else { self.get_commit_operation( storage, self.config.max_aggregated_blocks_to_commit as usize, - last_sealed_block_number, + last_sealed_l1_batch_number, base_system_contracts_hashes, + protocol_version_id, ) .await - .map(AggregatedOperation::CommitBlocks) + .map(AggregatedOperation::Commit) } } @@ -125,114 +130,130 @@ impl Aggregator { &mut self, storage: &mut StorageProcessor<'_>, limit: usize, - last_sealed_block: L1BatchNumber, - ) -> Option { + last_sealed_l1_batch: L1BatchNumber, + ) -> Option { let max_l1_batch_timestamp_millis = self .config .l1_batch_min_age_before_execute_seconds .map(|age| unix_timestamp_ms() - age * 1_000); - let ready_for_execute_blocks = storage + let ready_for_execute_batches = storage .blocks_dal() - .get_ready_for_execute_blocks(limit, max_l1_batch_timestamp_millis) + .get_ready_for_execute_l1_batches(limit, max_l1_batch_timestamp_millis) .await; - let blocks = extract_ready_subrange( + let l1_batches = extract_ready_subrange( storage, - &mut self.execute_criterion, - ready_for_execute_blocks, - last_sealed_block, + &mut self.execute_criteria, + ready_for_execute_batches, + last_sealed_l1_batch, ) .await; - blocks.map(|blocks| BlocksExecuteOperation { blocks }) + l1_batches.map(|l1_batches| L1BatchExecuteOperation { l1_batches }) } async fn get_commit_operation( &mut self, storage: &mut StorageProcessor<'_>, limit: usize, - last_sealed_block: L1BatchNumber, + last_sealed_batch: L1BatchNumber, base_system_contracts_hashes: BaseSystemContractsHashes, - ) -> Option { + protocol_version_id: ProtocolVersionId, + ) -> Option { let mut blocks_dal = storage.blocks_dal(); - - let last_block = blocks_dal.get_last_committed_to_eth_block().await?; - - let ready_for_commit_blocks = blocks_dal - .get_ready_for_commit_blocks( + let last_committed_l1_batch = blocks_dal.get_last_committed_to_eth_l1_batch().await?; + let ready_for_commit_l1_batches = blocks_dal + .get_ready_for_commit_l1_batches( limit, base_system_contracts_hashes.bootloader, base_system_contracts_hashes.default_aa, + protocol_version_id, ) .await; - // Check that the blocks that are selected are sequential - ready_for_commit_blocks + // Check that the L1 batches that are selected are sequential + ready_for_commit_l1_batches .iter() - .reduce(|last_block, next_block| { - if last_block.header.number + 1 == next_block.header.number { - next_block + .reduce(|last_batch, next_batch| { + if last_batch.header.number + 1 == next_batch.header.number { + next_batch } else { - panic!("Blocks are not sequential") + panic!("L1 batches prepared for commit are not sequential"); } }); - let blocks = extract_ready_subrange( + let batches = extract_ready_subrange( storage, - &mut self.commit_criterion, - ready_for_commit_blocks, - last_sealed_block, 
+ &mut self.commit_criteria, + ready_for_commit_l1_batches, + last_sealed_batch, ) .await; - blocks.map(|blocks| BlocksCommitOperation { - last_committed_block: last_block, - blocks, + + batches.map(|batches| L1BatchCommitOperation { + last_committed_l1_batch, + l1_batches: batches, }) } async fn load_real_proof_operation( storage: &mut StorageProcessor<'_>, prover_storage: &mut StorageProcessor<'_>, - ) -> Option { - let previous_proven_block_number = + l1_verifier_config: L1VerifierConfig, + ) -> Option { + let previous_proven_batch_number = storage.blocks_dal().get_last_l1_batch_with_prove_tx().await; + if let Some(version_id) = storage + .blocks_dal() + .get_batch_protocol_version_id(previous_proven_batch_number + 1) + .await + { + let verifier_config_for_next_batch = storage + .protocol_versions_dal() + .l1_verifier_config_for_version(version_id) + .await + .unwrap(); + if verifier_config_for_next_batch != l1_verifier_config { + return None; + } + } let proofs = prover_storage .prover_dal() .get_final_proofs_for_blocks( - previous_proven_block_number + 1, - previous_proven_block_number + 1, + previous_proven_batch_number + 1, + previous_proven_batch_number + 1, ) .await; if proofs.is_empty() { - // The proof for the next block is not generated yet + // The proof for the next L1 batch is not generated yet return None; } assert_eq!(proofs.len(), 1); - let previous_proven_block_metadata = storage + let previous_proven_batch_metadata = storage .blocks_dal() - .get_block_metadata(previous_proven_block_number) + .get_l1_batch_metadata(previous_proven_batch_number) .await .unwrap_or_else(|| { panic!( - "Block number {} with submitted proof is not complete in the DB", - previous_proven_block_number - ) + "L1 batch #{} with submitted proof is not complete in the DB", + previous_proven_batch_number + ); }); - let block_to_prove_metadata = storage + let metadata_for_batch_being_proved = storage .blocks_dal() - .get_block_metadata(previous_proven_block_number + 1) + .get_l1_batch_metadata(previous_proven_batch_number + 1) .await .unwrap_or_else(|| { panic!( - "Block number {} with generated proof is not complete in the DB", - previous_proven_block_number + 1 - ) + "L1 batch #{} with generated proof is not complete in the DB", + previous_proven_batch_number + 1 + ); }); - Some(BlocksProofOperation { - prev_block: previous_proven_block_metadata, - blocks: vec![block_to_prove_metadata], + Some(L1BatchProofOperation { + prev_l1_batch: previous_proven_batch_metadata, + l1_batches: vec![metadata_for_batch_being_proved], proofs, should_verify: true, }) @@ -241,32 +262,29 @@ impl Aggregator { async fn prepare_dummy_proof_operation( &mut self, storage: &mut StorageProcessor<'_>, - ready_for_proof_blocks: Vec, - last_sealed_block: L1BatchNumber, - ) -> Option { - if let Some(blocks) = extract_ready_subrange( + ready_for_proof_l1_batches: Vec, + last_sealed_l1_batch: L1BatchNumber, + ) -> Option { + let batches = extract_ready_subrange( storage, - &mut self.proof_criterion, - ready_for_proof_blocks, - last_sealed_block, + &mut self.proof_criteria, + ready_for_proof_l1_batches, + last_sealed_l1_batch, ) - .await - { - let prev_block_number = blocks.first().map(|bl| bl.header.number - 1)?; - let prev_block = storage - .blocks_dal() - .get_block_metadata(prev_block_number) - .await?; + .await?; - Some(BlocksProofOperation { - prev_block, - blocks, - proofs: vec![], - should_verify: false, - }) - } else { - None - } + let prev_l1_batch_number = batches.first().map(|batch| batch.header.number - 1)?; + let 
prev_batch = storage + .blocks_dal() + .get_l1_batch_metadata(prev_l1_batch_number) + .await?; + + Some(L1BatchProofOperation { + prev_l1_batch: prev_batch, + l1_batches: batches, + proofs: vec![], + should_verify: false, + }) } async fn get_proof_operation( @@ -274,37 +292,43 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, prover_storage: &mut StorageProcessor<'_>, limit: usize, - last_sealed_block: L1BatchNumber, - ) -> Option { + last_sealed_l1_batch: L1BatchNumber, + l1_verifier_config: L1VerifierConfig, + ) -> Option { match self.config.proof_sending_mode { ProofSendingMode::OnlyRealProofs => { - Self::load_real_proof_operation(storage, prover_storage).await + Self::load_real_proof_operation(storage, prover_storage, l1_verifier_config).await } + ProofSendingMode::SkipEveryProof => { - let ready_for_proof_blocks = storage + let ready_for_proof_l1_batches = storage .blocks_dal() - .get_ready_for_dummy_proof_blocks(limit) + .get_ready_for_dummy_proof_l1_batches(limit) .await; self.prepare_dummy_proof_operation( storage, - ready_for_proof_blocks, - last_sealed_block, + ready_for_proof_l1_batches, + last_sealed_l1_batch, ) .await } + ProofSendingMode::OnlySampledProofs => { // if there is a sampled proof then send it, otherwise check for skipped ones. - if let Some(op) = Self::load_real_proof_operation(storage, prover_storage).await { + if let Some(op) = + Self::load_real_proof_operation(storage, prover_storage, l1_verifier_config) + .await + { Some(op) } else { - let ready_for_proof_blocks = storage + let ready_for_proof_batches = storage .blocks_dal() - .get_skipped_for_proof_blocks(limit) + .get_skipped_for_proof_l1_batches(limit) .await; self.prepare_dummy_proof_operation( storage, - ready_for_proof_blocks, - last_sealed_block, + ready_for_proof_batches, + last_sealed_l1_batch, ) .await } @@ -315,23 +339,25 @@ impl Aggregator { async fn extract_ready_subrange( storage: &mut StorageProcessor<'_>, - publish_criteria: &mut [Box], - unpublished_blocks: Vec, - last_sealed_block: L1BatchNumber, -) -> Option> { - let mut last_block: Option = None; - for crit in publish_criteria.iter_mut() { - if let Some(crit_block) = crit - .last_block_to_publish(storage, &unpublished_blocks, last_sealed_block) - .await - { - last_block = last_block.map_or(Some(crit_block), |block| Some(block.min(crit_block))); + publish_criteria: &mut [Box], + unpublished_l1_batches: Vec, + last_sealed_l1_batch: L1BatchNumber, +) -> Option> { + let mut last_l1_batch: Option = None; + for criterion in publish_criteria { + let l1_batch_by_criterion = criterion + .last_l1_batch_to_publish(storage, &unpublished_l1_batches, last_sealed_l1_batch) + .await; + if let Some(l1_batch) = l1_batch_by_criterion { + last_l1_batch = Some(last_l1_batch.map_or(l1_batch, |number| number.min(l1_batch))); } } - last_block.map(|last_block| { - unpublished_blocks + + let last_l1_batch = last_l1_batch?; + Some( + unpublished_l1_batches .into_iter() - .take_while(|bl| bl.header.number <= last_block) - .collect() - }) + .take_while(|l1_batch| l1_batch.header.number <= last_l1_batch) + .collect(), + ) } diff --git a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs b/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs deleted file mode 100644 index 87c581df69f1..000000000000 --- a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs +++ /dev/null @@ -1,256 +0,0 @@ -use crate::gas_tracker::agg_block_base_cost; -use async_trait::async_trait; -use chrono::Utc; -use zksync_dal::StorageProcessor; -use 
zksync_types::commitment::BlockWithMetadata; -use zksync_types::{aggregated_operations::AggregatedActionType, L1BatchNumber}; - -#[async_trait] -pub trait BlockPublishCriterion: std::fmt::Debug + Send + Sync { - // returns None if there is no need to publish any blocks - // otherwise returns the block height of the last block that needs to be published - async fn last_block_to_publish( - &mut self, - storage: &mut StorageProcessor<'_>, - consecutive_blocks: &[BlockWithMetadata], - last_sealed_block: L1BatchNumber, - ) -> Option; - - fn name(&self) -> &'static str; -} - -#[derive(Debug)] -pub struct BlockNumberCriterion { - pub op: AggregatedActionType, - // maximum number of blocks to be packed together - pub limit: u32, -} - -#[async_trait] -impl BlockPublishCriterion for BlockNumberCriterion { - async fn last_block_to_publish( - &mut self, - _storage: &mut StorageProcessor<'_>, - consecutive_blocks: &[BlockWithMetadata], - _last_sealed_block: L1BatchNumber, - ) -> Option { - { - let mut block_heights = consecutive_blocks.iter().map(|block| block.header.number.0); - block_heights.next().and_then(|first| { - let last_block_height = block_heights.last().unwrap_or(first); - let blocks_count = last_block_height - first + 1; - if blocks_count >= self.limit { - let result = L1BatchNumber(first + self.limit - 1); - vlog::debug!( - "{} block range {}-{}: NUMBER {} triggered", - self.op.to_string(), - first, - result.0, - self.limit - ); - metrics::counter!( - "server.eth_sender.block_aggregation_reason", - 1, - "type" => "number", - "op" => self.op.to_string() - ); - Some(result) - } else { - None - } - }) - } - } - - fn name(&self) -> &'static str { - "block_number" - } -} - -#[derive(Debug)] -pub struct TimestampDeadlineCriterion { - pub op: AggregatedActionType, - // Maximum block age in seconds. Once reached, we pack and publish all the available blocks. - pub deadline_seconds: u64, - // If `max_allowed_lag` is some and last block sent to L1 is more than `max_allowed_lag` behind, - // it means that sender is lagging significantly and we shouldn't apply this criteria to use all capacity - // and avoid packing small ranges. 
- pub max_allowed_lag: Option, -} - -#[async_trait] -impl BlockPublishCriterion for TimestampDeadlineCriterion { - async fn last_block_to_publish( - &mut self, - _storage: &mut StorageProcessor<'_>, - consecutive_blocks: &[BlockWithMetadata], - last_sealed_block: L1BatchNumber, - ) -> Option { - consecutive_blocks.iter().next().and_then(|first_block| { - let last_block_number = consecutive_blocks.iter().last().unwrap().header.number.0; - if let Some(max_allowed_lag) = self.max_allowed_lag { - if last_sealed_block.0 - last_block_number >= max_allowed_lag as u32 { - return None; - } - } - let oldest_block_age_seconds = - Utc::now().timestamp() as u64 - first_block.header.timestamp; - if oldest_block_age_seconds >= self.deadline_seconds { - let result = consecutive_blocks - .last() - .unwrap_or(first_block) - .header - .number; - vlog::debug!( - "{} block range {}-{}: TIMESTAMP triggered", - self.op.to_string(), - first_block.header.number.0, - result.0 - ); - metrics::counter!( - "server.eth_sender.block_aggregation_reason", - 1, - "type" => "timestamp", - "op" => self.op.to_string() - ); - Some(result) - } else { - None - } - }) - } - fn name(&self) -> &'static str { - "timestamp" - } -} - -#[derive(Debug)] -pub struct GasCriterion { - pub op: AggregatedActionType, - pub gas_limit: u32, -} - -impl GasCriterion { - pub fn new(op: AggregatedActionType, gas_limit: u32) -> GasCriterion { - GasCriterion { op, gas_limit } - } - - async fn get_gas_amount( - &mut self, - storage: &mut StorageProcessor<'_>, - block_number: L1BatchNumber, - ) -> u32 { - storage - .blocks_dal() - .get_blocks_predicted_gas(block_number, block_number, self.op) - .await - } -} - -#[async_trait] -impl BlockPublishCriterion for GasCriterion { - async fn last_block_to_publish( - &mut self, - storage: &mut StorageProcessor<'_>, - consecutive_blocks: &[BlockWithMetadata], - _last_sealed_block: L1BatchNumber, - ) -> Option { - let base_cost = agg_block_base_cost(self.op); - assert!( - self.gas_limit > base_cost, - "Config max gas cost for operations is too low" - ); - // We're not sure our predictions are accurate, so it's safer to lower the gas limit by 10% - let mut gas_left = (self.gas_limit as f64 * 0.9).round() as u32 - base_cost; - - let mut last_block: Option = None; - for (index, block) in consecutive_blocks.iter().enumerate() { - let block_gas = self.get_gas_amount(storage, block.header.number).await; - if block_gas >= gas_left { - if index == 0 { - panic!( - "block {} requires {} gas, which is more than the range limit of {}", - block.header.number, block_gas, self.gas_limit - ) - } - last_block = Some(L1BatchNumber(block.header.number.0 - 1)); - break; - } else { - gas_left -= block_gas; - } - } - - if last_block.is_some() { - vlog::debug!( - "{} block range {}-{}: GAS {} triggered", - self.op.to_string(), - consecutive_blocks.first().unwrap().header.number.0, - last_block.unwrap().0, - self.gas_limit - gas_left, - ); - metrics::counter!( - "server.eth_sender.block_aggregation_reason", - 1, - "type" => "gas", - "op" => self.op.to_string() - ); - } - last_block - } - fn name(&self) -> &'static str { - "gas_limit" - } -} - -#[derive(Debug)] -pub struct DataSizeCriterion { - pub op: AggregatedActionType, - pub data_limit: usize, -} - -#[async_trait] -impl BlockPublishCriterion for DataSizeCriterion { - async fn last_block_to_publish( - &mut self, - _storage: &mut StorageProcessor<'_>, - consecutive_blocks: &[BlockWithMetadata], - _last_sealed_block: L1BatchNumber, - ) -> Option { - const STORED_BLOCK_INFO_SIZE: usize = 
96; // size of `StoredBlockInfo` solidity struct - let mut data_size_left = self.data_limit - STORED_BLOCK_INFO_SIZE; - - for (index, block) in consecutive_blocks.iter().enumerate() { - if data_size_left < block.l1_commit_data_size() { - if index == 0 { - panic!( - "block {} requires {} data, which is more than the range limit of {}", - block.header.number, - block.l1_commit_data_size(), - self.data_limit - ) - } - vlog::debug!( - "{} block range {}-{}: DATA LIMIT {} triggered", - self.op.to_string(), - consecutive_blocks.first().unwrap().header.number.0, - block.header.number.0 - 1, - self.data_limit - data_size_left, - ); - metrics::counter!( - "server.eth_sender.block_aggregation_reason", - 1, - "type" => "data_size", - "op" => self.op.to_string() - ); - return Some(block.header.number - 1); - } - data_size_left -= block.l1_commit_data_size(); - } - - None - } - - fn name(&self) -> &'static str { - "data_size" - } -} diff --git a/core/bin/zksync_core/src/eth_sender/error.rs b/core/bin/zksync_core/src/eth_sender/error.rs index 585277b39a5f..080e252c92c2 100644 --- a/core/bin/zksync_core/src/eth_sender/error.rs +++ b/core/bin/zksync_core/src/eth_sender/error.rs @@ -1,7 +1,10 @@ -use zksync_eth_client::types::Error; +use zksync_eth_client::types; +use zksync_types::web3::contract; #[derive(Debug, thiserror::Error)] pub enum ETHSenderError { #[error("Ethereum gateway Error {0}")] - EthereumGateWayError(#[from] Error), + EthereumGateWayError(#[from] types::Error), + #[error("Token parsing Error: {0}")] + ParseError(#[from] contract::Error), } diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs index ac1da0d38ade..13d565d29773 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -1,14 +1,36 @@ -use crate::eth_sender::grafana_metrics::track_eth_tx_metrics; -use crate::eth_sender::zksync_functions::ZkSyncFunctions; -use crate::eth_sender::{zksync_functions, Aggregator, ETHSenderError}; -use crate::gas_tracker::agg_block_base_cost; -use std::cmp::max; +use std::convert::TryInto; + use tokio::sync::watch; + use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::BoundEthInterface; -use zksync_types::{aggregated_operations::AggregatedOperation, eth_sender::EthTx, Address, H256}; +use zksync_types::{ + aggregated_operations::AggregatedOperation, + contracts::{Multicall3Call, Multicall3Result}, + eth_sender::EthTx, + ethabi::Token, + protocol_version::{L1VerifierConfig, VerifierParams}, + vk_transform::l1_vk_commitment, + web3::contract::{tokens::Tokenizable, Error, Options}, + Address, ProtocolVersionId, H256, U256, +}; + +use crate::eth_sender::{ + grafana_metrics::track_eth_tx_metrics, zksync_functions::ZkSyncFunctions, Aggregator, + ETHSenderError, +}; +use crate::gas_tracker::agg_l1_batch_base_cost; + +/// Data queried from L1 using multicall contract. 
+#[derive(Debug)] +pub struct MulticallData { + pub base_system_contracts_hashes: BaseSystemContractsHashes, + pub verifier_params: VerifierParams, + pub verifier_address: Address, + pub protocol_version_id: ProtocolVersionId, +} /// The component is responsible for aggregating l1 batches into eth_txs: /// Such as CommitBlocks, PublishProofBlocksOnchain and ExecuteBlock @@ -17,7 +39,9 @@ use zksync_types::{aggregated_operations::AggregatedOperation, eth_sender::EthTx pub struct EthTxAggregator { aggregator: Aggregator, config: SenderConfig, - contract_address: Address, + timelock_contract_address: Address, + l1_multicall3_address: Address, + pub(super) main_zksync_contract_address: Address, functions: ZkSyncFunctions, base_nonce: u64, } @@ -26,16 +50,20 @@ impl EthTxAggregator { pub fn new( config: SenderConfig, aggregator: Aggregator, - contract_address: Address, + timelock_contract_address: Address, + l1_multicall3_address: Address, + main_zksync_contract_address: Address, base_nonce: u64, ) -> Self { - let functions = zksync_functions::get_zksync_functions(); + let functions = ZkSyncFunctions::default(); Self { - base_nonce, - aggregator, config, - contract_address, + aggregator, + timelock_contract_address, + l1_multicall3_address, + main_zksync_contract_address, functions, + base_nonce, } } @@ -55,46 +83,236 @@ impl EthTxAggregator { break; } - if let Err(e) = self + if let Err(err) = self .loop_iteration(&mut storage, &mut prover_storage, ð_client) .await { // Web3 API request failures can cause this, // and anything more important is already properly reported. - vlog::warn!("eth_sender error {:?}", e); + vlog::warn!("eth_sender error {err:?}"); } tokio::time::sleep(self.config.aggregate_tx_poll_period()).await; } } - async fn get_l1_base_system_contracts_hashes( + pub(super) async fn get_multicall_data( &mut self, eth_client: &E, - ) -> Result { - let bootloader_code_hash: H256 = eth_client - .call_main_contract_function( - "getL2BootloaderBytecodeHash", - (), + ) -> Result { + let calldata = self.generate_calldata_for_multicall(); + let aggregate3_result = eth_client + .call_contract_function( + &self.functions.aggregate3.name, + calldata, None, - Default::default(), + Options::default(), None, + self.l1_multicall3_address, + self.functions.multicall_contract.clone(), ) .await?; - let default_account_code_hash: H256 = eth_client - .call_main_contract_function( - "getL2DefaultAccountBytecodeHash", + self.parse_multicall_data(aggregate3_result) + } + + // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. + // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. + // Each token describes a specific contract call. 
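// NOTE: the snippet below is an editorial sketch, not part of the patch. Assuming that
// `Multicall3Call` mirrors Multicall3's `Call3 { address target; bool allowFailure; bytes callData; }`
// struct, the tuple token produced for a single call has this shape:

use zksync_types::{ethabi::Token, Address};

fn call3_token(target: Address, allow_failure: bool, calldata: Vec<u8>) -> Token {
    // Field order matches the Solidity struct: (target, allowFailure, callData).
    Token::Tuple(vec![
        Token::Address(target),
        Token::Bool(allow_failure),
        Token::Bytes(calldata),
    ])
}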
+ pub(super) fn generate_calldata_for_multicall(&self) -> Vec<Token> { + const ALLOW_FAILURE: bool = false; + + // First zksync contract call + let get_l2_bootloader_hash_input = self + .functions + .get_l2_bootloader_bytecode_hash + .encode_input(&[]) + .unwrap(); + let get_bootloader_hash_call = Multicall3Call { + target: self.main_zksync_contract_address, + allow_failure: ALLOW_FAILURE, + calldata: get_l2_bootloader_hash_input, + }; + + // Second zksync contract call + let get_l2_default_aa_hash_input = self + .functions + .get_l2_default_account_bytecode_hash + .encode_input(&[]) + .unwrap(); + let get_default_aa_hash_call = Multicall3Call { + target: self.main_zksync_contract_address, + allow_failure: ALLOW_FAILURE, + calldata: get_l2_default_aa_hash_input, + }; + + // Third zksync contract call + let get_verifier_params_input = self + .functions + .get_verifier_params + .encode_input(&[]) + .unwrap(); + let get_verifier_params_call = Multicall3Call { + target: self.main_zksync_contract_address, + allow_failure: ALLOW_FAILURE, + calldata: get_verifier_params_input, + }; + + // Fourth zksync contract call + let get_verifier_input = self.functions.get_verifier.encode_input(&[]).unwrap(); + let get_verifier_call = Multicall3Call { + target: self.main_zksync_contract_address, + allow_failure: ALLOW_FAILURE, + calldata: get_verifier_input, + }; + + // Fifth zksync contract call + let get_protocol_version_input = self + .functions + .get_protocol_version + .encode_input(&[]) + .unwrap(); + let get_protocol_version_call = Multicall3Call { + target: self.main_zksync_contract_address, + allow_failure: ALLOW_FAILURE, + calldata: get_protocol_version_input, + }; + + // Convert the structs into tokens and return them as a vector + vec![ + get_bootloader_hash_call.into_token(), + get_default_aa_hash_call.into_token(), + get_verifier_params_call.into_token(), + get_verifier_call.into_token(), + get_protocol_version_call.into_token(), + ] + } + + // The role of the method below is to detokenize the multicall result, which is a single token. + // This token is an array of tuples like (bool, bytes) that contain the status and result of each contract call. 
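// NOTE: an illustrative sketch (not part of the patch) of that detokenization step,
// assuming each entry follows Multicall3's `Result { bool success; bytes returnData; }` layout:

use zksync_types::ethabi::Token;

fn decode_multicall3_result(token: Token) -> Option<(bool, Vec<u8>)> {
    // A well-formed entry is a 2-tuple of (success flag, raw return data).
    if let Token::Tuple(fields) = token {
        if let [Token::Bool(success), Token::Bytes(data)] = fields.as_slice() {
            return Some((*success, data.clone()));
        }
    }
    None
}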
+ pub(super) fn parse_multicall_data( + &self, + token: Token, + ) -> Result<MulticallData, ETHSenderError> { + let parse_error = |tokens: &[Token]| { + Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!("Failed to parse multicall token: {:?}", tokens), + ))) + }; + + if let Token::Array(call_results) = token { + // 5 calls are aggregated in multicall + if call_results.len() != 5 { + return parse_error(&call_results); + } + let mut call_results_iterator = call_results.into_iter(); + + let multicall3_bootloader = + Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; + + if multicall3_bootloader.len() != 32 { + return Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!( + "multicall3 bootloader hash data is not of length 32: {:?}", + multicall3_bootloader + ), + ))); + } + let bootloader = H256::from_slice(&multicall3_bootloader); + + let multicall3_default_aa = + Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; + if multicall3_default_aa.len() != 32 { + return Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!( + "multicall3 default aa hash data is not of length 32: {:?}", + multicall3_default_aa + ), + ))); + } + let default_aa = H256::from_slice(&multicall3_default_aa); + let base_system_contracts_hashes = BaseSystemContractsHashes { + bootloader, + default_aa, + }; + + let multicall3_verifier_params = + Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; + if multicall3_verifier_params.len() != 96 { + return Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!( + "multicall3 verifier params data is not of length 96: {:?}", + multicall3_verifier_params + ), + ))); + } + let recursion_node_level_vk_hash = H256::from_slice(&multicall3_verifier_params[..32]); + let recursion_leaf_level_vk_hash = + H256::from_slice(&multicall3_verifier_params[32..64]); + let recursion_circuits_set_vks_hash = + H256::from_slice(&multicall3_verifier_params[64..]); + let verifier_params = VerifierParams { + recursion_node_level_vk_hash, + recursion_leaf_level_vk_hash, + recursion_circuits_set_vks_hash, + }; + + let multicall3_verifier_address = + Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; + if multicall3_verifier_address.len() != 32 { + return Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!( + "multicall3 verifier address data is not of length 32: {:?}", + multicall3_verifier_address + ), + ))); + } + let verifier_address = Address::from_slice(&multicall3_verifier_address[12..]); + + let multicall3_protocol_version = + Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; + if multicall3_protocol_version.len() != 32 { + return Err(ETHSenderError::ParseError(Error::InvalidOutputType( + format!( + "multicall3 protocol version data is not of length 32: {:?}", + multicall3_protocol_version + ), + ))); + } + let protocol_version_id = U256::from_big_endian(&multicall3_protocol_version) + .try_into() + .unwrap(); + + return Ok(MulticallData { + base_system_contracts_hashes, + verifier_params, + verifier_address, + protocol_version_id, + }); + } + parse_error(&[token]) + } + + /// Loads the current verifier config from L1 + async fn get_recursion_scheduler_level_vk_hash( + &mut self, + eth_client: &E, + verifier_address: Address, + ) -> Result<H256, ETHSenderError> { + let token: Token = eth_client + .call_contract_function( + &self.functions.get_verification_key.name, (), None, Default::default(), None, + verifier_address, + 
self.functions.verifier_contract.clone(), ) .await?; - Ok(BaseSystemContractsHashes { - bootloader: bootloader_code_hash, - default_aa: default_account_code_hash, - }) + let recursion_scheduler_level_vk_hash = l1_vk_commitment(token); + + Ok(recursion_scheduler_level_vk_hash) } #[tracing::instrument(skip(self, storage, eth_client))] @@ -104,77 +322,94 @@ impl EthTxAggregator { prover_storage: &mut StorageProcessor<'_>, eth_client: &E, ) -> Result<(), ETHSenderError> { - let base_system_contracts_hashes = - self.get_l1_base_system_contracts_hashes(eth_client).await?; + let MulticallData { + base_system_contracts_hashes, + verifier_params, + verifier_address, + protocol_version_id, + } = self.get_multicall_data(eth_client).await?; + + let recursion_scheduler_level_vk_hash = self + .get_recursion_scheduler_level_vk_hash(eth_client, verifier_address) + .await?; + let l1_verifier_config = L1VerifierConfig { + params: verifier_params, + recursion_scheduler_level_vk_hash, + }; if let Some(agg_op) = self .aggregator - .get_next_ready_operation(storage, prover_storage, base_system_contracts_hashes) + .get_next_ready_operation( + storage, + prover_storage, + base_system_contracts_hashes, + protocol_version_id, + l1_verifier_config, + ) .await { let tx = self.save_eth_tx(storage, &agg_op).await?; - Self::log_eth_tx_saving(storage, agg_op, &tx).await; + Self::report_eth_tx_saving(storage, agg_op, &tx).await; } Ok(()) } - async fn log_eth_tx_saving( + async fn report_eth_tx_saving( storage: &mut StorageProcessor<'_>, aggregated_op: AggregatedOperation, tx: &EthTx, ) { + let l1_batch_number_range = aggregated_op.l1_batch_range(); vlog::info!( - "eth_tx {} {} ({}-{}): saved", + "eth_tx with ID {} for op {} was saved for L1 batches {l1_batch_number_range:?}", tx.id, - aggregated_op.get_action_caption(), - aggregated_op.get_block_range().0 .0, - aggregated_op.get_block_range().1 .0, + aggregated_op.get_action_caption() ); - if let AggregatedOperation::CommitBlocks(commit_op) = &aggregated_op { - for block in &commit_op.blocks { + if let AggregatedOperation::Commit(commit_op) = &aggregated_op { + for batch in &commit_op.l1_batches { metrics::histogram!( "server.eth_sender.pubdata_size", - block.metadata.l2_l1_messages_compressed.len() as f64, + batch.metadata.l2_l1_messages_compressed.len() as f64, "kind" => "l2_l1_messages_compressed" ); metrics::histogram!( "server.eth_sender.pubdata_size", - block.metadata.initial_writes_compressed.len() as f64, + batch.metadata.initial_writes_compressed.len() as f64, "kind" => "initial_writes_compressed" ); metrics::histogram!( "server.eth_sender.pubdata_size", - block.metadata.repeated_writes_compressed.len() as f64, + batch.metadata.repeated_writes_compressed.len() as f64, "kind" => "repeated_writes_compressed" ); } } + let range_size = l1_batch_number_range.end().0 - l1_batch_number_range.start().0 + 1; metrics::histogram!( "server.eth_sender.block_range_size", - (aggregated_op.get_block_range().1.0 - aggregated_op.get_block_range().0.0 + 1) as f64, - "type" => aggregated_op.get_action_type().to_string() + range_size as f64, + "type" => aggregated_op.get_action_type().as_str() ); track_eth_tx_metrics(storage, "save", tx).await; } fn encode_aggregated_op(&self, op: &AggregatedOperation) -> Vec { match &op { - AggregatedOperation::CommitBlocks(commit_blocks) => self + AggregatedOperation::Commit(op) => self .functions .commit_blocks - .encode_input(&commit_blocks.get_eth_tx_args()), - AggregatedOperation::PublishProofBlocksOnchain(prove_blocks) => self + 
.encode_input(&op.get_eth_tx_args()), + AggregatedOperation::PublishProofOnchain(op) => self .functions .prove_blocks - .encode_input(&prove_blocks.get_eth_tx_args()), - AggregatedOperation::ExecuteBlocks(execute_blocks) => self + .encode_input(&op.get_eth_tx_args()), + AggregatedOperation::Execute(op) => self .functions .execute_blocks - .encode_input(&execute_blocks.get_eth_tx_args()), + .encode_input(&op.get_eth_tx_args()), } - .expect("Failed to encode transaction data.") - .to_vec() + .expect("Failed to encode transaction data") } pub(super) async fn save_eth_tx( @@ -185,14 +420,14 @@ impl EthTxAggregator { let mut transaction = storage.start_transaction().await; let nonce = self.get_next_nonce(&mut transaction).await?; let calldata = self.encode_aggregated_op(aggregated_op); - let (first_block, last_block) = aggregated_op.get_block_range(); + let l1_batch_number_range = aggregated_op.l1_batch_range(); let op_type = aggregated_op.get_action_type(); - let blocks_predicted_gas = transaction + let predicted_gas_for_batches = transaction .blocks_dal() - .get_blocks_predicted_gas(first_block, last_block, op_type) + .get_l1_batches_predicted_gas(l1_batch_number_range.clone(), op_type) .await; - let eth_tx_predicted_gas = agg_block_base_cost(op_type) + blocks_predicted_gas; + let eth_tx_predicted_gas = agg_l1_batch_base_cost(op_type) + predicted_gas_for_batches; let eth_tx = transaction .eth_sender_dal() @@ -200,14 +435,14 @@ impl EthTxAggregator { nonce, calldata, op_type, - self.contract_address, + self.timelock_contract_address, eth_tx_predicted_gas, ) .await; transaction .blocks_dal() - .set_eth_tx_id(first_block, last_block, eth_tx.id, op_type) + .set_eth_tx_id(l1_batch_number_range, eth_tx.id, op_type) .await; transaction.commit().await; Ok(eth_tx) @@ -220,6 +455,6 @@ impl EthTxAggregator { let db_nonce = storage.eth_sender_dal().get_next_nonce().await.unwrap_or(0); // Between server starts we can execute some txs using operator account or remove some txs from the database // At the start we have to consider this fact and get the max nonce. - Ok(max(db_nonce, self.base_nonce)) + Ok(db_nonce.max(self.base_nonce)) } } diff --git a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs index bd8250344971..2f9e1c706c48 100644 --- a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs +++ b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs @@ -1,4 +1,5 @@ use std::time::Instant; + use zksync_dal::StorageProcessor; use zksync_types::eth_sender::EthTx; use zksync_utils::time::seconds_since_epoch; @@ -9,33 +10,33 @@ pub async fn track_eth_tx_metrics( tx: &EthTx, ) { let start = Instant::now(); - let stage = format!("l1_{}_{}", l1_stage, tx.tx_type.to_string()); + let stage = format!("l1_{l1_stage}_{}", tx.tx_type); - let blocks = connection + let l1_batch_headers = connection .blocks_dal() - .get_blocks_for_eth_tx_id(tx.id) + .get_l1_batches_for_eth_tx_id(tx.id) .await; // This should be only the case when some blocks were reverted. 
- if blocks.is_empty() { - vlog::warn!("No blocks were found for eth_tx with id = {}", tx.id); + if l1_batch_headers.is_empty() { + vlog::warn!("No L1 batches were found for eth_tx with id = {}", tx.id); return; } - for block in blocks { + for header in l1_batch_headers { metrics::histogram!( "server.block_latency", - (seconds_since_epoch() - block.timestamp) as f64, + (seconds_since_epoch() - header.timestamp) as f64, "stage" => stage.clone() ); metrics::counter!( "server.processed_txs", - block.tx_count() as u64, + header.tx_count() as u64, "stage" => stage.clone() ); metrics::counter!( "server.processed_l1_txs", - block.l1_tx_count as u64, + header.l1_tx_count as u64, "stage" => stage.clone() ); } } diff --git a/core/bin/zksync_core/src/eth_sender/mod.rs b/core/bin/zksync_core/src/eth_sender/mod.rs index 2e5044be81e9..1bf0468e8e3f 100644 --- a/core/bin/zksync_core/src/eth_sender/mod.rs +++ b/core/bin/zksync_core/src/eth_sender/mod.rs @@ -1,5 +1,5 @@ mod aggregator; -mod block_publish_criterion; +mod publish_criterion; mod error; mod eth_tx_aggregator; diff --git a/core/bin/zksync_core/src/eth_sender/publish_criterion.rs b/core/bin/zksync_core/src/eth_sender/publish_criterion.rs new file mode 100644 index 000000000000..1046496c4295 --- /dev/null +++ b/core/bin/zksync_core/src/eth_sender/publish_criterion.rs @@ -0,0 +1,262 @@ +use async_trait::async_trait; +use chrono::Utc; + +use std::fmt; + +use zksync_dal::StorageProcessor; +use zksync_types::commitment::L1BatchWithMetadata; +use zksync_types::{aggregated_operations::AggregatedActionType, L1BatchNumber}; + +use crate::gas_tracker::agg_l1_batch_base_cost; + +#[async_trait] +pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync { + // Takes `&self` receiver for the trait to be object-safe + fn name(&self) -> &'static str; + + /// Returns `None` if there is no need to publish any L1 batches. + /// Otherwise, returns the number of the last L1 batch that needs to be published. + async fn last_l1_batch_to_publish( + &mut self, + storage: &mut StorageProcessor<'_>, + consecutive_l1_batches: &[L1BatchWithMetadata], + last_sealed_l1_batch: L1BatchNumber, + ) -> Option<L1BatchNumber>; +} + +#[derive(Debug)] +pub struct NumberCriterion { + pub op: AggregatedActionType, + /// Maximum number of L1 batches to be packed together. + pub limit: u32, +} + +#[async_trait] +impl L1BatchPublishCriterion for NumberCriterion { + fn name(&self) -> &'static str { + "l1_batch_number" + } + + async fn last_l1_batch_to_publish( + &mut self, + _storage: &mut StorageProcessor<'_>, + consecutive_l1_batches: &[L1BatchWithMetadata], + _last_sealed_l1_batch: L1BatchNumber, + ) -> Option<L1BatchNumber> { + let mut batch_numbers = consecutive_l1_batches + .iter() + .map(|batch| batch.header.number.0); + + let first = batch_numbers.next()?; + let last_batch_number = batch_numbers.last().unwrap_or(first); + let batch_count = last_batch_number - first + 1; + if batch_count >= self.limit { + let result = L1BatchNumber(first + self.limit - 1); + vlog::debug!( + "`l1_batch_number` publish criterion (limit={}) triggered for op {} with L1 batch range {:?}", + self.limit, + self.op, + first..=result.0 + ); + metrics::counter!( + "server.eth_sender.block_aggregation_reason", + 1, + "type" => "number", + "op" => self.op.as_str() + ); + Some(result) + } else { + None + } + } +} + 
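// NOTE: a sketch (not part of the patch; the helper name is hypothetical) of how a caller
// can combine several criteria of this kind: each criterion proposes a last batch to
// publish, and the most conservative (smallest) proposal wins.

async fn combine_criteria(
    criteria: &mut [Box<dyn L1BatchPublishCriterion>],
    storage: &mut StorageProcessor<'_>,
    batches: &[L1BatchWithMetadata],
    last_sealed: L1BatchNumber,
) -> Option<L1BatchNumber> {
    let mut result: Option<L1BatchNumber> = None;
    for criterion in criteria.iter_mut() {
        if let Some(batch) = criterion
            .last_l1_batch_to_publish(storage, batches, last_sealed)
            .await
        {
            // Keep the smallest proposed batch number seen so far.
            result = Some(result.map_or(batch, |current| current.min(batch)));
        }
    }
    result
}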
+#[derive(Debug)] +pub struct TimestampDeadlineCriterion { + pub op: AggregatedActionType, + /// Maximum L1 batch age in seconds. Once reached, we pack and publish all the available L1 batches. + pub deadline_seconds: u64, + /// If `max_allowed_lag` is `Some(_)` and the last L1 batch sent to L1 is more than `max_allowed_lag` + /// batches behind the last sealed one, the sender is lagging significantly and this criterion is skipped, + /// so that full capacity is used instead of packing small ranges. + pub max_allowed_lag: Option<u32>, +} + +#[async_trait] +impl L1BatchPublishCriterion for TimestampDeadlineCriterion { + fn name(&self) -> &'static str { + "timestamp" + } + + async fn last_l1_batch_to_publish( + &mut self, + _storage: &mut StorageProcessor<'_>, + consecutive_l1_batches: &[L1BatchWithMetadata], + last_sealed_l1_batch: L1BatchNumber, + ) -> Option<L1BatchNumber> { + let first_l1_batch = consecutive_l1_batches.iter().next()?; + let last_l1_batch_number = consecutive_l1_batches.iter().last()?.header.number.0; + if let Some(max_allowed_lag) = self.max_allowed_lag { + if last_sealed_l1_batch.0 - last_l1_batch_number >= max_allowed_lag as u32 { + return None; + } + } + let oldest_l1_batch_age_seconds = + Utc::now().timestamp() as u64 - first_l1_batch.header.timestamp; + if oldest_l1_batch_age_seconds >= self.deadline_seconds { + let result = consecutive_l1_batches + .last() + .unwrap_or(first_l1_batch) + .header + .number; + vlog::debug!( + "`timestamp` publish criterion triggered for op {} with L1 batch range {:?}", + self.op, + first_l1_batch.header.number.0..=result.0 + ); + metrics::counter!( + "server.eth_sender.block_aggregation_reason", + 1, + "type" => "timestamp", + "op" => self.op.as_str() + ); + Some(result) + } else { + None + } + } +} + 
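// NOTE: a worked example (not part of the patch; numbers are hypothetical) of the deadline
// check above: with `deadline_seconds = 300` and the oldest unpublished batch stamped at
// t = 1_000_000, the criterion starts firing once the current timestamp reaches 1_000_300.

fn deadline_reached(now_ts: u64, oldest_batch_ts: u64, deadline_seconds: u64) -> bool {
    // Mirrors `oldest_l1_batch_age_seconds >= self.deadline_seconds`, with saturation
    // guarding against a batch timestamp that lies slightly in the future.
    now_ts.saturating_sub(oldest_batch_ts) >= deadline_seconds
}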
"type" => "gas", + "op" => self.op.as_str() + ); + } + last_l1_batch + } +} + +#[derive(Debug)] +pub struct DataSizeCriterion { + pub op: AggregatedActionType, + pub data_limit: usize, +} + +#[async_trait] +impl L1BatchPublishCriterion for DataSizeCriterion { + fn name(&self) -> &'static str { + "data_size" + } + + async fn last_l1_batch_to_publish( + &mut self, + _storage: &mut StorageProcessor<'_>, + consecutive_l1_batches: &[L1BatchWithMetadata], + _last_sealed_l1_batch: L1BatchNumber, + ) -> Option { + const STORED_BLOCK_INFO_SIZE: usize = 96; // size of `StoredBlockInfo` solidity struct + let mut data_size_left = self.data_limit - STORED_BLOCK_INFO_SIZE; + + for (index, l1_batch) in consecutive_l1_batches.iter().enumerate() { + if data_size_left < l1_batch.l1_commit_data_size() { + if index == 0 { + panic!( + "L1 batch #{} requires {} data, which is more than the range limit of {}", + l1_batch.header.number, + l1_batch.l1_commit_data_size(), + self.data_limit + ); + } + + let first_l1_batch_number = consecutive_l1_batches.first().unwrap().header.number.0; + let output = l1_batch.header.number - 1; + vlog::debug!( + "`data_size` publish criterion (data={}) triggered for op {} with L1 batch range {:?}", + self.data_limit - data_size_left, + self.op, + first_l1_batch_number..=output.0 + ); + metrics::counter!( + "server.eth_sender.block_aggregation_reason", + 1, + "type" => "data_size", + "op" => self.op.as_str() + ); + return Some(output); + } + data_size_left -= l1_batch.l1_commit_data_size(); + } + + None + } +} diff --git a/core/bin/zksync_core/src/eth_sender/tests.rs b/core/bin/zksync_core/src/eth_sender/tests.rs index bbf7a0f6700b..ee9f739b2125 100644 --- a/core/bin/zksync_core/src/eth_sender/tests.rs +++ b/core/bin/zksync_core/src/eth_sender/tests.rs @@ -1,25 +1,28 @@ +use assert_matches::assert_matches; use std::sync::{atomic::Ordering, Arc}; use db_test_macro::db_test; use zksync_config::{ configs::eth_sender::{ProofSendingMode, SenderConfig}, - ETHSenderConfig, GasAdjusterConfig, + ContractsConfig, ETHSenderConfig, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::mock::MockEthereum, EthInterface}; use zksync_types::{ aggregated_operations::{ - AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, BlocksProofOperation, + AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, L1BatchProofOperation, }, block::L1BatchHeader, - commitment::{BlockMetaParameters, BlockMetadata, BlockWithMetadata}, + commitment::{L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata}, + ethabi::Token, helpers::unix_timestamp_ms, - Address, L1BatchNumber, L1BlockNumber, H256, + web3::contract::Error, + Address, L1BatchNumber, L1BlockNumber, ProtocolVersionId, H256, }; use crate::eth_sender::{ - eth_tx_manager::L1BlockNumbers, Aggregator, EthTxAggregator, EthTxManager, + eth_tx_manager::L1BlockNumbers, Aggregator, ETHSenderError, EthTxAggregator, EthTxManager, }; use crate::l1_gas_price::GasAdjuster; @@ -27,7 +30,7 @@ use crate::l1_gas_price::GasAdjuster; type MockEthTxManager = EthTxManager, GasAdjuster>>; const DUMMY_OPERATION: AggregatedOperation = - AggregatedOperation::ExecuteBlocks(BlocksExecuteOperation { blocks: vec![] }); + AggregatedOperation::Execute(L1BatchExecuteOperation { l1_batches: vec![] }); #[derive(Debug)] struct EthSenderTester { @@ -48,6 +51,7 @@ impl EthSenderTester { non_ordering_confirmations: bool, ) -> Self { let eth_sender_config = 
ETHSenderConfig::from_env(); + let contracts_config = ContractsConfig::from_env(); let aggregator_config = SenderConfig { aggregated_proof_sizes: vec![1], ..eth_sender_config.sender.clone() @@ -61,7 +65,8 @@ impl EthSenderTester { .chain(history) .collect(), ) - .with_non_ordering_confirmation(non_ordering_confirmations), + .with_non_ordering_confirmation(non_ordering_confirmations) + .with_multicall_address(contracts_config.l1_multicall3_addr), ); gateway .block_number @@ -90,6 +95,8 @@ impl EthSenderTester { Aggregator::new(aggregator_config.clone()), // zkSync contract address Address::random(), + contracts_config.l1_multicall3_addr, + Address::random(), 0, ); @@ -435,27 +442,31 @@ async fn failed_eth_tx(connection_pool: ConnectionPool) { .unwrap(); } -fn block_metadata(header: &L1BatchHeader) -> BlockWithMetadata { - BlockWithMetadata { - header: header.clone(), - metadata: BlockMetadata { - root_hash: Default::default(), - rollup_last_leaf_index: 0, - merkle_root_hash: Default::default(), - initial_writes_compressed: vec![], - repeated_writes_compressed: vec![], - commitment: Default::default(), - l2_l1_messages_compressed: vec![], - l2_l1_merkle_root: Default::default(), - block_meta_params: BlockMetaParameters { - zkporter_is_available: false, - bootloader_code_hash: Default::default(), - default_aa_code_hash: Default::default(), - }, - aux_data_hash: Default::default(), - meta_parameters_hash: Default::default(), - pass_through_data_hash: Default::default(), +fn default_l1_batch_metadata() -> L1BatchMetadata { + L1BatchMetadata { + root_hash: Default::default(), + rollup_last_leaf_index: 0, + merkle_root_hash: Default::default(), + initial_writes_compressed: vec![], + repeated_writes_compressed: vec![], + commitment: Default::default(), + l2_l1_messages_compressed: vec![], + l2_l1_merkle_root: Default::default(), + block_meta_params: L1BatchMetaParameters { + zkporter_is_available: false, + bootloader_code_hash: Default::default(), + default_aa_code_hash: Default::default(), }, + aux_data_hash: Default::default(), + meta_parameters_hash: Default::default(), + pass_through_data_hash: Default::default(), + } +} + +fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { + L1BatchWithMetadata { + header, + metadata: default_l1_batch_metadata(), factory_deps: vec![], } } @@ -463,211 +474,395 @@ fn block_metadata(header: &L1BatchHeader) -> BlockWithMetadata { #[db_test] async fn correct_order_for_confirmations(connection_pool: ConnectionPool) -> anyhow::Result<()> { let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; - let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; - let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; - let second_block = insert_block(&mut tester, L1BatchNumber(2)).await; - commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - execute_blocks(&mut tester, vec![first_block.clone()], true).await; - commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - - let blocks = tester + insert_genesis_protocol_version(&tester).await; + let genesis_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(0)).await; + let first_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(1)).await; + let second_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(2)).await; + + 
commit_l1_batch( + &mut tester, + genesis_l1_batch.clone(), + first_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + genesis_l1_batch.clone(), + first_l1_batch.clone(), + true, + ) + .await; + execute_l1_batches(&mut tester, vec![first_l1_batch.clone()], true).await; + commit_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, None) + .get_ready_for_execute_l1_batches(45, None) .await; - assert_eq!(blocks.len(), 1); - assert_eq!(blocks[0].header.number.0, 2); + assert_eq!(l1_batches.len(), 1); + assert_eq!(l1_batches[0].header.number.0, 2); - execute_blocks(&mut tester, vec![second_block.clone()], true).await; - let blocks = tester + execute_l1_batches(&mut tester, vec![second_l1_batch.clone()], true).await; + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, None) + .get_ready_for_execute_l1_batches(45, None) .await; - assert_eq!(blocks.len(), 0); + assert_eq!(l1_batches.len(), 0); Ok(()) } #[db_test] -async fn skipped_block_at_the_start(connection_pool: ConnectionPool) -> anyhow::Result<()> { +async fn skipped_l1_batch_at_the_start(connection_pool: ConnectionPool) -> anyhow::Result<()> { let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; - let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; - let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; - let second_block = insert_block(&mut tester, L1BatchNumber(2)).await; - commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - execute_blocks(&mut tester, vec![first_block.clone()], true).await; - commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - execute_blocks(&mut tester, vec![second_block.clone()], true).await; - - let third_block = insert_block(&mut tester, L1BatchNumber(3)).await; - let fourth_block = insert_block(&mut tester, L1BatchNumber(4)).await; + insert_genesis_protocol_version(&tester).await; + let genesis_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(0)).await; + let first_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(1)).await; + let second_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(2)).await; + + commit_l1_batch( + &mut tester, + genesis_l1_batch.clone(), + first_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + genesis_l1_batch.clone(), + first_l1_batch.clone(), + true, + ) + .await; + execute_l1_batches(&mut tester, vec![first_l1_batch.clone()], true).await; + commit_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + execute_l1_batches(&mut tester, vec![second_l1_batch.clone()], true).await; + + let third_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(3)).await; + let fourth_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(4)).await; // DO NOT CONFIRM THIRD BLOCK - let third_block_commit_tx_hash = commit_block( + let third_l1_batch_commit_tx_hash = commit_l1_batch( &mut tester, - second_block.clone(), - 
third_block.clone(), + second_l1_batch.clone(), + third_l1_batch.clone(), false, ) .await; - proof_block(&mut tester, second_block.clone(), third_block.clone(), true).await; - commit_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; - proof_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; - let blocks = tester + prove_l1_batch( + &mut tester, + second_l1_batch.clone(), + third_l1_batch.clone(), + true, + ) + .await; + commit_l1_batch( + &mut tester, + third_l1_batch.clone(), + fourth_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + third_l1_batch.clone(), + fourth_l1_batch.clone(), + true, + ) + .await; + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, Some(unix_timestamp_ms())) + .get_ready_for_execute_l1_batches(45, Some(unix_timestamp_ms())) .await; - assert_eq!(blocks.len(), 2); + assert_eq!(l1_batches.len(), 2); - confirm_tx(&mut tester, third_block_commit_tx_hash).await; - let blocks = tester + confirm_tx(&mut tester, third_l1_batch_commit_tx_hash).await; + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, Some(unix_timestamp_ms())) + .get_ready_for_execute_l1_batches(45, Some(unix_timestamp_ms())) .await; - assert_eq!(blocks.len(), 2); + assert_eq!(l1_batches.len(), 2); Ok(()) } #[db_test] -async fn skipped_block_in_the_middle(connection_pool: ConnectionPool) -> anyhow::Result<()> { +async fn skipped_l1_batch_in_the_middle(connection_pool: ConnectionPool) -> anyhow::Result<()> { let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; - let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; - let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; - let second_block = insert_block(&mut tester, L1BatchNumber(2)).await; - commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; - execute_blocks(&mut tester, vec![first_block.clone()], true).await; - commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; - - let third_block = insert_block(&mut tester, L1BatchNumber(3)).await; - let fourth_block = insert_block(&mut tester, L1BatchNumber(4)).await; + insert_genesis_protocol_version(&tester).await; + let genesis_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(0)).await; + let first_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(1)).await; + let second_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(2)).await; + commit_l1_batch( + &mut tester, + genesis_l1_batch.clone(), + first_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch(&mut tester, genesis_l1_batch, first_l1_batch.clone(), true).await; + execute_l1_batches(&mut tester, vec![first_l1_batch.clone()], true).await; + commit_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + first_l1_batch.clone(), + second_l1_batch.clone(), + true, + ) + .await; + + let third_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(3)).await; + let fourth_l1_batch = insert_l1_batch(&mut tester, L1BatchNumber(4)).await; // DO NOT CONFIRM THIRD BLOCK - let third_block_commit_tx_hash = commit_block( + let third_l1_batch_commit_tx_hash = commit_l1_batch( &mut tester, - second_block.clone(), - third_block.clone(), + 
second_l1_batch.clone(), + third_l1_batch.clone(), false, ) .await; - proof_block(&mut tester, second_block.clone(), third_block.clone(), true).await; - commit_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; - proof_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; - let blocks = tester + prove_l1_batch( + &mut tester, + second_l1_batch.clone(), + third_l1_batch.clone(), + true, + ) + .await; + commit_l1_batch( + &mut tester, + third_l1_batch.clone(), + fourth_l1_batch.clone(), + true, + ) + .await; + prove_l1_batch( + &mut tester, + third_l1_batch.clone(), + fourth_l1_batch.clone(), + true, + ) + .await; + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, None) + .get_ready_for_execute_l1_batches(45, None) .await; - // We should return all block including third block - assert_eq!(blocks.len(), 3); - assert_eq!(blocks[0].header.number.0, 2); + // We should return all L1 batches including the third one + assert_eq!(l1_batches.len(), 3); + assert_eq!(l1_batches[0].header.number.0, 2); - confirm_tx(&mut tester, third_block_commit_tx_hash).await; - let blocks = tester + confirm_tx(&mut tester, third_l1_batch_commit_tx_hash).await; + let l1_batches = tester .storage() .await .blocks_dal() - .get_ready_for_execute_blocks(45, None) + .get_ready_for_execute_l1_batches(45, None) .await; - assert_eq!(blocks.len(), 3); + assert_eq!(l1_batches.len(), 3); Ok(()) } -async fn insert_block(tester: &mut EthSenderTester, number: L1BatchNumber) -> L1BatchHeader { - let mut block = L1BatchHeader::new( +#[db_test] +async fn test_parse_multicall_data(connection_pool: ConnectionPool) { + let tester = EthSenderTester::new(connection_pool, vec![100; 100], false).await; + + let original_correct_form_data = Token::Array(vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::default() as u64) + .0 + .to_vec(), + ), + ]), + ]); + + assert!(tester + .aggregator + .parse_multicall_data(original_correct_form_data) + .is_ok()); + + let original_wrong_form_data = vec![ + // should contain 5 tuples + Token::Array(vec![]), + Token::Array(vec![ + Token::Tuple(vec![]), + Token::Tuple(vec![]), + Token::Tuple(vec![]), + ]), + Token::Array(vec![Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes(vec![ + 30, 72, 156, 45, 219, 103, 54, 150, 36, 37, 58, 97, 81, 255, 186, 33, 35, 20, 195, + 77, 19, 182, 23, 65, 145, 9, 223, 123, 242, 64, 125, 149, + ]), + ])]), + // should contain 2 tokens in the tuple + Token::Array(vec![ + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes(vec![ + 30, 72, 156, 45, 219, 103, 54, 150, 36, 37, 58, 97, 81, 255, 186, 33, 35, 20, + 195, 77, 19, 182, 23, 65, 145, 9, 223, 123, 242, 64, 125, 149, + ]), + Token::Bytes(vec![]), + ]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes(vec![ + 40, 72, 156, 45, 219, 103, 54, 150, 36, 37, 58, 97, 81, 255, 186, 33, 35, 20, + 195, 77, 19, 182, 23, 65, 145, 9, 223, 123, 242, 64, 225, 149, + ]), + ]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 20])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + 
H256::from_low_u64_be(ProtocolVersionId::default() as u64) + .0 + .to_vec(), + ), + ]), + ]), + ]; + + for wrong_data_instance in original_wrong_form_data { + assert_matches!( + tester + .aggregator + .parse_multicall_data(wrong_data_instance.clone()), + Err(ETHSenderError::ParseError(Error::InvalidOutputType(_))) + ); + } +} + +#[db_test] +async fn get_multicall_data(connection_pool: ConnectionPool) { + let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false).await; + let multicall_data = tester.aggregator.get_multicall_data(&tester.gateway).await; + assert!(multicall_data.is_ok()); +} + +async fn insert_genesis_protocol_version(tester: &EthSenderTester) { + tester + .storage() + .await + .protocol_versions_dal() + .save_protocol_version(Default::default()) + .await; +} + +async fn insert_l1_batch(tester: &mut EthSenderTester, number: L1BatchNumber) -> L1BatchHeader { + let mut header = L1BatchHeader::new( number, 0, Address::zero(), - BaseSystemContractsHashes { - bootloader: Default::default(), - default_aa: Default::default(), - }, + BaseSystemContractsHashes::default(), + Default::default(), ); - block.is_finished = true; - // save block to the database + header.is_finished = true; + + // Save L1 batch to the database tester .storage() .await .blocks_dal() - .insert_l1_batch(&block, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; tester .storage() .await .blocks_dal() - .save_blocks_metadata( - block.number, - &BlockMetadata { - root_hash: Default::default(), - rollup_last_leaf_index: 0, - merkle_root_hash: Default::default(), - initial_writes_compressed: vec![], - repeated_writes_compressed: vec![], - commitment: Default::default(), - l2_l1_messages_compressed: vec![], - l2_l1_merkle_root: Default::default(), - block_meta_params: BlockMetaParameters { - zkporter_is_available: false, - bootloader_code_hash: Default::default(), - default_aa_code_hash: Default::default(), - }, - aux_data_hash: Default::default(), - meta_parameters_hash: Default::default(), - pass_through_data_hash: Default::default(), - }, + .save_l1_batch_metadata( + header.number, + &default_l1_batch_metadata(), Default::default(), ) .await; - block + header } -async fn execute_blocks( +async fn execute_l1_batches( tester: &mut EthSenderTester, - blocks: Vec, + l1_batches: Vec, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::ExecuteBlocks(BlocksExecuteOperation { - blocks: blocks.iter().map(block_metadata).collect(), + let operation = AggregatedOperation::Execute(L1BatchExecuteOperation { + l1_batches: l1_batches.into_iter().map(l1_batch_with_metadata).collect(), }); send_operation(tester, operation, confirm).await } -async fn proof_block( +async fn prove_l1_batch( tester: &mut EthSenderTester, - last_committed_block: L1BatchHeader, - block: L1BatchHeader, + last_committed_l1_batch: L1BatchHeader, + l1_batch: L1BatchHeader, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::PublishProofBlocksOnchain(BlocksProofOperation { - prev_block: block_metadata(&last_committed_block), - blocks: vec![block_metadata(&block)], + let operation = AggregatedOperation::PublishProofOnchain(L1BatchProofOperation { + prev_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), + l1_batches: vec![l1_batch_with_metadata(l1_batch)], proofs: vec![], should_verify: false, }); send_operation(tester, operation, confirm).await } -async fn commit_block( +async fn commit_l1_batch( tester: &mut EthSenderTester, - last_committed_block: L1BatchHeader, - block: 
L1BatchHeader, + last_committed_l1_batch: L1BatchHeader, + l1_batch: L1BatchHeader, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::CommitBlocks(BlocksCommitOperation { - last_committed_block: block_metadata(&last_committed_block), - blocks: vec![block_metadata(&block)], + let operation = AggregatedOperation::Commit(L1BatchCommitOperation { + last_committed_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), + l1_batches: vec![l1_batch_with_metadata(l1_batch)], }); send_operation(tester, operation, confirm).await } diff --git a/core/bin/zksync_core/src/eth_sender/zksync_functions.rs b/core/bin/zksync_core/src/eth_sender/zksync_functions.rs index 78e684f02877..1dadbd142df2 100644 --- a/core/bin/zksync_core/src/eth_sender/zksync_functions.rs +++ b/core/bin/zksync_core/src/eth_sender/zksync_functions.rs @@ -1,43 +1,66 @@ -use zksync_contracts::zksync_contract; -use zksync_types::ethabi::Function; +use zksync_contracts::{multicall_contract, verifier_contract, zksync_contract}; +use zksync_types::ethabi::{Contract, Function}; #[derive(Debug)] pub(super) struct ZkSyncFunctions { pub(super) commit_blocks: Function, pub(super) prove_blocks: Function, pub(super) execute_blocks: Function, -} + pub(super) get_l2_bootloader_bytecode_hash: Function, + pub(super) get_l2_default_account_bytecode_hash: Function, + pub(super) get_verifier: Function, + pub(super) get_verifier_params: Function, + pub(super) get_protocol_version: Function, -pub(super) fn get_zksync_functions() -> ZkSyncFunctions { - let zksync_contract = zksync_contract(); + pub(super) verifier_contract: Contract, + pub(super) get_verification_key: Function, - let commit_blocks = zksync_contract - .functions - .get("commitBlocks") - .cloned() - .expect("commitBlocks function not found") - .pop() - .expect("commitBlocks function entry not found"); + pub(super) multicall_contract: Contract, + pub(super) aggregate3: Function, +} - let prove_blocks = zksync_contract +fn get_function(contract: &Contract, name: &str) -> Function { + contract .functions - .get("proveBlocks") + .get(name) .cloned() - .expect("proveBlocks function not found") + .unwrap_or_else(|| panic!("{} function not found", name)) .pop() - .expect("proveBlocks function entry not found"); + .unwrap_or_else(|| panic!("{} function entry not found", name)) +} - let execute_blocks = zksync_contract - .functions - .get("executeBlocks") - .cloned() - .expect("executeBlocks function not found") - .pop() - .expect("executeBlocks function entry not found"); +impl Default for ZkSyncFunctions { + fn default() -> Self { + let zksync_contract = zksync_contract(); + let verifier_contract = verifier_contract(); + let multicall_contract = multicall_contract(); + + let commit_blocks = get_function(&zksync_contract, "commitBlocks"); + let prove_blocks = get_function(&zksync_contract, "proveBlocks"); + let execute_blocks = get_function(&zksync_contract, "executeBlocks"); + let get_l2_bootloader_bytecode_hash = + get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); + let get_l2_default_account_bytecode_hash = + get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_verifier = get_function(&zksync_contract, "getVerifier"); + let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); + let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); + let get_verification_key = get_function(&verifier_contract, "get_verification_key"); + let aggregate3 = get_function(&multicall_contract, "aggregate3"); - 
ZkSyncFunctions { - commit_blocks, - prove_blocks, - execute_blocks, + ZkSyncFunctions { + commit_blocks, + prove_blocks, + execute_blocks, + get_l2_bootloader_bytecode_hash, + get_l2_default_account_bytecode_hash, + get_verifier, + get_verifier_params, + get_protocol_version, + verifier_contract, + get_verification_key, + multicall_contract, + aggregate3, + } } } diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs index 80fd3176ded3..e3b4c3a3ecc5 100644 --- a/core/bin/zksync_core/src/eth_watch/client.rs +++ b/core/bin/zksync_core/src/eth_watch/client.rs @@ -1,25 +1,23 @@ -use itertools::Itertools; -use std::convert::TryFrom; -use std::fmt::{Debug, Display}; +use std::fmt::Debug; use tokio::time::Instant; +use zksync_contracts::verifier_contract; use zksync_eth_client::{types::Error as EthClientError, EthInterface}; -use zksync_types::ethabi::{Contract, Hash}; -use zksync_contracts::zksync_contract; use zksync_types::{ - l1::L1Tx, + ethabi::{Contract, Token}, + vk_transform::l1_vk_commitment, web3::{ self, types::{BlockNumber, FilterBuilder, Log}, }, - H160, + Address, H256, }; #[derive(Debug, thiserror::Error)] pub enum Error { - #[error("Log parsing filed: {0}")] + #[error("Log parsing failed: {0}")] LogParse(String), #[error("Eth client error: {0}")] EthClient(#[from] EthClientError), @@ -27,31 +25,21 @@ pub enum Error { InfiniteRecursion, } -#[derive(Debug)] -struct ContractTopics { - new_priority_request: Hash, -} - -impl ContractTopics { - fn new(zksync_contract: &Contract) -> Self { - Self { - new_priority_request: zksync_contract - .event("NewPriorityRequest") - .expect("main contract abi error") - .signature(), - } - } -} - #[async_trait::async_trait] pub trait EthClient { - async fn get_priority_op_events( + /// Returns events in a given block range. + async fn get_events( &self, from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, Error>; + ) -> Result, Error>; + /// Returns finalized L1 block number. async fn finalized_block_number(&self) -> Result; + /// Returns scheduler verification key hash by verifier address. + async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; + /// Sets list of topics to return events for. 
+ fn set_topics(&mut self, topics: Vec); } pub const RETRY_LIMIT: usize = 5; @@ -61,37 +49,34 @@ const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; #[derive(Debug)] pub struct EthHttpQueryClient { client: E, - topics: ContractTopics, - zksync_contract_addr: H160, + topics: Vec, + zksync_contract_addr: Address, + verifier_contract_abi: Contract, confirmations_for_eth_event: Option, } impl EthHttpQueryClient { pub fn new( client: E, - zksync_contract_addr: H160, + zksync_contract_addr: Address, confirmations_for_eth_event: Option, ) -> Self { vlog::debug!("New eth client, contract addr: {:x}", zksync_contract_addr); - let topics = ContractTopics::new(&zksync_contract()); Self { client, - topics, + topics: Vec::new(), zksync_contract_addr, + verifier_contract_abi: verifier_contract(), confirmations_for_eth_event, } } - async fn get_filter_logs( + async fn get_filter_logs( &self, from: BlockNumber, to: BlockNumber, - topics: Vec, - ) -> Result, Error> - where - T: TryFrom, - T::Error: Debug + Display, - { + topics: Vec, + ) -> Result, Error> { let filter = FilterBuilder::default() .address(vec![self.zksync_contract_addr]) .from_block(from) @@ -99,28 +84,37 @@ impl EthHttpQueryClient { .topics(Some(topics), None, None, None) .build(); - self.client - .logs(filter, "watch") - .await? - .into_iter() - .map(|log| T::try_from(log).map_err(|err| Error::LogParse(format!("{}", err)))) - .collect() + self.client.logs(filter, "watch").await.map_err(Into::into) } } #[async_trait::async_trait] impl EthClient for EthHttpQueryClient { - async fn get_priority_op_events( + async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result { + let vk_token: Token = self + .client + .call_contract_function( + "get_verification_key", + (), + None, + Default::default(), + None, + verifier_address, + self.verifier_contract_abi.clone(), + ) + .await?; + Ok(l1_vk_commitment(vk_token)) + } + + async fn get_events( &self, from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, Error> { + ) -> Result, Error> { let start = Instant::now(); - let mut result = self - .get_filter_logs(from, to, vec![self.topics.new_priority_request]) - .await; + let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. @@ -136,8 +130,8 @@ impl EthClient for EthHttpQueryClient EthClient for EthHttpQueryClient 0 { vlog::warn!("Retrying. Retries left: {:?}", retries_left); - result = self - .get_priority_op_events(from, to, retries_left - 1) - .await; + result = self.get_events(from, to, retries_left - 1).await; } } - let events: Vec = result? 
- .into_iter() - .sorted_by_key(|event| event.serial_id()) - .collect(); - metrics::histogram!("eth_watcher.get_priority_op_events", start.elapsed()); - Ok(events) + result } async fn finalized_block_number(&self) -> Result { @@ -217,4 +204,8 @@ impl EthClient for EthHttpQueryClient) { + self.topics = topics; + } } diff --git a/core/bin/zksync_core/src/eth_watch/event_processors/mod.rs b/core/bin/zksync_core/src/eth_watch/event_processors/mod.rs new file mode 100644 index 000000000000..70e1db9a3f14 --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/event_processors/mod.rs @@ -0,0 +1,20 @@ +use crate::eth_watch::client::{Error, EthClient}; +use zksync_dal::StorageProcessor; +use zksync_types::{web3::types::Log, H256}; + +pub mod priority_ops; +pub mod upgrades; + +#[async_trait::async_trait] +pub trait EventProcessor: Send + std::fmt::Debug { + /// Processes given events + async fn process_events( + &mut self, + storage: &mut StorageProcessor<'_>, + client: &W, + events: Vec, + ) -> Result<(), Error>; + + /// Relevant topic which defines what events to be processed + fn relevant_topic(&self) -> H256; +} diff --git a/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs b/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs new file mode 100644 index 000000000000..d10b5893091d --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs @@ -0,0 +1,110 @@ +use crate::eth_watch::{ + client::{Error, EthClient}, + event_processors::EventProcessor, +}; +use std::convert::TryFrom; +use std::time::Instant; +use zksync_contracts::zksync_contract; +use zksync_dal::StorageProcessor; +use zksync_types::{l1::L1Tx, web3::types::Log, PriorityOpId, H256}; + +/// Responsible for saving new priority L1 transactions to the database. 
+#[derive(Debug)] +pub struct PriorityOpsEventProcessor { + next_expected_priority_id: PriorityOpId, + new_priority_request_signature: H256, +} + +impl PriorityOpsEventProcessor { + pub fn new(next_expected_priority_id: PriorityOpId) -> Self { + Self { + next_expected_priority_id, + new_priority_request_signature: zksync_contract() + .event("NewPriorityRequest") + .expect("NewPriorityRequest event is missing in abi") + .signature(), + } + } +} + +#[async_trait::async_trait] +impl EventProcessor for PriorityOpsEventProcessor { + async fn process_events( + &mut self, + storage: &mut StorageProcessor<'_>, + _client: &W, + events: Vec, + ) -> Result<(), Error> { + let mut priority_ops = Vec::new(); + for event in events + .into_iter() + .filter(|event| event.topics[0] == self.new_priority_request_signature) + { + let tx = L1Tx::try_from(event).map_err(|err| Error::LogParse(format!("{}", err)))?; + priority_ops.push(tx); + } + + if priority_ops.is_empty() { + return Ok(()); + } + + let first = &priority_ops[0]; + let last = &priority_ops[priority_ops.len() - 1]; + vlog::debug!( + "Received priority requests with serial ids: {} (block {}) - {} (block {})", + first.serial_id(), + first.eth_block(), + last.serial_id(), + last.eth_block(), + ); + assert_eq!( + last.serial_id().0 - first.serial_id().0 + 1, + priority_ops.len() as u64, + "There is a gap in priority ops received" + ); + + let new_ops: Vec<_> = priority_ops + .into_iter() + .skip_while(|tx| tx.serial_id() < self.next_expected_priority_id) + .collect(); + if new_ops.is_empty() { + return Ok(()); + } + + let first_new = &new_ops[0]; + let last_new = new_ops[new_ops.len() - 1].clone(); + assert_eq!( + first_new.serial_id(), + self.next_expected_priority_id, + "priority transaction serial id mismatch" + ); + + let stage_start = Instant::now(); + metrics::counter!( + "server.processed_txs", + new_ops.len() as u64, + "stage" => "mempool_added" + ); + metrics::counter!( + "server.processed_l1_txs", + new_ops.len() as u64, + "stage" => "mempool_added" + ); + for new_op in new_ops { + let eth_block = new_op.eth_block(); + storage + .transactions_dal() + .insert_transaction_l1(new_op, eth_block) + .await; + } + metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist_l1_txs"); + + self.next_expected_priority_id = last_new.serial_id().next(); + + Ok(()) + } + + fn relevant_topic(&self) -> H256 { + self.new_priority_request_signature + } +} diff --git a/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs b/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs new file mode 100644 index 000000000000..a77daed6736d --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs @@ -0,0 +1,96 @@ +use crate::eth_watch::{ + client::{Error, EthClient}, + event_processors::EventProcessor, +}; +use std::convert::TryFrom; +use std::time::Instant; +use zksync_contracts::zksync_contract; +use zksync_dal::StorageProcessor; +use zksync_types::{web3::types::Log, ProtocolUpgrade, ProtocolVersionId, H256}; + +/// Responsible for saving new protocol upgrade proposals to the database. 
+#[derive(Debug)] +pub struct UpgradesEventProcessor { + last_seen_version_id: ProtocolVersionId, + upgrade_proposal_signature: H256, +} + +impl UpgradesEventProcessor { + pub fn new(last_seen_version_id: ProtocolVersionId) -> Self { + Self { + last_seen_version_id, + upgrade_proposal_signature: zksync_contract() + .event("ProposeTransparentUpgrade") + .expect("ProposeTransparentUpgrade event is missing in abi") + .signature(), + } + } +} + +#[async_trait::async_trait] +impl EventProcessor for UpgradesEventProcessor { + async fn process_events( + &mut self, + storage: &mut StorageProcessor<'_>, + client: &W, + events: Vec, + ) -> Result<(), Error> { + let mut upgrades = Vec::new(); + for event in events + .into_iter() + .filter(|event| event.topics[0] == self.upgrade_proposal_signature) + { + let upgrade = ProtocolUpgrade::try_from(event) + .map_err(|err| Error::LogParse(format!("{:?}", err)))?; + // Scheduler VK is not present in proposal event. It is hardcoded in verifier contract. + let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { + Some(client.scheduler_vk_hash(address).await?) + } else { + None + }; + upgrades.push((upgrade, scheduler_vk_hash)); + } + + if upgrades.is_empty() { + return Ok(()); + } + + let ids_str: Vec<_> = upgrades + .iter() + .map(|(u, _)| format!("{}", u.id as u16)) + .collect(); + vlog::debug!("Received upgrades with ids: {}", ids_str.join(", ")); + + let new_upgrades: Vec<_> = upgrades + .into_iter() + .skip_while(|(v, _)| v.id as u16 <= self.last_seen_version_id as u16) + .collect(); + if new_upgrades.is_empty() { + return Ok(()); + } + + let last_id = new_upgrades.last().unwrap().0.id; + let stage_start = Instant::now(); + for (upgrade, scheduler_vk_hash) in new_upgrades { + let previous_version = storage + .protocol_versions_dal() + .load_previous_version(upgrade.id) + .await + .expect("Expected previous version to be present in DB"); + let new_version = previous_version.apply_upgrade(upgrade, scheduler_vk_hash); + storage + .protocol_versions_dal() + .save_protocol_version(new_version) + .await; + } + metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist_upgrades"); + + self.last_seen_version_id = last_id; + + Ok(()) + } + + fn relevant_topic(&self) -> H256 { + self.upgrade_proposal_signature + } +} diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index eeb4a2c72226..2d2d7276257f 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -12,49 +12,69 @@ use tokio::{sync::watch, task::JoinHandle}; // Workspace deps use zksync_config::constants::PRIORITY_EXPIRATION; +use zksync_config::ETHWatchConfig; +use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{ - l1::L1Tx, web3::types::BlockNumber as Web3BlockNumber, L1BlockNumber, PriorityOpId, H160, + web3::types::BlockNumber as Web3BlockNumber, Address, PriorityOpId, ProtocolVersionId, }; // Local deps -use self::client::{Error, EthClient}; - -use zksync_config::ETHWatchConfig; - -use crate::eth_watch::client::{EthHttpQueryClient, RETRY_LIMIT}; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use self::client::{Error, EthClient, EthHttpQueryClient}; +use crate::eth_watch::client::RETRY_LIMIT; +use event_processors::{ + priority_ops::PriorityOpsEventProcessor, upgrades::UpgradesEventProcessor, EventProcessor, +}; use zksync_eth_client::EthInterface; mod client; +mod event_processors; #[cfg(test)] mod tests; #[derive(Debug)] struct 
EthWatchState { + last_seen_version_id: ProtocolVersionId, next_expected_priority_id: PriorityOpId, last_processed_ethereum_block: u64, } #[derive(Debug)] -pub struct EthWatch { +pub struct EthWatch { client: W, poll_interval: Duration, + event_processors: Vec>>, - state: EthWatchState, + last_processed_ethereum_block: u64, } -impl EthWatch { - pub async fn new(client: W, pool: &ConnectionPool, poll_interval: Duration) -> Self { +impl EthWatch { + pub async fn new(mut client: W, pool: &ConnectionPool, poll_interval: Duration) -> Self { let mut storage = pool.access_storage_tagged("eth_watch").await; let state = Self::initialize_state(&client, &mut storage).await; vlog::info!("initialized state: {:?}", state); + + let priority_ops_processor = + PriorityOpsEventProcessor::new(state.next_expected_priority_id); + let upgrades_processor = UpgradesEventProcessor::new(state.last_seen_version_id); + let event_processors: Vec>> = vec![ + Box::new(priority_ops_processor), + Box::new(upgrades_processor), + ]; + + let topics = event_processors + .iter() + .map(|p| p.relevant_topic()) + .collect(); + client.set_topics(topics); + Self { client, poll_interval, - state, + event_processors, + last_processed_ethereum_block: state.last_processed_ethereum_block, } } @@ -65,6 +85,12 @@ impl EthWatch { .await .map_or(PriorityOpId(0), |e| e + 1); + let last_seen_version_id = storage + .protocol_versions_dal() + .last_version_id() + .await + .expect("Expected at least one (genesis) version to be present in DB"); + let last_processed_ethereum_block = match storage .transactions_dal() .get_last_processed_l1_block() @@ -83,6 +109,7 @@ impl EthWatch { EthWatchState { next_expected_priority_id, + last_seen_version_id, last_processed_ethereum_block, } } @@ -104,103 +131,48 @@ impl EthWatch { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. 
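// Only `last_processed_ethereum_block` is rewound on failure; the processors
// keep their in-memory cursors (`next_expected_priority_id`,
// `last_seen_version_id`). Re-fetching an already-processed window is safe
// because both processors skip events at or below their cursors, so retried
// logs are deduplicated rather than double-inserted.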
vlog::error!("Failed to process new blocks {}", error); - self.state = Self::initialize_state(&self.client, &mut storage).await; + self.last_processed_ethereum_block = + Self::initialize_state(&self.client, &mut storage) + .await + .last_processed_ethereum_block; } } } #[tracing::instrument(skip(self, storage))] async fn loop_iteration(&mut self, storage: &mut StorageProcessor<'_>) -> Result<(), Error> { - let mut stage_start = Instant::now(); + let stage_start = Instant::now(); let to_block = self.client.finalized_block_number().await?; - if to_block <= self.state.last_processed_ethereum_block { + if to_block <= self.last_processed_ethereum_block { return Ok(()); } - let new_ops = self - .get_new_priority_ops(self.state.last_processed_ethereum_block, to_block) - .await?; - - self.state.last_processed_ethereum_block = to_block; - - metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "request"); - if !new_ops.is_empty() { - let first = &new_ops[0].1; - let last = &new_ops[new_ops.len() - 1].1; - assert_eq!( - first.serial_id(), - self.state.next_expected_priority_id, - "priority transaction serial id mismatch" - ); - self.state.next_expected_priority_id = last.serial_id().next(); - stage_start = Instant::now(); - metrics::counter!( - "server.processed_txs", - new_ops.len() as u64, - "stage" => "mempool_added" - ); - metrics::counter!( - "server.processed_l1_txs", - new_ops.len() as u64, - "stage" => "mempool_added" - ); - for (eth_block, new_op) in new_ops { - storage - .transactions_dal() - .insert_transaction_l1(new_op, eth_block) - .await; - } - metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist"); - } - Ok(()) - } - - async fn get_new_priority_ops( - &self, - from_block: u64, - to_block: u64, - ) -> Result, Error> { - let priority_ops: Vec = self + let events = self .client - .get_priority_op_events( - Web3BlockNumber::Number(from_block.into()), + .get_events( + Web3BlockNumber::Number(self.last_processed_ethereum_block.into()), Web3BlockNumber::Number(to_block.into()), RETRY_LIMIT, ) - .await? 
- .into_iter() - .collect::>(); - - if !priority_ops.is_empty() { - let first = &priority_ops[0]; - let last = &priority_ops[priority_ops.len() - 1]; - vlog::debug!( - "Received priority requests with serial ids: {} (block {}) - {} (block {})", - first.serial_id(), - first.eth_block(), - last.serial_id(), - last.eth_block(), - ); - assert_eq!( - last.serial_id().0 - first.serial_id().0 + 1, - priority_ops.len() as u64, - "there is a gap in priority ops received" - ) + .await?; + metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "request"); + + for processor in self.event_processors.iter_mut() { + processor + .process_events(storage, &self.client, events.clone()) + .await?; } - Ok(priority_ops - .into_iter() - .skip_while(|tx| tx.serial_id() < self.state.next_expected_priority_id) - .map(|tx| (L1BlockNumber(tx.eth_block() as u32), tx)) - .collect()) + self.last_processed_ethereum_block = to_block; + Ok(()) } } pub async fn start_eth_watch( pool: ConnectionPool, eth_gateway: E, - diamond_proxy_addr: H160, + diamond_proxy_addr: Address, stop_receiver: watch::Receiver, ) -> JoinHandle<()> { let eth_watch = ETHWatchConfig::from_env(); diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs index fb5a8b43f444..b94e85879964 100644 --- a/core/bin/zksync_core/src/eth_watch/tests.rs +++ b/core/bin/zksync_core/src/eth_watch/tests.rs @@ -5,18 +5,24 @@ use std::sync::Arc; use tokio::sync::RwLock; use db_test_macro::db_test; +use zksync_contracts::zksync_contract; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_types::protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}; use zksync_types::web3::types::{Address, BlockNumber}; use zksync_types::{ + ethabi::{encode, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, - Execute, L1TxCommonData, PriorityOpId, Transaction, H256, U256, + web3::types::Log, + Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, ProtocolVersionId, + Transaction, H256, U256, }; use super::client::Error; use crate::eth_watch::{client::EthClient, EthWatch}; struct FakeEthClientData { - transactions: HashMap>, + transactions: HashMap>, + upgrades: HashMap>, last_finalized_block_number: u64, } @@ -24,6 +30,7 @@ impl FakeEthClientData { fn new() -> Self { Self { transactions: Default::default(), + upgrades: Default::default(), last_finalized_block_number: 0, } } @@ -32,11 +39,21 @@ impl FakeEthClientData { for transaction in transactions { let eth_block = transaction.eth_block(); self.transactions - .entry(eth_block) + .entry(eth_block.0 as u64) .or_insert_with(Vec::new) - .push(transaction.clone()); + .push(tx_into_log(transaction.clone())); } } + + fn add_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + for (upgrade, eth_block) in upgrades { + self.upgrades + .entry(*eth_block) + .or_insert_with(Vec::new) + .push(upgrade_into_log(upgrade.clone(), *eth_block)); + } + } + fn set_last_finalized_block_number(&mut self, number: u64) { self.last_finalized_block_number = number; } @@ -58,6 +75,10 @@ impl FakeEthClient { self.inner.write().await.add_transactions(transactions); } + async fn add_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_upgrades(upgrades); + } + async fn set_last_finalized_block_number(&mut self, number: u64) { self.inner .write() @@ -67,31 +88,42 @@ impl FakeEthClient { async fn block_to_number(&self, block: BlockNumber) -> u64 { match block { - BlockNumber::Latest => 
unreachable!(), BlockNumber::Earliest => 0, - BlockNumber::Pending => unreachable!(), BlockNumber::Number(number) => number.as_u64(), + BlockNumber::Pending + | BlockNumber::Latest + | BlockNumber::Finalized + | BlockNumber::Safe => unreachable!(), } } } #[async_trait::async_trait] impl EthClient for FakeEthClient { - async fn get_priority_op_events( + async fn get_events( &self, from: BlockNumber, to: BlockNumber, _retries_left: usize, - ) -> Result, Error> { + ) -> Result, Error> { let from = self.block_to_number(from).await; let to = self.block_to_number(to).await; - let mut transactions = vec![]; + let mut logs = vec![]; for number in from..=to { if let Some(ops) = self.inner.read().await.transactions.get(&number) { - transactions.extend_from_slice(ops); + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.upgrades.get(&number) { + logs.extend_from_slice(ops); } } - Ok(transactions) + Ok(logs) + } + + fn set_topics(&mut self, _topics: Vec) {} + + async fn scheduler_vk_hash(&self, _verifier_address: Address) -> Result { + Ok(H256::zero()) } async fn finalized_block_number(&self) -> Result { @@ -99,7 +131,7 @@ impl EthClient for FakeEthClient { } } -fn build_tx(serial_id: u64, eth_block: u64) -> L1Tx { +fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { L1Tx { execute: Execute { contract_address: Address::repeat_byte(0x11), @@ -128,8 +160,34 @@ fn build_tx(serial_id: u64, eth_block: u64) -> L1Tx { } } +fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { + ProtocolUpgradeTx { + execute: Execute { + contract_address: Address::repeat_byte(0x11), + calldata: vec![1, 2, 3], + factory_deps: None, + value: U256::zero(), + }, + common_data: ProtocolUpgradeTxCommonData { + upgrade_id: id, + sender: [1u8; 20].into(), + eth_hash: [2; 32].into(), + eth_block, + gas_limit: Default::default(), + max_fee_per_gas: Default::default(), + gas_per_pubdata_limit: 1u32.into(), + refund_recipient: Address::zero(), + to_mint: Default::default(), + canonical_tx_hash: H256::from_low_u64_be(id as u64), + }, + received_timestamp_ms: 0, + } +} + #[db_test] -async fn test_normal_operation(connection_pool: ConnectionPool) { +async fn test_normal_operation_l1_txs(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( client.clone(), @@ -140,30 +198,136 @@ async fn test_normal_operation(connection_pool: ConnectionPool) { let mut storage = connection_pool.access_test_storage().await; client - .add_transactions(&[build_tx(0, 10), build_tx(1, 14), build_tx(2, 18)]) + .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)]) .await; client.set_last_finalized_block_number(15).await; // second tx will not be processed, as its block is not finalized yet.
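// (The fake client stores these txs as raw `Log`s via `tx_into_log`, so the
// watcher exercises the same ABI-decoding path as with real L1 events.)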
watcher.loop_iteration(&mut storage).await.unwrap(); let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); assert_eq!(db_txs.len(), 2); - let db_tx: L1Tx = db_txs[0].clone().try_into().unwrap(); + let db_tx = db_txs[0].clone(); assert_eq!(db_tx.common_data.serial_id.0, 0); - let db_tx: L1Tx = db_txs[1].clone().try_into().unwrap(); + let db_tx = db_txs[1].clone(); assert_eq!(db_tx.common_data.serial_id.0, 1); client.set_last_finalized_block_number(20).await; // now the second tx will be processed watcher.loop_iteration(&mut storage).await.unwrap(); let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); assert_eq!(db_txs.len(), 3); - let db_tx: L1Tx = db_txs[2].clone().try_into().unwrap(); + let db_tx = db_txs[2].clone(); assert_eq!(db_tx.common_data.serial_id.0, 2); } +#[db_test] +async fn test_normal_operation_upgrades(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + + let mut client = FakeEthClient::new(); + let mut watcher = EthWatch::new( + client.clone(), + &connection_pool, + std::time::Duration::from_nanos(1), + ) + .await; + + let mut storage = connection_pool.access_test_storage().await; + client + .add_upgrades(&[ + ( + ProtocolUpgrade { + id: ProtocolVersionId::latest(), + tx: None, + ..Default::default() + }, + 10, + ), + ( + ProtocolUpgrade { + id: ProtocolVersionId::next(), + tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), + ..Default::default() + }, + 18, + ), + ]) + .await; + client.set_last_finalized_block_number(15).await; + // second upgrade will not be processed, as it has less than 5 confirmations + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_ids = storage.protocol_versions_dal().all_version_ids().await; + // there should be genesis version and just added version + assert_eq!(db_ids.len(), 2); + assert_eq!(db_ids[1], ProtocolVersionId::latest()); + + client.set_last_finalized_block_number(20).await; + // now the second upgrade will be processed + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_ids = storage.protocol_versions_dal().all_version_ids().await; + assert_eq!(db_ids.len(), 3); + assert_eq!(db_ids[2], ProtocolVersionId::next()); + + // check that tx was saved with the last upgrade + let tx = storage + .protocol_versions_dal() + .get_protocol_upgrade_tx(ProtocolVersionId::next()) + .await + .unwrap(); + assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); +} + +#[db_test] +async fn test_gap_in_upgrades(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + + let mut client = FakeEthClient::new(); + let mut watcher = EthWatch::new( + client.clone(), + &connection_pool, + std::time::Duration::from_nanos(1), + ) + .await; + + let mut storage = connection_pool.access_test_storage().await; + client + .add_upgrades(&[( + ProtocolUpgrade { + id: ProtocolVersionId::next(), + tx: None, + ..Default::default() + }, + 10, + )]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_ids = storage.protocol_versions_dal().all_version_ids().await; + // there should be genesis version and just added version + assert_eq!(db_ids.len(), 2); + + let previous_version = (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(); + 
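// Unlike priority ops, protocol upgrade ids may be non-contiguous: the DB
// only held `latest() - 1` (from `setup_db`), yet the proposal for `next()`
// was accepted without panicking, skipping `latest()` entirely.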
let next_version = ProtocolVersionId::next(); + assert_eq!(db_ids[0], previous_version); + assert_eq!(db_ids[1], next_version); +} + #[db_test] #[should_panic] async fn test_gap_in_single_batch(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( client.clone(), @@ -175,11 +339,11 @@ async fn test_gap_in_single_batch(connection_pool: ConnectionPool) { let mut storage = connection_pool.access_test_storage().await; client .add_transactions(&[ - build_tx(0, 10), - build_tx(1, 14), - build_tx(2, 14), - build_tx(3, 14), - build_tx(5, 14), + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 14), + build_l1_tx(5, 14), ]) .await; client.set_last_finalized_block_number(15).await; @@ -189,6 +353,8 @@ async fn test_gap_in_single_batch(connection_pool: ConnectionPool) { #[db_test] #[should_panic] async fn test_gap_between_batches(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( client.clone(), @@ -201,12 +367,12 @@ async fn test_gap_between_batches(connection_pool: ConnectionPool) { client .add_transactions(&[ // this goes to the first batch - build_tx(0, 10), - build_tx(1, 14), - build_tx(2, 14), + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), // this goes to the second batch - build_tx(4, 20), - build_tx(5, 22), + build_l1_tx(4, 20), + build_l1_tx(5, 22), ]) .await; client.set_last_finalized_block_number(15).await; @@ -219,6 +385,8 @@ async fn test_gap_between_batches(connection_pool: ConnectionPool) { #[db_test] async fn test_overlapping_batches(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( client.clone(), @@ -231,14 +399,14 @@ async fn test_overlapping_batches(connection_pool: ConnectionPool) { client .add_transactions(&[ // this goes to the first batch - build_tx(0, 10), - build_tx(1, 14), - build_tx(2, 14), + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), // this goes to the second batch - build_tx(1, 20), - build_tx(2, 22), - build_tx(3, 23), - build_tx(4, 23), + build_l1_tx(1, 20), + build_l1_tx(2, 22), + build_l1_tx(3, 23), + build_l1_tx(4, 23), ]) .await; client.set_last_finalized_block_number(15).await; @@ -249,9 +417,14 @@ async fn test_overlapping_batches(connection_pool: ConnectionPool) { watcher.loop_iteration(&mut storage).await.unwrap(); let db_txs = get_all_db_txs(&mut storage).await; assert_eq!(db_txs.len(), 5); - let tx: L1Tx = db_txs[2].clone().try_into().unwrap(); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); assert_eq!(tx.common_data.serial_id.0, 2); - let tx: L1Tx = db_txs[4].clone().try_into().unwrap(); + let tx = db_txs[4].clone(); assert_eq!(tx.common_data.serial_id.0, 4); } @@ -263,3 +436,200 @@ async fn get_all_db_txs(storage: &mut StorageProcessor<'_>) -> Vec .await .0 } + +fn tx_into_log(tx: L1Tx) -> Log { + let eth_block = tx.eth_block().0.into(); + + let tx_data_token = Token::Tuple(vec![ + Token::Uint(0xff.into()), + Token::Address(tx.common_data.sender), + Token::Address(tx.execute.contract_address), + Token::Uint(tx.common_data.gas_limit), + Token::Uint(tx.common_data.gas_per_pubdata_limit), + Token::Uint(tx.common_data.max_fee_per_gas), + Token::Uint(U256::zero()), + 
Token::Address(Address::zero()), + Token::Uint(tx.common_data.serial_id.0.into()), + Token::Uint(tx.execute.value), + Token::FixedArray(vec![ + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + ]), + Token::Bytes(tx.execute.calldata), + Token::Bytes(Vec::new()), + Token::Array(Vec::new()), + Token::Bytes(Vec::new()), + Token::Bytes(Vec::new()), + ]); + + let data = encode(&[ + Token::Uint(tx.common_data.serial_id.0.into()), + Token::FixedBytes(H256::random().0.to_vec()), + Token::Uint(u64::MAX.into()), + tx_data_token, + Token::Array(Vec::new()), + ]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![zksync_contract() + .event("NewPriorityRequest") + .expect("NewPriorityRequest event is missing in abi") + .signature()], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + } +} + +fn upgrade_into_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + let tx_data_token = if let Some(tx) = upgrade.tx { + Token::Tuple(vec![ + Token::Uint(0xfe.into()), + Token::Address(tx.common_data.sender), + Token::Address(tx.execute.contract_address), + Token::Uint(tx.common_data.gas_limit), + Token::Uint(tx.common_data.gas_per_pubdata_limit), + Token::Uint(tx.common_data.max_fee_per_gas), + Token::Uint(U256::zero()), + Token::Address(Address::zero()), + Token::Uint((tx.common_data.upgrade_id as u16).into()), + Token::Uint(tx.execute.value), + Token::FixedArray(vec![ + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + Token::Uint(U256::zero()), + ]), + Token::Bytes(tx.execute.calldata), + Token::Bytes(Vec::new()), + Token::Array(Vec::new()), + Token::Bytes(Vec::new()), + Token::Bytes(Vec::new()), + ]) + } else { + Token::Tuple(vec![ + Token::Uint(0.into()), + Token::Address(Default::default()), + Token::Address(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::Address(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::FixedArray(vec![ + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + Token::Uint(Default::default()), + ]), + Token::Bytes(Default::default()), + Token::Bytes(Default::default()), + Token::Array(Default::default()), + Token::Bytes(Default::default()), + Token::Bytes(Default::default()), + ]) + }; + + let upgrade_token = Token::Tuple(vec![ + tx_data_token, + Token::Array(Vec::new()), + Token::FixedBytes( + upgrade + .bootloader_code_hash + .unwrap_or_default() + .as_bytes() + .to_vec(), + ), + Token::FixedBytes( + upgrade + .default_account_code_hash + .unwrap_or_default() + .as_bytes() + .to_vec(), + ), + Token::Address(upgrade.verifier_address.unwrap_or_default()), + Token::Tuple(vec![ + Token::FixedBytes( + upgrade + .verifier_params + .unwrap_or_default() + .recursion_node_level_vk_hash + .as_bytes() + .to_vec(), + ), + Token::FixedBytes( + upgrade + .verifier_params + .unwrap_or_default() + .recursion_leaf_level_vk_hash + .as_bytes() + .to_vec(), + ), + Token::FixedBytes( + upgrade + .verifier_params + .unwrap_or_default() + .recursion_circuits_set_vks_hash + .as_bytes() + .to_vec(), + ), + ]), + 
Token::Bytes(Default::default()), + Token::Bytes(Default::default()), + Token::Uint(upgrade.timestamp.into()), + Token::Uint((upgrade.id as u16).into()), + Token::Address(Default::default()), + ]); + + let final_token = Token::Tuple(vec![ + Token::Array(vec![]), + Token::Address(Default::default()), + Token::Bytes( + vec![0u8; 4] + .into_iter() + .chain(encode(&[upgrade_token])) + .collect(), + ), + ]); + + let data = encode(&[final_token, Token::FixedBytes(vec![0u8; 32])]); + Log { + address: Address::repeat_byte(0x1), + topics: vec![zksync_contract() + .event("ProposeTransparentUpgrade") + .expect("ProposeTransparentUpgrade event is missing in abi") + .signature()], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + } +} + +async fn setup_db(connection_pool: &ConnectionPool) { + connection_pool + .access_test_storage() + .await + .protocol_versions_dal() + .save_protocol_version(ProtocolVersion { + id: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), + ..Default::default() + }) + .await; +} diff --git a/core/bin/zksync_core/src/gas_tracker/constants.rs b/core/bin/zksync_core/src/gas_tracker/constants.rs index 7dfc6a95b972..4eb9475cb0f5 100644 --- a/core/bin/zksync_core/src/gas_tracker/constants.rs +++ b/core/bin/zksync_core/src/gas_tracker/constants.rs @@ -1,17 +1,17 @@ // Currently, every AGGR_* cost is overestimated, // so there are safety margins around 100_000 -- 200_000 -pub const AGGR_BLOCK_COMMIT_BASE_COST: u32 = 242_000; -pub const AGGR_BLOCK_PROVE_BASE_COST: u32 = 1_000_000; -pub const AGGR_BLOCK_EXECUTE_BASE_COST: u32 = 241_000; +pub(super) const AGGR_L1_BATCH_COMMIT_BASE_COST: u32 = 242_000; +pub(super) const AGGR_L1_BATCH_PROVE_BASE_COST: u32 = 1_000_000; +pub(super) const AGGR_L1_BATCH_EXECUTE_BASE_COST: u32 = 241_000; -pub const BLOCK_COMMIT_BASE_COST: u32 = 31_000; -pub const BLOCK_PROVE_BASE_COST: u32 = 7_000; -pub const BLOCK_EXECUTE_BASE_COST: u32 = 30_000; +pub(super) const L1_BATCH_COMMIT_BASE_COST: u32 = 31_000; +pub(super) const L1_BATCH_PROVE_BASE_COST: u32 = 7_000; +pub(super) const L1_BATCH_EXECUTE_BASE_COST: u32 = 30_000; -pub const EXECUTE_COMMIT_COST: u32 = 0; -pub const EXECUTE_EXECUTE_COST: u32 = 0; +pub(super) const EXECUTE_COMMIT_COST: u32 = 0; +pub(super) const EXECUTE_EXECUTE_COST: u32 = 0; -pub const L1_OPERATION_EXECUTE_COST: u32 = 12_500; +pub(super) const L1_OPERATION_EXECUTE_COST: u32 = 12_500; -pub const GAS_PER_BYTE: u32 = 18; +pub(super) const GAS_PER_BYTE: u32 = 18; diff --git a/core/bin/zksync_core/src/gas_tracker/mod.rs b/core/bin/zksync_core/src/gas_tracker/mod.rs index 2426a9dad96d..adadfb5c5304 100644 --- a/core/bin/zksync_core/src/gas_tracker/mod.rs +++ b/core/bin/zksync_core/src/gas_tracker/mod.rs @@ -1,46 +1,44 @@ //! This module predicts L1 gas cost for the Commit/PublishProof/Execute operations. 
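// Back-of-the-envelope instance of the commit estimate implemented below,
// using the constants from `constants.rs` above: an L1 batch whose compressed
// writes, L2-to-L1 messages and factory deps total 1_000 calldata bytes costs
//
//     L1_BATCH_COMMIT_BASE_COST + 1_000 * GAS_PER_BYTE = 31_000 + 18_000 = 49_000 gas
//
// (a worked example of `commit_gas_count_for_l1_batch`).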
+use std::collections::HashMap; + use zksync_types::{ aggregated_operations::AggregatedActionType, - block::BlockGasCount, - commitment::BlockWithMetadata, + block::{BlockGasCount, L1BatchHeader}, + commitment::{L1BatchMetadata, L1BatchWithMetadata}, tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, - ExecuteTransactionCommon, Transaction, + ExecuteTransactionCommon, Transaction, H256, }; +mod constants; + use self::constants::*; -pub mod constants; -pub fn agg_block_base_cost(op: AggregatedActionType) -> u32 { +pub fn agg_l1_batch_base_cost(op: AggregatedActionType) -> u32 { match op { - AggregatedActionType::CommitBlocks => AGGR_BLOCK_COMMIT_BASE_COST, - AggregatedActionType::PublishProofBlocksOnchain => AGGR_BLOCK_PROVE_BASE_COST, - AggregatedActionType::ExecuteBlocks => AGGR_BLOCK_EXECUTE_BASE_COST, + AggregatedActionType::Commit => AGGR_L1_BATCH_COMMIT_BASE_COST, + AggregatedActionType::PublishProofOnchain => AGGR_L1_BATCH_PROVE_BASE_COST, + AggregatedActionType::Execute => AGGR_L1_BATCH_EXECUTE_BASE_COST, } } -pub fn block_base_cost(op: AggregatedActionType) -> u32 { +pub fn l1_batch_base_cost(op: AggregatedActionType) -> u32 { match op { - AggregatedActionType::CommitBlocks => BLOCK_COMMIT_BASE_COST, - AggregatedActionType::PublishProofBlocksOnchain => BLOCK_PROVE_BASE_COST, - AggregatedActionType::ExecuteBlocks => BLOCK_EXECUTE_BASE_COST, + AggregatedActionType::Commit => L1_BATCH_COMMIT_BASE_COST, + AggregatedActionType::PublishProofOnchain => L1_BATCH_PROVE_BASE_COST, + AggregatedActionType::Execute => L1_BATCH_EXECUTE_BASE_COST, } } -pub trait GasCost { - fn base_cost(&self, op: AggregatedActionType) -> u32; -} - -impl GasCost for Transaction { - fn base_cost(&self, op: AggregatedActionType) -> u32 { - match op { - AggregatedActionType::CommitBlocks => EXECUTE_COMMIT_COST, - AggregatedActionType::PublishProofBlocksOnchain => 0, - AggregatedActionType::ExecuteBlocks => match self.common_data { - ExecuteTransactionCommon::L2(_) => EXECUTE_EXECUTE_COST, - ExecuteTransactionCommon::L1(_) => L1_OPERATION_EXECUTE_COST, - }, - } +fn base_tx_cost(tx: &Transaction, op: AggregatedActionType) -> u32 { + match op { + AggregatedActionType::Commit => EXECUTE_COMMIT_COST, + AggregatedActionType::PublishProofOnchain => 0, + AggregatedActionType::Execute => match tx.common_data { + ExecuteTransactionCommon::L1(_) => L1_OPERATION_EXECUTE_COST, + ExecuteTransactionCommon::L2(_) => EXECUTE_EXECUTE_COST, + ExecuteTransactionCommon::ProtocolUpgrade(_) => EXECUTE_EXECUTE_COST, + }, } } @@ -54,9 +52,9 @@ fn additional_writes_commit_cost(writes_metrics: &DeduplicatedWritesMetrics) -> pub fn new_block_gas_count() -> BlockGasCount { BlockGasCount { - commit: block_base_cost(AggregatedActionType::CommitBlocks), - prove: block_base_cost(AggregatedActionType::PublishProofBlocksOnchain), - execute: block_base_cost(AggregatedActionType::ExecuteBlocks), + commit: l1_batch_base_cost(AggregatedActionType::Commit), + prove: l1_batch_base_cost(AggregatedActionType::PublishProofOnchain), + execute: l1_batch_base_cost(AggregatedActionType::Execute), } } @@ -64,12 +62,12 @@ pub fn gas_count_from_tx_and_metrics( tx: &Transaction, execution_metrics: &ExecutionMetrics, ) -> BlockGasCount { - let commit = tx.base_cost(AggregatedActionType::CommitBlocks) + let commit = base_tx_cost(tx, AggregatedActionType::Commit) + additional_pubdata_commit_cost(execution_metrics); BlockGasCount { commit, - prove: tx.base_cost(AggregatedActionType::PublishProofBlocksOnchain), - execute: 
tx.base_cost(AggregatedActionType::ExecuteBlocks), + prove: base_tx_cost(tx, AggregatedActionType::PublishProofOnchain), + execute: base_tx_cost(tx, AggregatedActionType::Execute), } } @@ -89,22 +87,27 @@ pub fn gas_count_from_writes(writes_metrics: &DeduplicatedWritesMetrics) -> Bloc } } -pub fn commit_gas_count_for_block(block: &BlockWithMetadata) -> u32 { - let base_cost = block_base_cost(AggregatedActionType::CommitBlocks); - let additional_calldata_bytes = block.metadata.initial_writes_compressed.len() as u32 - + block.metadata.repeated_writes_compressed.len() as u32 - + block.metadata.l2_l1_messages_compressed.len() as u32 - + block - .header - .l2_to_l1_messages - .iter() - .map(|message| message.len() as u32) - .sum::() - + block - .factory_deps - .iter() - .map(|factory_dep| factory_dep.len() as u32) - .sum::(); +pub(crate) fn commit_gas_count_for_l1_batch( + header: &L1BatchHeader, + unsorted_factory_deps: &HashMap>, + metadata: &L1BatchMetadata, +) -> u32 { + let base_cost = l1_batch_base_cost(AggregatedActionType::Commit); + let total_messages_len: u32 = header + .l2_to_l1_messages + .iter() + .map(|message| message.len() as u32) + .sum(); + let sorted_factory_deps = + L1BatchWithMetadata::factory_deps_in_appearance_order(header, unsorted_factory_deps); + let total_factory_deps_len: u32 = sorted_factory_deps + .map(|factory_dep| factory_dep.len() as u32) + .sum(); + let additional_calldata_bytes = metadata.initial_writes_compressed.len() as u32 + + metadata.repeated_writes_compressed.len() as u32 + + metadata.l2_l1_messages_compressed.len() as u32 + + total_messages_len + + total_factory_deps_len; let additional_cost = additional_calldata_bytes * GAS_PER_BYTE; base_cost + additional_cost } diff --git a/core/bin/zksync_core/src/genesis.rs b/core/bin/zksync_core/src/genesis.rs index e6f65cbf77df..e47a6025282c 100644 --- a/core/bin/zksync_core/src/genesis.rs +++ b/core/bin/zksync_core/src/genesis.rs @@ -3,35 +3,33 @@ //! sets up the required databases, and outputs the data required to initialize a smart contract.
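// With the `GenesisParams` refactoring below, genesis is fully parameterized
// by the caller; a minimal invocation (mirroring the test at the bottom of
// this file, with placeholder values) looks like:
//
//     let params = GenesisParams {
//         first_validator: Address::random(),
//         base_system_contracts: BaseSystemContracts::load_from_disk(),
//         system_contracts: get_system_smart_contracts(),
//         first_l1_verifier_config: L1VerifierConfig::default(),
//         first_verifier_address: Address::random(),
//     };
//     ensure_genesis_state(&mut conn, L2ChainId(270), &params).await;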
use vm::zk_evm::aux_structures::{LogQuery, Timestamp}; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_types::{ block::DeployedContract, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, - commitment::{BlockCommitment, BlockMetadata}, + commitment::{L1BatchCommitment, L1BatchMetadata}, get_code_key, get_system_context_init_logs, - system_contracts::get_system_smart_contracts, + protocol_version::{L1VerifierConfig, ProtocolVersion}, tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageLog, StorageLogKind, H256, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, + StorageKey, StorageLog, StorageLogKind, H256, }; -use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash}; -use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient}; - -use crate::{ - metadata_calculator::get_logs_for_l1_batch, sync_layer::genesis::fetch_base_system_contracts, +use zksync_utils::{ + be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash, u256_to_h256, }; +use crate::metadata_calculator::L1BatchWithLogs; + #[derive(Debug, Clone)] -pub enum GenesisParams { - MainNode { - first_validator: Address, - }, - ExternalNode { - main_node_url: String, - base_system_contracts_hashes: BaseSystemContractsHashes, - }, +pub struct GenesisParams { + pub first_validator: Address, + pub base_system_contracts: BaseSystemContracts, + pub system_contracts: Vec, + pub first_verifier_address: Address, + pub first_l1_verifier_config: L1VerifierConfig, } pub async fn ensure_genesis_state( @@ -46,65 +44,41 @@ pub async fn ensure_genesis_state( vlog::debug!("genesis is not needed!"); return transaction .blocks_dal() - .get_block_state_root(L1BatchNumber(0)) + .get_l1_batch_state_root(L1BatchNumber(0)) .await .expect("genesis block hash is empty"); } vlog::info!("running regenesis"); - - // If running main node, load base system contracts from disk. - // If running external node, request contracts and first validator address from the main node. - let (base_system_contracts, first_validator_address) = match genesis_params { - GenesisParams::ExternalNode { - main_node_url, - base_system_contracts_hashes, - } => { - if main_node_url.contains("mainnet") || main_node_url.contains("testnet") { - panic!("EN is still WIP. Can't run genesis for mainnet/testnet"); - } - - // These have to be *initial* base contract hashes of main node - // (those that were used during genesis), not necessarily the current ones. 
- let contracts = - fetch_base_system_contracts(main_node_url, *base_system_contracts_hashes) - .await - .expect("Failed to fetch base system contracts from main node"); - - let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - let first_validator = client - .get_block_details(MiniblockNumber(0)) - .await - .ok() - .flatten() - .expect("Failed to fetch genesis miniblock") - .operator_address; - - (contracts, first_validator) - } - GenesisParams::MainNode { first_validator } => { - (BaseSystemContracts::load_from_disk(), *first_validator) - } - }; + let GenesisParams { + first_validator, + base_system_contracts, + system_contracts, + first_verifier_address, + first_l1_verifier_config, + } = genesis_params; let base_system_contracts_hashes = base_system_contracts.hashes(); - create_genesis_block( + create_genesis_l1_batch( &mut transaction, - first_validator_address, + *first_validator, zksync_chain_id, base_system_contracts, + system_contracts, + *first_l1_verifier_config, + *first_verifier_address, ) .await; vlog::info!("chain_schema_genesis is complete"); - let storage_logs = get_logs_for_l1_batch(&mut transaction, L1BatchNumber(0)).await; + let storage_logs = L1BatchWithLogs::new(&mut transaction, L1BatchNumber(0)).await; let storage_logs = storage_logs.unwrap().storage_logs; let metadata = ZkSyncTree::process_genesis_batch(&storage_logs); let genesis_root_hash = metadata.root_hash; let rollup_last_leaf_index = metadata.leaf_count + 1; - let block_commitment = BlockCommitment::new( + let block_commitment = L1BatchCommitment::new( vec![], rollup_last_leaf_index, genesis_root_hash, @@ -114,7 +88,7 @@ pub async fn ensure_genesis_state( base_system_contracts_hashes.default_aa, ); - save_genesis_block_metadata( + save_genesis_l1_batch_metadata( &mut transaction, &block_commitment, genesis_root_hash, @@ -155,9 +129,9 @@ pub async fn ensure_genesis_state( // because in this case we will have to worry about protecting it. 
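// The helper below persists the bootloader and default account bytecodes,
// keyed by their code hashes, so that the `base_system_contracts_hashes`
// recorded in block headers can later be resolved back to actual bytecode.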
async fn insert_base_system_contracts_to_factory_deps( storage: &mut StorageProcessor<'_>, - contracts: BaseSystemContracts, + contracts: &BaseSystemContracts, ) { - let factory_deps = [contracts.bootloader, contracts.default_aa] + let factory_deps = [&contracts.bootloader, &contracts.default_aa] .iter() .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); @@ -170,14 +144,13 @@ async fn insert_base_system_contracts_to_factory_deps( async fn insert_system_contracts( storage: &mut StorageProcessor<'_>, - contracts: Vec, + contracts: &[DeployedContract], chain_id: L2ChainId, ) { let system_context_init_logs = (H256::default(), get_system_context_init_logs(chain_id)); let storage_logs: Vec<(H256, Vec)> = contracts - .clone() - .into_iter() + .iter() .map(|contract| { let hash = hash_bytecode(&contract.bytecode); let code_key = get_code_key(contract.account_id.address()); @@ -236,9 +209,14 @@ async fn insert_system_contracts( .storage_logs_dedup_dal() .insert_protective_reads(L1BatchNumber(0), &protective_reads) .await; + + let written_storage_keys: Vec<_> = deduplicated_writes + .iter() + .map(|log| StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key))) + .collect(); transaction .storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(0), &deduplicated_writes) + .insert_initial_writes(L1BatchNumber(0), &written_storage_keys) .await; transaction @@ -247,8 +225,8 @@ async fn insert_system_contracts( .await; let factory_deps = contracts - .into_iter() - .map(|c| (hash_bytecode(&c.bytecode), c.bytecode)) + .iter() + .map(|c| (hash_bytecode(&c.bytecode), c.bytecode.clone())) .collect(); transaction .storage_dal() @@ -258,21 +236,34 @@ async fn insert_system_contracts( transaction.commit().await; } -pub(crate) async fn create_genesis_block( +pub(crate) async fn create_genesis_l1_batch( storage: &mut StorageProcessor<'_>, first_validator_address: Address, chain_id: L2ChainId, - base_system_contracts: BaseSystemContracts, + base_system_contracts: &BaseSystemContracts, + system_contracts: &[DeployedContract], + l1_verifier_config: L1VerifierConfig, + verifier_address: Address, ) { - let mut zero_block_header = L1BatchHeader::new( + let version = ProtocolVersion { + id: ProtocolVersionId::latest(), + timestamp: 0, + l1_verifier_config, + base_system_contracts_hashes: base_system_contracts.hashes(), + verifier_address, + tx: None, + }; + + let mut genesis_l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, first_validator_address, base_system_contracts.hashes(), + ProtocolVersionId::latest(), ); - zero_block_header.is_finished = true; + genesis_l1_batch_header.is_finished = true; - let zero_miniblock_header = MiniblockHeader { + let genesis_miniblock_header = MiniblockHeader { number: MiniblockNumber(0), timestamp: 0, hash: miniblock_hash(MiniblockNumber(0)), @@ -282,17 +273,22 @@ pub(crate) async fn create_genesis_block( l1_gas_price: 0, l2_fair_gas_price: 0, base_system_contracts_hashes: base_system_contracts.hashes(), + protocol_version: Some(ProtocolVersionId::latest()), }; let mut transaction = storage.start_transaction().await; + transaction + .protocol_versions_dal() + .save_protocol_version(version) + .await; transaction .blocks_dal() - .insert_l1_batch(&zero_block_header, BlockGasCount::default()) + .insert_l1_batch(&genesis_l1_batch_header, &[], BlockGasCount::default()) .await; transaction .blocks_dal() - .insert_miniblock(&zero_miniblock_header) + .insert_miniblock(&genesis_miniblock_header) .await; transaction .blocks_dal() @@ -300,9 +296,7 @@ 
pub(crate) async fn create_genesis_block( .await; insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts).await; - - let contracts = get_system_smart_contracts(); - insert_system_contracts(&mut transaction, contracts, chain_id).await; + insert_system_contracts(&mut transaction, system_contracts, chain_id).await; add_eth_token(&mut transaction).await; @@ -334,31 +328,31 @@ pub(crate) async fn add_eth_token(storage: &mut StorageProcessor<'_>) { transaction.commit().await; } -pub(crate) async fn save_genesis_block_metadata( +pub(crate) async fn save_genesis_l1_batch_metadata( storage: &mut StorageProcessor<'_>, - block_commitment: &BlockCommitment, + commitment: &L1BatchCommitment, genesis_root_hash: H256, rollup_last_leaf_index: u64, ) { - let block_commitment_hash = block_commitment.hash(); + let commitment_hash = commitment.hash(); - let metadata = BlockMetadata { + let metadata = L1BatchMetadata { root_hash: genesis_root_hash, rollup_last_leaf_index, merkle_root_hash: genesis_root_hash, initial_writes_compressed: vec![], repeated_writes_compressed: vec![], - commitment: block_commitment_hash.commitment, + commitment: commitment_hash.commitment, l2_l1_messages_compressed: vec![], l2_l1_merkle_root: Default::default(), - block_meta_params: block_commitment.meta_parameters(), - aux_data_hash: block_commitment_hash.aux_output, - meta_parameters_hash: block_commitment_hash.meta_parameters, - pass_through_data_hash: block_commitment_hash.pass_through_data, + block_meta_params: commitment.meta_parameters(), + aux_data_hash: commitment_hash.aux_output, + meta_parameters_hash: commitment_hash.meta_parameters, + pass_through_data_hash: commitment_hash.pass_through_data, }; storage .blocks_dal() - .save_block_metadata(L1BatchNumber(0), &metadata) + .save_genesis_l1_batch_metadata(&metadata) .await; } @@ -366,6 +360,7 @@ pub(crate) async fn save_genesis_block_metadata( mod tests { use db_test_macro::db_test; use zksync_dal::ConnectionPool; + use zksync_types::system_contracts::get_system_smart_contracts; use super::*; @@ -374,13 +369,20 @@ mod tests { let mut conn = pool.access_storage().await; conn.blocks_dal().delete_genesis().await; - let params = GenesisParams::MainNode { + let params = GenesisParams { first_validator: Address::random(), + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_l1_verifier_config: L1VerifierConfig::default(), + first_verifier_address: Address::random(), }; ensure_genesis_state(&mut conn, L2ChainId(270), ¶ms).await; assert!(!conn.blocks_dal().is_genesis_needed().await); - let metadata = conn.blocks_dal().get_block_metadata(L1BatchNumber(0)).await; + let metadata = conn + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(0)) + .await; let root_hash = metadata.unwrap().metadata.root_hash; assert_ne!(root_hash, H256::zero()); diff --git a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs index 2a64a4bc10d8..43e713a51c12 100644 --- a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_utils::time::seconds_since_epoch; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct L1BatchMetricsReporter { @@ -23,12 +23,12 @@ impl L1BatchMetricsReporter { let mut 
conn = self.connection_pool.access_storage().await; let mut block_metrics = vec![ ( - conn.blocks_dal().get_sealed_block_number().await, + conn.blocks_dal().get_sealed_l1_batch_number().await, "sealed".to_string(), ), ( conn.blocks_dal() - .get_last_block_number_with_metadata() + .get_last_l1_batch_number_with_metadata() .await, "metadata_calculated".to_string(), ), @@ -42,18 +42,18 @@ impl L1BatchMetricsReporter { let eth_stats = conn.eth_sender_dal().get_eth_l1_batches().await; for (tx_type, l1_batch) in eth_stats.saved { - block_metrics.push((l1_batch, format!("l1_saved_{:?}", tx_type))) + block_metrics.push((l1_batch, format!("l1_saved_{}", tx_type.as_str()))) } for (tx_type, l1_batch) in eth_stats.mined { - block_metrics.push((l1_batch, format!("l1_mined_{:?}", tx_type))) + block_metrics.push((l1_batch, format!("l1_mined_{}", tx_type.as_str()))) } for (l1_batch_number, stage) in block_metrics { metrics::gauge!( "server.block_number", l1_batch_number.0 as f64, - "stage" => stage + "stage" => stage ); } diff --git a/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs index 2b2dc4ce7d54..493dc3294204 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs @@ -1,8 +1,8 @@ use std::time::Duration; -use crate::house_keeper::periodic_job::PeriodicJob; use async_trait::async_trait; use zksync_dal::ConnectionPool; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriProverJobRetryManager { diff --git a/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index 2387f0ffc850..4085e73080d4 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -1,6 +1,6 @@ -use crate::house_keeper::periodic_job::PeriodicJob; use async_trait::async_trait; use zksync_dal::ConnectionPool; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriProverStatsReporter { diff --git a/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs index dc12445045fa..d037c9077d6d 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct SchedulerCircuitQueuer { diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs index eefd4fa9e770..4e63b8e2506d 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs @@ -1,8 +1,8 @@ use std::time::Duration; -use crate::house_keeper::periodic_job::PeriodicJob; use async_trait::async_trait; use zksync_dal::ConnectionPool; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriWitnessGeneratorJobRetryManager { diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs 
b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index c70d024c8a7e..1fb145953464 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_types::proofs::{AggregationRound, JobCountStatistics}; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; const FRI_WITNESS_GENERATOR_SERVICE_NAME: &str = "fri_witness_generator"; diff --git a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs index 9954cc012f80..a575a5a50084 100644 --- a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs +++ b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs @@ -2,7 +2,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; trait AsBlobUrls { fn as_blob_urls(&self) -> (&str, Option<&str>); diff --git a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index 194ead19ee62..7ffdb40b9577 100644 --- a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct GpuProverQueueMonitor { diff --git a/core/bin/zksync_core/src/house_keeper/mod.rs b/core/bin/zksync_core/src/house_keeper/mod.rs index 11cf7d1e272a..ae5c8a991eb2 100644 --- a/core/bin/zksync_core/src/house_keeper/mod.rs +++ b/core/bin/zksync_core/src/house_keeper/mod.rs @@ -6,7 +6,6 @@ pub mod fri_witness_generator_jobs_retry_manager; pub mod fri_witness_generator_queue_monitor; pub mod gcs_blob_cleaner; pub mod gpu_prover_queue_monitor; -pub mod periodic_job; pub mod prover_job_retry_manager; pub mod prover_queue_monitor; pub mod waiting_to_queued_fri_witness_job_mover; diff --git a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs index ab8d3cd8c3ea..7e1259aeea07 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs @@ -3,7 +3,7 @@ use std::time::Duration; use async_trait::async_trait; use zksync_dal::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct ProverJobRetryManager { diff --git a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs index 801c6b51478f..7a2b1769637a 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs @@ -3,7 +3,7 @@ use zksync_config::configs::ProverGroupConfig; use zksync_dal::ConnectionPool; use zksync_prover_utils::circuit_name_to_numeric_index; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub 
struct ProverStatsReporter { diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs index e099caabf5df..995942116bb6 100644 --- a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct WaitingToQueuedFriWitnessJobMover { diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs index 1fb219e2b2f2..9d1bc60de953 100644 --- a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] pub struct WaitingToQueuedWitnessJobMover { diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index 37f2b55f6cab..c6d58d346d5d 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_types::proofs::{AggregationRound, JobCountStatistics}; -use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_prover_utils::periodic_job::PeriodicJob; const WITNESS_GENERATOR_SERVICE_NAME: &str = "witness_generator"; diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index a676d3de2e20..25105507ce62 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -2,15 +2,13 @@ use std::{str::FromStr, sync::Arc, time::Instant}; -use api_server::execution_sandbox::VmConcurrencyLimiter; use futures::channel::oneshot; use tokio::{sync::watch, task::JoinHandle}; -use house_keeper::periodic_job::PeriodicJob; use prometheus_exporter::run_prometheus_exporter; use zksync_circuit_breaker::{ - facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, vks::VksChecker, - CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, + facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, CircuitBreaker, + CircuitBreakerChecker, CircuitBreakerError, }; use zksync_config::configs::{ api::{HealthCheckConfig, Web3JsonRpcConfig}, @@ -18,34 +16,38 @@ use zksync_config::configs::{ self, CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, StateKeeperConfig, }, + database::MerkleTreeMode, house_keeper::HouseKeeperConfig, - FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProverGroupConfig, - WitnessGeneratorConfig, + FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProofDataHandlerConfig, + ProverGroupConfig, WitnessGeneratorConfig, }; use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, FetcherConfig, ProverConfigs, }; -use zksync_contracts::BaseSystemContractsHashes; +use zksync_contracts::BaseSystemContracts; use 
zksync_dal::{ connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool, StorageProcessor, }; use zksync_eth_client::clients::http::QueryClient; use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; -use zksync_health_check::CheckHealth; +use zksync_health_check::{CheckHealth, ReactiveHealthCheck}; use zksync_object_store::ObjectStoreFactory; +use zksync_prover_utils::periodic_job::PeriodicJob; use zksync_queued_job_processor::JobProcessor; -use zksync_state::FactoryDepsCache; -use zksync_types::{proofs::AggregationRound, L2ChainId, PackedEthSignature, H160}; +use zksync_state::PostgresStorageCaches; +use zksync_types::{ + proofs::AggregationRound, + protocol_version::{L1VerifierConfig, VerifierParams}, + system_contracts::get_system_smart_contracts, + Address, L2ChainId, PackedEthSignature, +}; +use zksync_verification_key_server::get_cached_commitments; use crate::api_server::healthcheck::HealthCheckHandle; use crate::api_server::tx_sender::TxSenderConfig; -use crate::api_server::web3::api_health_check::ApiHealthCheck; -use crate::api_server::web3::state::InternalApiConfig; -use crate::api_server::{ - healthcheck, - tx_sender::{TxSender, TxSenderBuilder}, -}; +use crate::api_server::tx_sender::{TxSender, TxSenderBuilder}; +use crate::api_server::web3::{state::InternalApiConfig, Namespace}; use crate::eth_sender::{Aggregator, EthTxManager}; use crate::house_keeper::fri_prover_job_retry_manager::FriProverJobRetryManager; use crate::house_keeper::fri_prover_queue_monitor::FriProverStatsReporter; @@ -62,7 +64,7 @@ use crate::house_keeper::{ }; use crate::l1_gas_price::{GasAdjusterSingleton, L1GasPriceProvider}; use crate::metadata_calculator::{ - MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, TreeHealthCheck, + MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, }; use crate::state_keeper::{create_state_keeper, MempoolFetcher, MempoolGuard, MiniblockSealer}; use crate::witness_generator::{ @@ -70,7 +72,12 @@ use crate::witness_generator::{ node_aggregation::NodeAggregationWitnessGenerator, scheduler::SchedulerWitnessGenerator, }; use crate::{ - api_server::{explorer, web3}, + api_server::{ + contract_verification, + execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, + tx_sender::ApiContracts, + web3, + }, data_fetchers::run_data_fetchers, eth_sender::EthTxAggregator, eth_watch::start_eth_watch, @@ -88,13 +95,18 @@ pub mod genesis; pub mod house_keeper; pub mod l1_gas_price; pub mod metadata_calculator; +pub mod proof_data_handler; pub mod reorg_detector; pub mod state_keeper; pub mod sync_layer; pub mod witness_generator; /// Inserts the initial information about zkSync tokens into the database. -pub async fn genesis_init(eth_sender: ÐSenderConfig, network_config: &NetworkConfig) { +pub async fn genesis_init( + eth_sender: ÐSenderConfig, + network_config: &NetworkConfig, + contracts_config: &ContractsConfig, +) { let mut storage = StorageProcessor::establish_connection(true).await; let operator_address = PackedEthSignature::address_from_private_key( ð_sender @@ -107,9 +119,22 @@ pub async fn genesis_init(eth_sender: ÐSenderConfig, network_config: &Network genesis::ensure_genesis_state( &mut storage, L2ChainId(network_config.zksync_network_id), - &genesis::GenesisParams::MainNode { + &genesis::GenesisParams { // We consider the operator to be the first validator for now. 
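// The `L1VerifierConfig` assembled below pulls all four VK hashes from
// `ContractsConfig`, so the genesis `ProtocolVersion` row matches the
// verifier contract that is actually deployed; later changes are picked up
// by `UpgradesEventProcessor` from on-chain upgrade proposals.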
first_validator: operator_address, + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_verifier_address: contracts_config.verifier_addr, + first_l1_verifier_config: L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: contracts_config.recursion_node_level_vk_hash, + recursion_leaf_level_vk_hash: contracts_config.recursion_leaf_level_vk_hash, + recursion_circuits_set_vks_hash: contracts_config + .recursion_circuits_set_vks_hash, + }, + recursion_scheduler_level_vk_hash: contracts_config + .recursion_scheduler_level_vk_hash, + }, }, ) .await; @@ -144,8 +169,8 @@ pub enum Component { HttpApi, // Public Web3 API (including PubSub) running on WebSocket server. WsApi, - // REST API for explorer. - ExplorerApi, + // REST API for contract verification. + ContractVerificationApi, // Metadata Calculator. Tree, TreeLightweight, @@ -164,6 +189,8 @@ pub enum Component { WitnessGenerator(Option<usize>, AggregationRound), // Component for housekeeping tasks such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, + // Component for exposing APIs to prover for providing proof generation data and accepting proofs. + ProofDataHandler, } #[derive(Debug)] @@ -177,11 +204,11 @@ impl FromStr for Components { "api" => Ok(Components(vec![ Component::HttpApi, Component::WsApi, - Component::ExplorerApi, + Component::ContractVerificationApi, ])), "http_api" => Ok(Components(vec![Component::HttpApi])), "ws_api" => Ok(Components(vec![Component::WsApi])), - "explorer_api" => Ok(Components(vec![Component::ExplorerApi])), + "contract_verification_api" => Ok(Components(vec![Component::ContractVerificationApi])), "tree" | "tree_new" => Ok(Components(vec![Component::Tree])), "tree_lightweight" | "tree_lightweight_new" => { Ok(Components(vec![Component::TreeLightweight])) @@ -230,6 +257,7 @@ impl FromStr for Components { "eth_watcher" => Ok(Components(vec![Component::EthWatcher])), "eth_tx_aggregator" => Ok(Components(vec![Component::EthTxAggregator])), "eth_tx_manager" => Ok(Components(vec![Component::EthTxManager])), + "proof_data_handler" => Ok(Components(vec![Component::ProofDataHandler])), other => Err(format!("{} is not a valid component name", other)), } } @@ -245,19 +273,27 @@ pub async fn initialize_components( HealthCheckHandle, )> { vlog::info!("Starting the components: {components:?}"); - let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; - let prover_connection_pool = ConnectionPool::new(None, DbVariant::Prover).await; - let replica_connection_pool = ConnectionPool::new(None, DbVariant::Replica).await; + + let db_config = DBConfig::from_env(); + let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; + let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover).build().await; + let replica_connection_pool = ConnectionPool::builder(DbVariant::Replica) + .set_statement_timeout(db_config.statement_timeout()) + .build() + .await; + + let mut healthchecks: Vec<Box<dyn CheckHealth>> = Vec::new(); let contracts_config = ContractsConfig::from_env(); let eth_client_config = ETHClientConfig::from_env(); let circuit_breaker_config = CircuitBreakerConfig::from_env(); + + let main_zksync_contract_address = contracts_config.diamond_proxy_addr; let circuit_breaker_checker = CircuitBreakerChecker::new( circuit_breakers_for_components( &components, &eth_client_config.web3_url, &circuit_breaker_config, - contracts_config.diamond_proxy_addr, + main_zksync_contract_address, ) .await,
&circuit_breaker_config, @@ -286,14 +322,9 @@ pub async fn initialize_components( tokio::spawn(circuit_breaker_checker.run(cb_sender, stop_receiver.clone())), ]; - let factory_deps_cache = FactoryDepsCache::new( - "factory_deps_cache", - Web3JsonRpcConfig::from_env().factory_deps_cache_size_mb(), - ); - if components.contains(&Component::WsApi) || components.contains(&Component::HttpApi) - || components.contains(&Component::ExplorerApi) + || components.contains(&Component::ContractVerificationApi) { let api_config = ApiConfig::from_env(); let state_keeper_config = StateKeeperConfig::from_env(); @@ -304,7 +335,19 @@ pub async fn initialize_components( &api_config.web3_json_rpc, &contracts_config, ); + + // Lazily initialize storage caches only when they are needed (e.g., skip their initialization + // if we only run the explorer APIs). This is required because the cache update task will + // terminate immediately if storage caches are dropped, which will lead to the (unexpected) + // program termination. + let mut storage_caches = None; + if components.contains(&Component::HttpApi) { + storage_caches = Some(build_storage_caches( + &replica_connection_pool, + &mut task_futures, + )); + let started_at = Instant::now(); vlog::info!("initializing HTTP API"); let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await; @@ -318,7 +361,7 @@ pub async fn initialize_components( stop_receiver.clone(), bounded_gas_adjuster.clone(), state_keeper_config.save_call_traces, - factory_deps_cache.clone(), + storage_caches.clone().unwrap(), ) .await; task_futures.extend(futures); @@ -328,6 +371,10 @@ pub async fn initialize_components( } if components.contains(&Component::WsApi) { + let storage_caches = storage_caches.unwrap_or_else(|| { + build_storage_caches(&replica_connection_pool, &mut task_futures) + }); + let started_at = Instant::now(); vlog::info!("initializing WS API"); let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await; @@ -340,7 +387,7 @@ pub async fn initialize_components( connection_pool.clone(), replica_connection_pool.clone(), stop_receiver.clone(), - factory_deps_cache.clone(), + storage_caches, ) .await; task_futures.extend(futures); @@ -349,22 +396,20 @@ pub async fn initialize_components( metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "ws_api"); } - if components.contains(&Component::ExplorerApi) { + if components.contains(&Component::ContractVerificationApi) { let started_at = Instant::now(); - vlog::info!("initializing explorer REST API"); - task_futures.push(explorer::start_server_thread_detached( - api_config.explorer.clone(), - contracts_config.l2_erc20_bridge_addr, - state_keeper_config.fee_account_addr, + vlog::info!("initializing contract verification REST API"); + task_futures.push(contract_verification::start_server_thread_detached( connection_pool.clone(), replica_connection_pool.clone(), + api_config.contract_verification.clone(), stop_receiver.clone(), )); vlog::info!( - "initialized explorer REST API in {:?}", + "initialized contract verification REST API in {:?}", started_at.elapsed() ); - metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "explorer_api"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "contract_verification_api"); } } @@ -376,7 +421,7 @@ pub async fn initialize_components( &mut task_futures, &contracts_config, StateKeeperConfig::from_env(), - &DBConfig::from_env(), + &db_config, &MempoolConfig::from_env(), bounded_gas_adjuster, stop_receiver.clone(), @@ -389,12 
+434,12 @@ pub async fn initialize_components( if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); vlog::info!("initializing ETH-Watcher"); - let eth_watch_pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let eth_watch_pool = ConnectionPool::singleton(DbVariant::Master).build().await; task_futures.push( start_eth_watch( eth_watch_pool, query_client.clone(), - contracts_config.diamond_proxy_addr, + main_zksync_contract_address, stop_receiver.clone(), ) .await, @@ -406,8 +451,8 @@ pub async fn initialize_components( if components.contains(&Component::EthTxAggregator) { let started_at = Instant::now(); vlog::info!("initializing ETH-TxAggregator"); - let eth_sender_storage = ConnectionPool::new(Some(1), DbVariant::Master).await; - let eth_sender_prover_storage = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let eth_sender_pool = ConnectionPool::singleton(DbVariant::Master).build().await; + let eth_sender_prover_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; let eth_sender = ETHSenderConfig::from_env(); let eth_client = @@ -417,11 +462,13 @@ pub async fn initialize_components( eth_sender.sender.clone(), Aggregator::new(eth_sender.sender.clone()), contracts_config.validator_timelock_addr, + contracts_config.l1_multicall3_addr, + main_zksync_contract_address, nonce.as_u64(), ); task_futures.push(tokio::spawn(eth_tx_aggregator_actor.run( - eth_sender_storage.clone(), - eth_sender_prover_storage.clone(), + eth_sender_pool, + eth_sender_prover_pool, eth_client, stop_receiver.clone(), ))); @@ -432,7 +479,7 @@ pub async fn initialize_components( if components.contains(&Component::EthTxManager) { let started_at = Instant::now(); vlog::info!("initializing ETH-TxManager"); - let eth_sender_storage = ConnectionPool::new(Some(1), DbVariant::Master).await; + let eth_manager_pool = ConnectionPool::singleton(DbVariant::Master).build().await; let eth_sender = ETHSenderConfig::from_env(); let eth_client = PKSigningClient::from_config(&eth_sender, &contracts_config, &eth_client_config); @@ -442,7 +489,7 @@ pub async fn initialize_components( eth_client, ); task_futures.extend([tokio::spawn( - eth_tx_manager_actor.run(eth_sender_storage, stop_receiver.clone()), + eth_tx_manager_actor.run(eth_manager_pool, stop_receiver.clone()), )]); vlog::info!("initialized ETH-TxManager in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); @@ -469,7 +516,7 @@ pub async fn initialize_components( &mut healthchecks, &components, &store_factory, - &stop_receiver, + stop_receiver.clone(), ) .await; add_witness_generator_to_task_futures( @@ -486,6 +533,15 @@ pub async fn initialize_components( add_house_keeper_to_task_futures(&mut task_futures, &store_factory).await; } + if components.contains(&Component::ProofDataHandler) { + task_futures.push(tokio::spawn(proof_data_handler::run_server( + ProofDataHandlerConfig::from_env(), + store_factory.create_store().await, + connection_pool.clone(), + stop_receiver.clone(), + ))); + } + // Run healthcheck server for all components.
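// (All checks collected in `healthchecks`, including the database connectivity check below, are served from a single healthcheck endpoint.)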
healthchecks.push(Box::new(ConnectionPoolHealthCheck::new( replica_connection_pool, @@ -493,7 +549,7 @@ let healtcheck_api_config = HealthCheckConfig::from_env(); let health_check_handle = - healthcheck::start_server_thread_detached(healtcheck_api_config.bind_addr(), healthchecks); + HealthCheckHandle::spawn_server(healtcheck_api_config.bind_addr(), healthchecks); if let Some(task) = gas_adjuster.run_if_initialized(stop_receiver.clone()) { task_futures.push(task); @@ -511,7 +567,8 @@ async fn add_state_keeper_to_task_futures( stop_receiver: watch::Receiver<bool>, ) { let fair_l2_gas_price = state_keeper_config.fair_l2_gas_price; - let state_keeper_pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let pool_builder = ConnectionPool::singleton(DbVariant::Master); + let state_keeper_pool = pool_builder.build().await; let next_priority_id = state_keeper_pool .access_storage() .await @@ -520,7 +577,7 @@ async fn add_state_keeper_to_task_futures async fn add_trees_to_task_futures( task_futures: &mut Vec<JoinHandle<()>>, healthchecks: &mut Vec<Box<dyn CheckHealth>>, components: &[Component], store_factory: &ObjectStoreFactory, - stop_receiver: &watch::Receiver<bool>, + stop_receiver: watch::Receiver<bool>, ) { - let db_config = DBConfig::from_env(); - let operation_config = OperationsManagerConfig::from_env(); - const COMPONENTS_TO_MODES: &[(Component, bool)] = - &[(Component::Tree, true), (Component::TreeLightweight, false)]; - if components.contains(&Component::TreeBackup) { panic!("Tree backup mode is disabled"); } - if components.contains(&Component::Tree) && components.contains(&Component::TreeLightweight) { - panic!( + + let db_config = DBConfig::from_env(); + let operation_config = OperationsManagerConfig::from_env(); + let has_tree_component = components.contains(&Component::Tree); + let has_lightweight_component = components.contains(&Component::TreeLightweight); + let mode = match (has_tree_component, has_lightweight_component) { + (true, true) => panic!( "Cannot start a node with a Merkle tree in both full and lightweight modes. \ Since the storage layout is mode-independent, choose either of modes and run \ the node with it." - ); - } - - for &(component, is_full) in COMPONENTS_TO_MODES { - if components.contains(&component) { - let mode = if is_full { - MetadataCalculatorModeConfig::Full { store_factory } - } else { - MetadataCalculatorModeConfig::Lightweight - }; - let (future, tree_health_check) = - run_tree(&db_config, &operation_config, mode, stop_receiver.clone()).await; - task_futures.push(future); - healthchecks.push(Box::new(tree_health_check)); - } - } + ), + (false, true) => MetadataCalculatorModeConfig::Lightweight, + (true, false) => match db_config.merkle_tree.mode { + MerkleTreeMode::Lightweight => MetadataCalculatorModeConfig::Lightweight, + MerkleTreeMode::Full => MetadataCalculatorModeConfig::Full { store_factory }, + }, + (false, false) => return, + }; + let (future, tree_health_check) = + run_tree(&db_config, &operation_config, mode, stop_receiver).await; + task_futures.push(future); + healthchecks.push(Box::new(tree_health_check)); } async fn run_tree( @@ -596,7 +649,7 @@ async fn run_tree( operation_manager: &OperationsManagerConfig, mode: MetadataCalculatorModeConfig<'_>, stop_receiver: watch::Receiver<bool>, -) -> (JoinHandle<()>, TreeHealthCheck) { +) -> (JoinHandle<()>, ReactiveHealthCheck) { let started_at = Instant::now(); let mode_str = if matches!(mode, MetadataCalculatorModeConfig::Full { ..
}) { "full" @@ -608,20 +661,16 @@ async fn run_tree( } else { "lightweight" }; let config = MetadataCalculatorConfig::for_main_node(config, operation_manager, mode); let metadata_calculator = MetadataCalculator::new(&config).await; let tree_health_check = metadata_calculator.tree_health_check(); - let tree_tag = metadata_calculator.tree_tag(); - let pool = ConnectionPool::new(Some(1), DbVariant::Master).await; - let prover_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let pool = ConnectionPool::singleton(DbVariant::Master).build().await; + let prover_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; let future = tokio::spawn(metadata_calculator.run(pool, prover_pool, stop_receiver)); - vlog::info!( - "Initialized `{tree_tag}` tree in {:?}", - started_at.elapsed() - ); + vlog::info!("Initialized {mode_str} tree in {:?}", started_at.elapsed()); metrics::gauge!( "server.init.latency", started_at.elapsed(), "stage" => "tree", - "tree" => tree_tag + "tree" => mode_str ); (future, tree_health_check) } @@ -647,6 +696,14 @@ async fn add_witness_generator_to_task_futures( } }); + let vk_commitments = get_cached_commitments(); + let protocol_versions = prover_connection_pool + .access_storage() + .await + .protocol_versions_dal() + .protocol_version_for(&vk_commitments) + .await; + for (batch_size, component_type) in generator_params { let started_at = Instant::now(); vlog::info!( @@ -659,6 +716,7 @@ async fn add_witness_generator_to_task_futures( let witness_generator = BasicWitnessGenerator::new( config, store_factory, + protocol_versions.clone(), connection_pool.clone(), prover_connection_pool.clone(), ) @@ -669,6 +727,7 @@ async fn add_witness_generator_to_task_futures( let witness_generator = LeafAggregationWitnessGenerator::new( config, store_factory, + protocol_versions.clone(), connection_pool.clone(), prover_connection_pool.clone(), ) @@ -679,6 +738,7 @@ async fn add_witness_generator_to_task_futures( let witness_generator = NodeAggregationWitnessGenerator::new( config, store_factory, + protocol_versions.clone(), connection_pool.clone(), prover_connection_pool.clone(), ) @@ -689,6 +749,7 @@ async fn add_witness_generator_to_task_futures( let witness_generator = SchedulerWitnessGenerator::new( config, store_factory, + protocol_versions.clone(), connection_pool.clone(), prover_connection_pool.clone(), ) @@ -715,17 +776,16 @@ async fn add_house_keeper_to_task_futures( store_factory: &ObjectStoreFactory, ) { let house_keeper_config = HouseKeeperConfig::from_env(); - let connection_pool = ConnectionPool::new(Some(1), DbVariant::Replica).await; + let connection_pool = ConnectionPool::singleton(DbVariant::Replica).build().await; let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( house_keeper_config.l1_batch_metrics_reporting_interval_ms, connection_pool, ); - let prover_connection_pool = ConnectionPool::new( - Some(house_keeper_config.prover_db_pool_size), - DbVariant::Prover, - ) - .await; + let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover) + .set_max_size(Some(house_keeper_config.prover_db_pool_size)) + .build() + .await; let gpu_prover_queue = GpuProverQueueMonitor::new( ProverGroupConfig::from_env().synthesizer_per_gpu, house_keeper_config.gpu_prover_queue_reporting_interval_ms, @@ -809,6 +869,28 @@ async fn add_house_keeper_to_task_futures( task_futures.push(tokio::spawn(fri_prover_stats_reporter.run())); } +fn build_storage_caches( + replica_connection_pool: &ConnectionPool, + task_futures: &mut Vec<JoinHandle<()>>, +) -> PostgresStorageCaches { + let rpc_config = 
Web3JsonRpcConfig::from_env(); + let factory_deps_capacity = rpc_config.factory_deps_cache_size() as u64; + let initial_writes_capacity = rpc_config.initial_writes_cache_size() as u64; + let values_capacity = rpc_config.latest_values_cache_size() as u64; + let mut storage_caches = + PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); + + if values_capacity > 0 { + let values_cache_task = storage_caches.configure_storage_values_cache( + values_capacity, + replica_connection_pool.clone(), + tokio::runtime::Handle::current(), + ); + task_futures.push(tokio::task::spawn_blocking(values_cache_task)); + } + storage_caches +} + async fn build_tx_sender( @@ -816,8 +898,8 @@ async fn build_tx_sender( replica_pool: ConnectionPool, master_pool: ConnectionPool, l1_gas_price_provider: Arc<G>, - factory_deps_cache: FactoryDepsCache, -) -> TxSender<G> { + storage_caches: PostgresStorageCaches, +) -> (TxSender<G>, VmConcurrencyBarrier) { let mut tx_sender_builder = TxSenderBuilder::new(tx_sender_config.clone(), replica_pool) .with_main_connection_pool(master_pool) .with_state_keeper_config(state_keeper_config.clone()); @@ -827,16 +909,18 @@ async fn build_tx_sender( tx_sender_builder = tx_sender_builder.with_rate_limiter(transactions_per_sec_limit); }; - let vm_concurrency_limiter = VmConcurrencyLimiter::new(web3_json_config.vm_concurrency_limit); + let max_concurrency = web3_json_config.vm_concurrency_limit(); + let (vm_concurrency_limiter, vm_barrier) = VmConcurrencyLimiter::new(max_concurrency); - tx_sender_builder + let tx_sender = tx_sender_builder .build( l1_gas_price_provider, - tx_sender_config.default_aa, Arc::new(vm_concurrency_limiter), - factory_deps_cache, + ApiContracts::load_from_disk(), + storage_caches, ) - .await + .await; + (tx_sender, vm_barrier) } #[allow(clippy::too_many_arguments)] @@ -850,38 +934,35 @@ async fn run_http_api( stop_receiver: watch::Receiver<bool>, gas_adjuster: Arc<G>, with_debug_namespace: bool, - factory_deps_cache: FactoryDepsCache, -) -> (Vec<JoinHandle<()>>, ApiHealthCheck) { - let tx_sender = build_tx_sender( + storage_caches: PostgresStorageCaches, +) -> (Vec<JoinHandle<()>>, ReactiveHealthCheck) { + let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, state_keeper_config, replica_connection_pool.clone(), - master_connection_pool.clone(), + master_connection_pool, gas_adjuster, - factory_deps_cache.clone(), + storage_caches, ) .await; - let mut builder = - web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) - .http(api_config.web3_json_rpc.http_port) - .with_filter_limit(api_config.web3_json_rpc.filters_limit()) - .with_threads(api_config.web3_json_rpc.http_server_threads()) - .with_tx_sender(tx_sender); - - if with_debug_namespace { - builder = builder.enable_debug_namespace( - BaseSystemContractsHashes { - bootloader: tx_sender_config.bootloader, - default_aa: tx_sender_config.default_aa, - }, - tx_sender_config.fair_l2_gas_price, - api_config.web3_json_rpc.vm_execution_cache_misses_limit, - ) - } + let namespaces = if with_debug_namespace { + Namespace::ALL.to_vec() + } else { + Namespace::NON_DEBUG.to_vec() + }; - builder.build(stop_receiver.clone()).await + web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) + .http(api_config.web3_json_rpc.http_port) + .with_filter_limit(api_config.web3_json_rpc.filters_limit()) + .with_threads(api_config.web3_json_rpc.http_server_threads()) + 
.with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) + .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) + .with_tx_sender(tx_sender, vm_barrier) + .enable_api_namespaces(namespaces) + .build(stop_receiver.clone()) + .await } #[allow(clippy::too_many_arguments)] @@ -894,16 +975,16 @@ async fn run_ws_api( master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver<bool>, - factory_deps_cache: FactoryDepsCache, -) -> (Vec<JoinHandle<()>>, ApiHealthCheck) { - let tx_sender = build_tx_sender( + storage_caches: PostgresStorageCaches, +) -> (Vec<JoinHandle<()>>, ReactiveHealthCheck) { + let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, state_keeper_config, replica_connection_pool.clone(), - master_connection_pool.clone(), + master_connection_pool, gas_adjuster, - factory_deps_cache.clone(), + storage_caches, ) .await; @@ -911,9 +992,12 @@ async fn run_ws_api( .ws(api_config.web3_json_rpc.ws_port) .with_filter_limit(api_config.web3_json_rpc.filters_limit()) .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit()) + .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) + .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) .with_threads(api_config.web3_json_rpc.ws_server_threads()) - .with_tx_sender(tx_sender) + .with_tx_sender(tx_sender, vm_barrier) + .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()) .build(stop_receiver.clone()) .await } @@ -922,7 +1006,7 @@ async fn circuit_breakers_for_components( components: &[Component], web3_url: &str, circuit_breaker_config: &CircuitBreakerConfig, - main_contract: H160, + main_contract: Address, ) -> Vec<Box<dyn CircuitBreaker>> { let mut circuit_breakers: Vec<Box<dyn CircuitBreaker>> = Vec::new(); @@ -932,23 +1016,8 @@ async fn circuit_breakers_for_components( if components.iter().any(|c| { matches!( c, Component::EthTxAggregator | Component::EthTxManager | Component::StateKeeper ) }) { - circuit_breakers.push(Box::new(FailedL1TransactionChecker { - pool: ConnectionPool::new(Some(1), DbVariant::Replica).await, - })); - } - - if components.iter().any(|c| { - matches!( - c, - Component::EthTxAggregator | Component::EthTxManager | Component::TreeBackup - ) - }) { - let eth_client = QueryClient::new(web3_url).unwrap(); - circuit_breakers.push(Box::new(VksChecker::new( - circuit_breaker_config, - eth_client, - main_contract, - ))); + let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; + circuit_breakers.push(Box::new(FailedL1TransactionChecker { pool })); } if components diff --git a/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs b/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs deleted file mode 100644 index 751495156c9f..000000000000 --- a/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs +++ /dev/null @@ -1,39 +0,0 @@ -use async_trait::async_trait; -use tokio::sync::watch; -use zksync_health_check::{CheckHealth, CheckHealthStatus}; - -use super::{MetadataCalculatorMode, MetadataCalculatorStatus}; - -/// HealthCheck used to verify if the tree(MetadataCalculator) is ready. -/// This guarantees that we mark a tree as ready only when it can start processing blocks.
-/// Used in the /health endpoint -#[derive(Clone, Debug)] -pub struct TreeHealthCheck { - receiver: watch::Receiver<MetadataCalculatorStatus>, - tree_mode: MetadataCalculatorMode, -} - -impl TreeHealthCheck { - pub(super) fn new( - receiver: watch::Receiver<MetadataCalculatorStatus>, - tree_mode: MetadataCalculatorMode, - ) -> TreeHealthCheck { - TreeHealthCheck { - receiver, - tree_mode, - } - } -} - -#[async_trait] -impl CheckHealth for TreeHealthCheck { - async fn check_health(&self) -> CheckHealthStatus { - match *self.receiver.borrow() { - MetadataCalculatorStatus::Ready => CheckHealthStatus::Ready, - MetadataCalculatorStatus::NotReady => CheckHealthStatus::NotReady(format!( - "{} tree is not ready", - self.tree_mode.as_tag() - )), - } - } -} diff --git a/core/bin/zksync_core/src/metadata_calculator/helpers.rs b/core/bin/zksync_core/src/metadata_calculator/helpers.rs index 5eaa20cbf649..925adbe50531 100644 --- a/core/bin/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/bin/zksync_core/src/metadata_calculator/helpers.rs @@ -1,15 +1,40 @@ //! Various helpers for the metadata calculator. +use serde::Serialize; #[cfg(test)] use tokio::sync::mpsc; -use std::{collections::BTreeMap, future::Future, mem, time::Duration}; +use std::{ + collections::BTreeMap, + future::Future, + mem, + path::{Path, PathBuf}, + time::Duration, +}; +use zksync_config::configs::database::MerkleTreeMode; use zksync_dal::StorageProcessor; -use zksync_merkle_tree::domain::{TreeMetadata, ZkSyncTree}; -use zksync_types::{ - block::WitnessBlockWithLogs, L1BatchNumber, StorageKey, StorageLog, WitnessStorageLog, H256, +use zksync_health_check::{Health, HealthStatus}; +use zksync_merkle_tree::{ + domain::{TreeMetadata, ZkSyncTree}, + MerkleTreeColumnFamily, }; +use zksync_storage::RocksDB; +use zksync_types::{block::L1BatchHeader, L1BatchNumber, StorageLog, H256}; + +use super::metrics::{LoadChangesStage, ReportStage, TreeUpdateStage}; + +#[derive(Debug, Serialize)] +pub(super) struct TreeHealthCheckDetails { + pub mode: MerkleTreeMode, + pub next_l1_batch_to_seal: L1BatchNumber, +} + +impl From<TreeHealthCheckDetails> for Health { + fn from(details: TreeHealthCheckDetails) -> Self { + Self::from(HealthStatus::Ready).with_details(details) + } } /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. /// @@ -26,10 +51,43 @@ impl AsyncTree { const INCONSISTENT_MSG: &'static str = "`ZkSyncTree` is in inconsistent state, which could occur after one of its blocking futures was cancelled"; - pub fn new(tree: ZkSyncTree) -> Self { + pub async fn new( + db_path: PathBuf, + mode: MerkleTreeMode, + multi_get_chunk_size: usize, + block_cache_capacity: usize, + ) -> Self { + vlog::info!( + "Initializing Merkle tree at `{db_path}` with {multi_get_chunk_size} multi-get chunk size, \ + {block_cache_capacity}B block cache", + db_path = db_path.display() + ); + + let mut tree = tokio::task::spawn_blocking(move || { + let db = Self::create_db(&db_path, block_cache_capacity); + match mode { + MerkleTreeMode::Full => ZkSyncTree::new(db), + MerkleTreeMode::Lightweight => ZkSyncTree::new_lightweight(db), + } + }) + .await + .unwrap(); + + tree.set_multi_get_chunk_size(multi_get_chunk_size); Self(Some(tree)) } + fn create_db(path: &Path, block_cache_capacity: usize) -> RocksDB<MerkleTreeColumnFamily> { + let db = RocksDB::with_cache(path, true, Some(block_cache_capacity)); + if cfg!(test) { + // We need sync writes for the unit tests to execute reliably. With the default config, + // some writes to RocksDB may occur, but not be visible to the test code.
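+ // Sync writes flush the write-ahead log on every write; that is much slower, which is why they stay disabled outside tests.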
+ db.with_sync_writes() + } else { + db + } + } + fn as_ref(&self) -> &ZkSyncTree { self.0.as_ref().expect(Self::INCONSISTENT_MSG) } @@ -42,38 +100,18 @@ impl AsyncTree { self.as_ref().is_empty() } - pub fn block_number(&self) -> u32 { - self.as_ref().block_number() + pub fn next_l1_batch_number(&self) -> L1BatchNumber { + self.as_ref().next_l1_batch_number() } pub fn root_hash(&self) -> H256 { self.as_ref().root_hash() } - pub async fn process_block(&mut self, block: Vec<WitnessStorageLog>) -> TreeMetadata { - let mut tree = mem::take(self); - let (tree, metadata) = tokio::task::spawn_blocking(move || { - let metadata = tree.as_mut().process_block(&block); - (tree, metadata) - }) - .await - .unwrap(); - - *self = tree; - metadata - } - - pub async fn process_blocks( - &mut self, - blocks: Vec<Vec<WitnessStorageLog>>, - ) -> Vec<TreeMetadata> { + pub async fn process_l1_batch(&mut self, storage_logs: Vec<StorageLog>) -> TreeMetadata { let mut tree = mem::take(self); let (tree, metadata) = tokio::task::spawn_blocking(move || { - tree.as_mut().reset(); // For compatibility with the old implementation - let metadata = blocks - .iter() - .map(|block| tree.as_mut().process_block(block)) - .collect(); + let metadata = tree.as_mut().process_l1_batch(&storage_logs); (tree, metadata) }) .await @@ -92,18 +130,22 @@ impl AsyncTree { .await .unwrap(); } + + pub fn revert_logs(&mut self, last_l1_batch_to_keep: L1BatchNumber) { + self.as_mut().revert_logs(last_l1_batch_to_keep); + } } /// Component implementing the delay policy in [`MetadataCalculator`] when there are no -/// blocks to seal. +/// L1 batches to seal. #[derive(Debug, Clone)] pub(super) struct Delayer { delay_interval: Duration, - // Notifies the tests about the block count and tree root hash when the calculator - // runs out of blocks to process. (Since RocksDB is exclusive, we cannot just create + // Notifies the tests about the next L1 batch number and tree root hash when the calculator + // runs out of L1 batches to process. (Since RocksDB is exclusive, we cannot just create // another instance to check these params on the test side without stopping the calc.) #[cfg(test)] - pub delay_notifier: mpsc::UnboundedSender<(u32, H256)>, + pub delay_notifier: mpsc::UnboundedSender<(L1BatchNumber, H256)>, } impl Delayer { @@ -119,77 +161,383 @@ impl Delayer { pub fn wait(&self, tree: &AsyncTree) -> impl Future<Output = ()> { #[cfg(test)] self.delay_notifier - .send((tree.block_number(), tree.root_hash())) + .send((tree.next_l1_batch_number(), tree.root_hash())) .ok(); tokio::time::sleep(self.delay_interval) } } -pub(crate) async fn get_logs_for_l1_batch( - storage: &mut StorageProcessor<'_>, - l1_batch_number: L1BatchNumber, -) -> Option<WitnessBlockWithLogs> { - let header = storage - .blocks_dal() - .get_block_header(l1_batch_number) - .await?; - - // `BTreeMap` is used because tree needs to process slots in lexicographical order.
- let mut storage_logs: BTreeMap<StorageKey, WitnessStorageLog> = BTreeMap::new(); - - let protective_reads = storage - .storage_logs_dedup_dal() - .get_protective_reads_for_l1_batch(l1_batch_number) - .await; - let touched_slots = storage - .storage_logs_dal() - .get_touched_slots_for_l1_batch(l1_batch_number) - .await; +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub(crate) struct L1BatchWithLogs { + pub header: L1BatchHeader, + pub storage_logs: Vec<StorageLog>, +} + +impl L1BatchWithLogs { + pub async fn new( + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> Option<Self> { + vlog::debug!("Loading storage logs data for L1 batch #{l1_batch_number}"); + let load_changes_latency = TreeUpdateStage::LoadChanges.start(); + + let header_latency = LoadChangesStage::L1BatchHeader.start(); + let header = storage + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await?; + header_latency.report(); + + let protective_reads_latency = LoadChangesStage::ProtectiveReads.start(); + let protective_reads = storage + .storage_logs_dedup_dal() + .get_protective_reads_for_l1_batch(l1_batch_number) + .await; + protective_reads_latency.report_with_count(protective_reads.len()); + + let touched_slots_latency = LoadChangesStage::TouchedSlots.start(); + let mut touched_slots = storage + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number) + .await; + touched_slots_latency.report_with_count(touched_slots.len()); - let hashed_keys: Vec<_> = protective_reads - .iter() - .chain(touched_slots.keys()) - .map(StorageKey::hashed_key) - .collect(); - let previous_values = storage - .storage_logs_dal() - .get_previous_storage_values(&hashed_keys, l1_batch_number) + let mut storage_logs = BTreeMap::new(); + for storage_key in protective_reads { + touched_slots.remove(&storage_key); + // ^ As per deduplication rules, all keys in `protective_reads` haven't *really* changed + // in the considered L1 batch. Thus, we can remove them from `touched_slots` in order to simplify + // their further processing. + + let log = StorageLog::new_read_log(storage_key, H256::zero()); + // ^ The tree doesn't use the read value, so we set it to zero. + storage_logs.insert(storage_key, log); + } + vlog::debug!( + "Made touched slots disjoint with protective reads; remaining touched slots: {}", + touched_slots.len() + ); + + // We don't want to update the tree with zero values which were never written to per storage log + // deduplication rules. If we write such values to the tree, it'd result in bogus tree hashes because + // new (bogus) leaf indices would be allocated for them. To filter out those values, it's sufficient + // to check when a `storage_key` was first written per `initial_writes` table. If this never occurred + // or occurred after the considered `l1_batch_number`, this means that the write must be ignored. + // + // Note that this approach doesn't filter out no-op writes of the same value, but this is fine; + // since no new leaf indices are allocated in the tree for them, such writes are no-op on the tree side as well. + let hashed_keys_for_zero_values: Vec<_> = touched_slots + .iter() + .filter_map(|(key, value)| { + // Only zero values are worth checking for initial writes; non-zero values are always + // written per deduplication rules.
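+ // (E.g., a write of zero to a slot that was never written before deduplicates to nothing; allocating a tree leaf for it would corrupt leaf indices.)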
+ value.is_zero().then(|| key.hashed_key()) + }) + .collect(); + metrics::histogram!( + "server.metadata_calculator.load_changes.zero_values", + hashed_keys_for_zero_values.len() as f64 + ); + + let latency = LoadChangesStage::InitialWritesForZeroValues.start(); + let l1_batches_for_initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&hashed_keys_for_zero_values) + .await; + latency.report_with_count(hashed_keys_for_zero_values.len()); + + for (storage_key, value) in touched_slots { + let write_matters = if value.is_zero() { + let initial_write_batch_for_key = + l1_batches_for_initial_writes.get(&storage_key.hashed_key()); + initial_write_batch_for_key.map_or(false, |&number| number <= l1_batch_number) + } else { + true + }; + + if write_matters { + storage_logs.insert(storage_key, StorageLog::new_write_log(storage_key, value)); + } + } + + load_changes_latency.report(); + Some(Self { + header, + storage_logs: storage_logs.into_values().collect(), + }) + } +} + +#[cfg(test)] +mod tests { + use tempfile::TempDir; + + use db_test_macro::db_test; + use zksync_contracts::BaseSystemContracts; + use zksync_dal::ConnectionPool; + use zksync_types::{ + proofs::PrepareBasicCircuitsJob, protocol_version::L1VerifierConfig, + system_contracts::get_system_smart_contracts, Address, L2ChainId, StorageKey, + StorageLogKind, + }; + + use super::*; + use crate::{ + genesis::{ensure_genesis_state, GenesisParams}, + metadata_calculator::tests::{extend_db_state, gen_storage_logs, reset_db_state}, + }; + + impl L1BatchWithLogs { + /// Old, slower method of loading storage logs. We want to test its equivalence to the new implementation. + async fn slow( + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> Option<Self> { + let header = storage + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await?; + let protective_reads = storage + .storage_logs_dedup_dal() + .get_protective_reads_for_l1_batch(l1_batch_number) + .await; + let touched_slots = storage + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number) + .await; + + let mut storage_logs = BTreeMap::new(); + + let hashed_keys: Vec<_> = protective_reads + .iter() + .chain(touched_slots.keys()) + .map(StorageKey::hashed_key) + .collect(); + let previous_values = storage + .storage_logs_dal() + .get_previous_storage_values(&hashed_keys, l1_batch_number) + .await; + + for storage_key in protective_reads { + let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); + // Sanity check: value must not change for slots that require protective reads.
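+ // (Per the deduplication rules above, protective reads are recorded precisely for slots that were read but not really changed in the batch.)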
+ if let Some(value) = touched_slots.get(&storage_key) { + assert_eq!( + previous_value, *value, + "Value was changed for slot that requires protective read" + ); + } + + storage_logs.insert( + storage_key, + StorageLog::new_read_log(storage_key, previous_value), + ); + } + + for (storage_key, value) in touched_slots { + let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); + if previous_value != value { + storage_logs.insert(storage_key, StorageLog::new_write_log(storage_key, value)); + } + } + + Some(Self { + header, + storage_logs: storage_logs.into_values().collect(), + }) + } + } + + fn mock_genesis_params() -> GenesisParams { + GenesisParams { + first_validator: Address::repeat_byte(0x01), + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_l1_verifier_config: L1VerifierConfig::default(), + first_verifier_address: Address::zero(), + } + } + + #[db_test] + async fn loaded_logs_equivalence_basics(pool: ConnectionPool) { + ensure_genesis_state( + &mut pool.access_storage().await, + L2ChainId(270), + &mock_genesis_params(), + ) .await; + reset_db_state(&pool, 5).await; + + let mut storage = pool.access_storage().await; + for l1_batch_number in 0..=5 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + let batch_with_logs = L1BatchWithLogs::new(&mut storage, l1_batch_number) + .await + .unwrap(); + let slow_batch_with_logs = L1BatchWithLogs::slow(&mut storage, l1_batch_number) + .await + .unwrap(); + assert_eq!(batch_with_logs, slow_batch_with_logs); + } + } + + #[db_test] + async fn loaded_logs_equivalence_with_zero_no_op_logs(pool: ConnectionPool) { + let mut storage = pool.access_storage().await; + ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; - for storage_key in protective_reads { - let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); - // Sanity check: value must not change for slots that require protective reads. 
- if let Some(value) = touched_slots.get(&storage_key) { - assert_eq!( - previous_value, *value, - "Value was changed for slot that requires protective read" - ); + let mut logs = gen_storage_logs(100..200, 2); + for log in &mut logs[0] { + log.value = H256::zero(); + } + for log in logs[1].iter_mut().step_by(3) { + log.value = H256::zero(); } + extend_db_state(&mut storage, logs).await; - storage_logs.insert( - storage_key, - WitnessStorageLog { - storage_log: StorageLog::new_read_log(storage_key, previous_value), - previous_value, - }, + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let mut tree = + AsyncTree::new(temp_dir.path().to_owned(), MerkleTreeMode::Full, 500, 0).await; + for number in 0..3 { + assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(number)).await; + } + } + + async fn assert_log_equivalence( + storage: &mut StorageProcessor<'_>, + tree: &mut AsyncTree, + l1_batch_number: L1BatchNumber, + ) { + let l1_batch_with_logs = L1BatchWithLogs::new(storage, l1_batch_number) + .await + .unwrap(); + let slow_l1_batch_with_logs = L1BatchWithLogs::slow(storage, l1_batch_number) + .await + .unwrap(); + + // Sanity check: L1 batch headers must be identical + assert_eq!(l1_batch_with_logs.header, slow_l1_batch_with_logs.header); + + tree.save().await; // Necessary for `reset()` below to work properly + let tree_metadata = tree.process_l1_batch(l1_batch_with_logs.storage_logs).await; + tree.as_mut().reset(); + let slow_tree_metadata = tree + .process_l1_batch(slow_l1_batch_with_logs.storage_logs) + .await; + assert_eq!(tree_metadata.root_hash, slow_tree_metadata.root_hash); + assert_eq!( + tree_metadata.rollup_last_leaf_index, + slow_tree_metadata.rollup_last_leaf_index + ); + assert_eq!( + tree_metadata.initial_writes, + slow_tree_metadata.initial_writes ); + assert_eq!( + tree_metadata.initial_writes, + slow_tree_metadata.initial_writes + ); + assert_eq!( + tree_metadata.repeated_writes, + slow_tree_metadata.repeated_writes + ); + assert_equivalent_witnesses( + tree_metadata.witness.unwrap(), + slow_tree_metadata.witness.unwrap(), + ); + } + + fn assert_equivalent_witnesses(lhs: PrepareBasicCircuitsJob, rhs: PrepareBasicCircuitsJob) { + assert_eq!(lhs.next_enumeration_index(), rhs.next_enumeration_index()); + let lhs_paths = lhs.into_merkle_paths(); + let rhs_paths = rhs.into_merkle_paths(); + assert_eq!(lhs_paths.len(), rhs_paths.len()); + for (lhs_path, rhs_path) in lhs_paths.zip(rhs_paths) { + assert_eq!(lhs_path, rhs_path); + } } - for (storage_key, value) in touched_slots { - let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); - if previous_value != value { - storage_logs.insert( - storage_key, - WitnessStorageLog { - storage_log: StorageLog::new_write_log(storage_key, value), - previous_value, - }, - ); + #[db_test] + async fn loaded_logs_equivalence_with_non_zero_no_op_logs(pool: ConnectionPool) { + let mut storage = pool.access_storage().await; + ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; + + let mut logs = gen_storage_logs(100..120, 1); + // Entire batch of no-op logs (writing previous values). + let copied_logs = logs[0].clone(); + logs.push(copied_logs); + + // Batch of effectively no-op logs (overwriting values, then writing old values back). 
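+ // (After deduplication these writes restore the initial values, so the resulting tree hash must be unchanged as well.)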
+ let mut updated_and_then_copied_logs: Vec<_> = logs[0] + .iter() + .map(|log| StorageLog { + value: H256::repeat_byte(0xff), + ..*log + }) + .collect(); + updated_and_then_copied_logs.extend_from_slice(&logs[0]); + logs.push(updated_and_then_copied_logs); + + // Batch where half of logs are copied and the other half is writing zero values (which is + // not a no-op). + let mut partially_copied_logs = logs[0].clone(); + for log in partially_copied_logs.iter_mut().step_by(2) { + log.value = H256::zero(); + } + logs.push(partially_copied_logs); + + // Batch where 2/3 of logs are copied and the other 1/3 is writing new non-zero values. + let mut partially_copied_logs = logs[0].clone(); + for log in partially_copied_logs.iter_mut().step_by(3) { + log.value = H256::repeat_byte(0x11); + } + logs.push(partially_copied_logs); + extend_db_state(&mut storage, logs).await; + + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let mut tree = + AsyncTree::new(temp_dir.path().to_owned(), MerkleTreeMode::Full, 500, 0).await; + for batch_number in 0..5 { + assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(batch_number)).await; } } - Some(WitnessBlockWithLogs { - header, - storage_logs: storage_logs.into_values().collect(), - }) + #[db_test] + async fn loaded_logs_equivalence_with_protective_reads(pool: ConnectionPool) { + let mut storage = pool.access_storage().await; + ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; + + let mut logs = gen_storage_logs(100..120, 1); + let logs_copy = logs[0].clone(); + logs.push(logs_copy); + let read_logs: Vec<_> = logs[1] + .iter() + .step_by(3) + .map(StorageLog::to_test_log_query) + .collect(); + extend_db_state(&mut storage, logs).await; + storage + .storage_logs_dedup_dal() + .insert_protective_reads(L1BatchNumber(2), &read_logs) + .await; + + let l1_batch_with_logs = L1BatchWithLogs::new(&mut storage, L1BatchNumber(2)) + .await + .unwrap(); + // Check that we have protective reads transformed into read logs + let read_logs_count = l1_batch_with_logs + .storage_logs + .iter() + .filter(|log| log.kind == StorageLogKind::Read) + .count(); + assert_eq!(read_logs_count, 7); + + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let mut tree = + AsyncTree::new(temp_dir.path().to_owned(), MerkleTreeMode::Full, 500, 0).await; + for batch_number in 0..3 { + assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(batch_number)).await; + } + } } diff --git a/core/bin/zksync_core/src/metadata_calculator/metrics.rs b/core/bin/zksync_core/src/metadata_calculator/metrics.rs index 75db7accb748..510edad786fc 100644 --- a/core/bin/zksync_core/src/metadata_calculator/metrics.rs +++ b/core/bin/zksync_core/src/metadata_calculator/metrics.rs @@ -2,10 +2,28 @@ use std::time::Instant; +use zksync_config::configs::database::MerkleTreeMode; use zksync_types::block::L1BatchHeader; use zksync_utils::time::seconds_since_epoch; -use super::{MetadataCalculator, MetadataCalculatorMode}; +use super::MetadataCalculator; + +/// Stage of [`MetadataCalculator`] update reported via metric and logged. +pub(super) trait ReportStage: Copy { + /// Name of the histogram using which the stage latency is reported. + const HISTOGRAM_NAME: &'static str; + + /// Returns the stage tag for the histogram. + fn as_tag(self) -> &'static str; + + /// Starts the stage. 
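+ /// The returned guard is `#[must_use]`: finish it with `report` (or `report_with_count`) to record the elapsed time.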
+ fn start(self) -> UpdateTreeLatency<Self> { + UpdateTreeLatency { + stage: self, + start: Instant::now(), + } + } +} #[derive(Debug, Clone, Copy)] pub(super) enum TreeUpdateStage { @@ -19,8 +37,10 @@ pub(super) enum TreeUpdateStage { _Backup, } -impl TreeUpdateStage { - pub fn as_str(self) -> &'static str { +impl ReportStage for TreeUpdateStage { + const HISTOGRAM_NAME: &'static str = "server.metadata_calculator.update_tree.latency.stage"; + + fn as_tag(self) -> &'static str { match self { Self::LoadChanges => "load_changes", Self::Compute => "compute", @@ -32,11 +52,30 @@ impl TreeUpdateStage { Self::_Backup => "backup_tree", } } +} - pub fn start(self) -> UpdateTreeLatency { - UpdateTreeLatency { - stage: self, - start: Instant::now(), +/// Sub-stages of [`TreeUpdateStage::LoadChanges`]. +#[derive(Debug, Clone, Copy)] +pub(super) enum LoadChangesStage { + L1BatchHeader, + ProtectiveReads, + TouchedSlots, + InitialWritesForZeroValues, +} + +impl LoadChangesStage { + const COUNT_HISTOGRAM_NAME: &'static str = "server.metadata_calculator.load_changes.count"; +} + +impl ReportStage for LoadChangesStage { + const HISTOGRAM_NAME: &'static str = "server.metadata_calculator.load_changes.latency"; + + fn as_tag(self) -> &'static str { + match self { + Self::L1BatchHeader => "load_l1_batch_header", + Self::ProtectiveReads => "load_protective_reads", + Self::TouchedSlots => "load_touched_slots", + Self::InitialWritesForZeroValues => "load_initial_writes_for_zero_values", } } } @@ -44,34 +83,50 @@ impl TreeUpdateStage { /// Latency metric for a certain stage of the tree update. #[derive(Debug)] #[must_use = "Tree latency should be `report`ed"] -pub(super) struct UpdateTreeLatency { - stage: TreeUpdateStage, +pub(super) struct UpdateTreeLatency<S> { + stage: S, start: Instant, } -impl UpdateTreeLatency { +impl<S: ReportStage> UpdateTreeLatency<S> { pub fn report(self) { + self.report_inner(None); + } + + fn report_inner(self, record_count: Option<usize>) { let elapsed = self.start.elapsed(); - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - elapsed, - "stage" => self.stage.as_str() - ); - vlog::trace!( - "Metadata calculator stage `{stage}` completed in {elapsed:?}", - stage = self.stage.as_str() - ); + let stage = self.stage.as_tag(); + metrics::histogram!(S::HISTOGRAM_NAME, elapsed, "stage" => stage); + + if let Some(record_count) = record_count { + vlog::debug!( + "Metadata calculator stage `{stage}` with {record_count} records completed in {elapsed:?}" + ); + } else { + vlog::debug!("Metadata calculator stage `{stage}` completed in {elapsed:?}"); + } + } +} + +impl UpdateTreeLatency<LoadChangesStage> { + pub fn report_with_count(self, count: usize) { + let stage = self.stage.as_tag(); + self.report_inner(Some(count)); + metrics::histogram!(LoadChangesStage::COUNT_HISTOGRAM_NAME, count as f64, "stage" => stage); } } impl MetadataCalculator { pub(super) fn update_metrics( - mode: MetadataCalculatorMode, - block_headers: &[L1BatchHeader], + mode: MerkleTreeMode, + batch_headers: &[L1BatchHeader], total_logs: usize, start: Instant, ) { - let mode_tag = mode.as_tag(); + let mode_tag = match mode { + MerkleTreeMode::Full => "full", + MerkleTreeMode::Lightweight => "lightweight", + }; metrics::histogram!( "server.metadata_calculator.update_tree.latency", @@ -84,33 +139,33 @@ impl MetadataCalculator { ); } - let total_tx: usize = block_headers.iter().map(|block| block.tx_count()).sum(); - let total_l1_tx: u64 = block_headers + let total_tx: usize = batch_headers.iter().map(L1BatchHeader::tx_count).sum(); + let 
total_l1_tx_count: u64 = batch_headers .iter() - .map(|block| u64::from(block.l1_tx_count)) + .map(|batch| u64::from(batch.l1_tx_count)) .sum(); metrics::counter!("server.processed_txs", total_tx as u64, "stage" => "tree"); - metrics::counter!("server.processed_l1_txs", total_l1_tx, "stage" => "tree"); + metrics::counter!("server.processed_l1_txs", total_l1_tx_count, "stage" => "tree"); metrics::histogram!("server.metadata_calculator.log_batch", total_logs as f64); metrics::histogram!( "server.metadata_calculator.blocks_batch", - block_headers.len() as f64 + batch_headers.len() as f64 ); - let first_block_number = block_headers.first().unwrap().number.0; - let last_block_number = block_headers.last().unwrap().number.0; + let first_batch_number = batch_headers.first().unwrap().number.0; + let last_batch_number = batch_headers.last().unwrap().number.0; vlog::info!( "L1 batches #{:?} processed in tree", - first_block_number..=last_block_number + first_batch_number..=last_batch_number ); metrics::gauge!( "server.block_number", - last_block_number as f64, + last_batch_number as f64, "stage" => format!("tree_{mode_tag}_mode") ); let latency = - seconds_since_epoch().saturating_sub(block_headers.first().unwrap().timestamp); + seconds_since_epoch().saturating_sub(batch_headers.first().unwrap().timestamp); metrics::histogram!( "server.block_latency", latency as f64, diff --git a/core/bin/zksync_core/src/metadata_calculator/mod.rs b/core/bin/zksync_core/src/metadata_calculator/mod.rs index edc5dd4f9810..ecfeb20dbe59 100644 --- a/core/bin/zksync_core/src/metadata_calculator/mod.rs +++ b/core/bin/zksync_core/src/metadata_calculator/mod.rs @@ -5,53 +5,38 @@ use tokio::sync::watch; use std::time::Duration; -use zksync_config::configs::chain::OperationsManagerConfig; -use zksync_config::DBConfig; +use zksync_config::configs::{ + chain::OperationsManagerConfig, + database::{DBConfig, MerkleTreeMode}, +}; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_health_check::{HealthUpdater, ReactiveHealthCheck}; use zksync_merkle_tree::domain::TreeMetadata; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ block::L1BatchHeader, - commitment::{BlockCommitment, BlockMetadata, BlockWithMetadata}, + commitment::{L1BatchCommitment, L1BatchMetadata}, }; -mod healthcheck; mod helpers; mod metrics; #[cfg(test)] mod tests; mod updater; -pub use self::healthcheck::TreeHealthCheck; -pub(crate) use self::helpers::get_logs_for_l1_batch; -use self::{helpers::Delayer, metrics::TreeUpdateStage, updater::TreeUpdater}; - -#[derive(Debug, Copy, Clone)] -enum MetadataCalculatorMode { - Full, - Lightweight, -} - -impl MetadataCalculatorMode { - fn as_tag(self) -> &'static str { - match self { - Self::Full => "full", - Self::Lightweight => "lightweight", - } - } -} - -#[derive(Debug, PartialEq)] -pub enum MetadataCalculatorStatus { - Ready, - NotReady, -} +pub(crate) use self::helpers::L1BatchWithLogs; +use self::{ + helpers::Delayer, + metrics::{ReportStage, TreeUpdateStage}, + updater::TreeUpdater, +}; +use crate::gas_tracker::commit_gas_count_for_l1_batch; -/// Part of [`MetadataCalculator`] related to its syncing mode. +/// Part of [`MetadataCalculator`] related to the operation mode of the Merkle tree. #[derive(Debug, Clone, Copy)] pub enum MetadataCalculatorModeConfig<'a> { /// In this mode, `MetadataCalculator` computes Merkle tree root hashes and some auxiliary information - /// for blocks, but not witness inputs. + /// for L1 batches, but not witness inputs. 
Lightweight, /// In this mode, `MetadataCalculator` will compute witness inputs for all storage operations /// and put them into the object store as provided by `store_factory` (e.g., GCS). @@ -61,11 +46,11 @@ pub enum MetadataCalculatorModeConfig<'a> { } impl MetadataCalculatorModeConfig<'_> { - fn to_mode(self) -> MetadataCalculatorMode { + fn to_mode(self) -> MerkleTreeMode { if matches!(self, Self::Full { .. }) { - MetadataCalculatorMode::Full + MerkleTreeMode::Full } else { - MetadataCalculatorMode::Lightweight + MerkleTreeMode::Lightweight } } } @@ -75,15 +60,17 @@ impl MetadataCalculatorModeConfig<'_> { pub struct MetadataCalculatorConfig<'a> { /// Filesystem path to the RocksDB instance that stores the tree. pub db_path: &'a str, - /// Tree syncing mode. + /// Configuration of the Merkle tree mode. pub mode: MetadataCalculatorModeConfig<'a>, /// Interval between polling Postgres for updates if no progress was made by the tree. pub delay_interval: Duration, /// Maximum number of L1 batches to get from Postgres on a single update iteration. - pub max_block_batch: usize, - /// Sleep interval between tree updates if the tree has made progress. This is only applied - /// to the tree in the lightweight mode. - pub throttle_interval: Duration, + pub max_l1_batches_per_iter: usize, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + pub multi_get_chunk_size: usize, + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MB to several GB. + pub block_cache_capacity: usize, } impl<'a> MetadataCalculatorConfig<'a> { @@ -93,11 +80,12 @@ impl<'a> MetadataCalculatorConfig<'a> { mode: MetadataCalculatorModeConfig<'a>, ) -> Self { Self { - db_path: &db_config.new_merkle_tree_ssd_path, + db_path: &db_config.merkle_tree.path, mode, delay_interval: operation_config.delay_interval(), - throttle_interval: db_config.new_merkle_tree_throttle_interval(), - max_block_batch: db_config.max_block_batch(), + max_l1_batches_per_iter: db_config.merkle_tree.max_l1_batches_per_iter, + multi_get_chunk_size: db_config.merkle_tree.multi_get_chunk_size, + block_cache_capacity: db_config.merkle_tree.block_cache_size(), } } } @@ -106,8 +94,7 @@ impl<'a> MetadataCalculatorConfig<'a> { pub struct MetadataCalculator { updater: TreeUpdater, delayer: Delayer, - throttler: Delayer, - status_sender: watch::Sender<MetadataCalculatorStatus>, + health_updater: HealthUpdater, } impl MetadataCalculator { @@ -121,30 +108,18 @@ impl MetadataCalculator { } MetadataCalculatorModeConfig::Lightweight => None, }; - let updater = TreeUpdater::new(mode, config.db_path, config.max_block_batch, object_store); - let throttle_interval = if matches!(mode, MetadataCalculatorMode::Lightweight) { - config.throttle_interval - } else { - Duration::ZERO - }; - let (status_sender, _) = watch::channel(MetadataCalculatorStatus::NotReady); + let updater = TreeUpdater::new(mode, config, object_store).await; + let (_, health_updater) = ReactiveHealthCheck::new("tree"); Self { updater, delayer: Delayer::new(config.delay_interval), - throttler: Delayer::new(throttle_interval), - status_sender, + health_updater, } } /// Returns a health check for this calculator. - pub fn tree_health_check(&self) -> TreeHealthCheck { - let receiver = self.status_sender.subscribe(); - TreeHealthCheck::new(receiver, self.updater.mode()) - } - - /// Returns the tag for this calculator usable in metrics reporting.
- pub fn tree_tag(&self) -> &'static str { - self.updater.mode().as_tag() + pub fn tree_health_check(&self) -> ReactiveHealthCheck { + self.health_updater.subscribe() } pub async fn run( @@ -155,73 +130,70 @@ impl MetadataCalculator { ) { let update_task = self.updater.loop_updating_tree( self.delayer, - self.throttler, &pool, &prover_pool, stop_receiver, - self.status_sender, + self.health_updater, ); update_task.await; } /// This is used to improve L1 gas estimation for the commit operation. The estimations are computed - /// in the State Keeper, where storage writes aren't yet deduplicated, whereas block metadata + /// in the State Keeper, where storage writes aren't yet deduplicated, whereas L1 batch metadata /// contains deduplicated storage writes. - async fn reestimate_block_commit_gas( + async fn reestimate_l1_batch_commit_gas( storage: &mut StorageProcessor<'_>, - block_header: L1BatchHeader, - metadata: BlockMetadata, - ) -> BlockWithMetadata { + header: &L1BatchHeader, + metadata: &L1BatchMetadata, + ) { let reestimate_gas_cost = TreeUpdateStage::ReestimateGasCost.start(); let unsorted_factory_deps = storage .blocks_dal() - .get_l1_batch_factory_deps(block_header.number) + .get_l1_batch_factory_deps(header.number) .await; - let block_with_metadata = - BlockWithMetadata::new(block_header, metadata, unsorted_factory_deps); - let commit_gas_cost = crate::gas_tracker::commit_gas_count_for_block(&block_with_metadata); + let commit_gas_cost = + commit_gas_count_for_l1_batch(header, &unsorted_factory_deps, metadata); storage .blocks_dal() - .update_predicted_block_commit_gas(block_with_metadata.header.number, commit_gas_cost) + .update_predicted_l1_batch_commit_gas(header.number, commit_gas_cost) .await; reestimate_gas_cost.report(); - block_with_metadata } - fn build_block_metadata( - tree_metadata_at_block: TreeMetadata, - l1_batch_header: &L1BatchHeader, - ) -> BlockMetadata { - let merkle_root_hash = tree_metadata_at_block.root_hash; + fn build_l1_batch_metadata( + tree_metadata: TreeMetadata, + header: &L1BatchHeader, + ) -> L1BatchMetadata { + let merkle_root_hash = tree_metadata.root_hash; - let block_commitment = BlockCommitment::new( - l1_batch_header.l2_to_l1_logs.clone(), - tree_metadata_at_block.rollup_last_leaf_index, + let commitment = L1BatchCommitment::new( + header.l2_to_l1_logs.clone(), + tree_metadata.rollup_last_leaf_index, merkle_root_hash, - tree_metadata_at_block.initial_writes, - tree_metadata_at_block.repeated_writes, - l1_batch_header.base_system_contracts_hashes.bootloader, - l1_batch_header.base_system_contracts_hashes.default_aa, + tree_metadata.initial_writes, + tree_metadata.repeated_writes, + header.base_system_contracts_hashes.bootloader, + header.base_system_contracts_hashes.default_aa, ); - let block_commitment_hash = block_commitment.hash(); - vlog::trace!("Block commitment: {block_commitment:?}"); + let commitment_hash = commitment.hash(); + vlog::trace!("L1 batch commitment: {commitment:?}"); - let metadata = BlockMetadata { + let metadata = L1BatchMetadata { root_hash: merkle_root_hash, - rollup_last_leaf_index: tree_metadata_at_block.rollup_last_leaf_index, + rollup_last_leaf_index: tree_metadata.rollup_last_leaf_index, merkle_root_hash, - initial_writes_compressed: block_commitment.initial_writes_compressed().to_vec(), - repeated_writes_compressed: block_commitment.repeated_writes_compressed().to_vec(), - commitment: block_commitment_hash.commitment, - l2_l1_messages_compressed: block_commitment.l2_l1_logs_compressed().to_vec(), - 
l2_l1_merkle_root: block_commitment.l2_l1_logs_merkle_root(), - block_meta_params: block_commitment.meta_parameters(), - aux_data_hash: block_commitment_hash.aux_output, - meta_parameters_hash: block_commitment_hash.meta_parameters, - pass_through_data_hash: block_commitment_hash.pass_through_data, + initial_writes_compressed: commitment.initial_writes_compressed().to_vec(), + repeated_writes_compressed: commitment.repeated_writes_compressed().to_vec(), + commitment: commitment_hash.commitment, + l2_l1_messages_compressed: commitment.l2_l1_logs_compressed().to_vec(), + l2_l1_merkle_root: commitment.l2_l1_logs_merkle_root(), + block_meta_params: commitment.meta_parameters(), + aux_data_hash: commitment_hash.aux_output, + meta_parameters_hash: commitment_hash.meta_parameters, + pass_through_data_hash: commitment_hash.pass_through_data, }; - vlog::trace!("Block metadata: {metadata:?}"); + vlog::trace!("L1 batch metadata: {metadata:?}"); metadata } } diff --git a/core/bin/zksync_core/src/metadata_calculator/tests.rs b/core/bin/zksync_core/src/metadata_calculator/tests.rs index d2a14053f05c..b216a81c8f23 100644 --- a/core/bin/zksync_core/src/metadata_calculator/tests.rs +++ b/core/bin/zksync_core/src/metadata_calculator/tests.rs @@ -1,33 +1,31 @@ use assert_matches::assert_matches; use db_test_macro::db_test; +use itertools::Itertools; use tempfile::TempDir; use tokio::sync::{mpsc, watch}; -use std::{ - future::Future, - ops, panic, - path::Path, - time::{Duration, Instant}, -}; - -use zksync_config::configs::chain::{NetworkConfig, OperationsManagerConfig}; +use std::{future::Future, ops, panic, path::Path, time::Duration}; -use zksync_config::DBConfig; +use zksync_config::{configs::chain::OperationsManagerConfig, DBConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_health_check::{CheckHealth, CheckHealthStatus}; +use zksync_health_check::{CheckHealth, HealthStatus}; +use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{ block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, - commitment::BlockCommitment, proofs::PrepareBasicCircuitsJob, + protocol_version::L1VerifierConfig, + system_contracts::get_system_smart_contracts, AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageKey, StorageLog, H256, }; use zksync_utils::{miniblock_hash, u32_to_h256}; -use super::{MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig}; -use crate::genesis::{create_genesis_block, save_genesis_block_metadata}; +use super::{ + L1BatchWithLogs, MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, +}; +use crate::genesis::{ensure_genesis_state, GenesisParams}; const RUN_TIMEOUT: Duration = Duration::from_secs(15); @@ -47,19 +45,26 @@ async fn genesis_creation(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; - assert!(calculator.tree_tag().starts_with("full")); run_calculator(calculator, pool.clone(), prover_pool).await; let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; - assert_eq!(calculator.updater.tree().block_number(), 1); + assert_eq!( + calculator.updater.tree().next_l1_batch_number(), + L1BatchNumber(1) + ); } + #[db_test] async fn basic_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = 
TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 1).await; - run_calculator(calculator, pool.clone(), prover_pool).await; + let merkle_tree_hash = run_calculator(calculator, pool.clone(), prover_pool).await; + + // Check the hash against the reference. + let expected_tree_hash = expected_tree_hash(&pool).await; + assert_eq!(merkle_tree_hash, expected_tree_hash); let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).await.unwrap(); assert!(job.next_enumeration_index() > 0); @@ -69,45 +74,79 @@ async fn basic_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { assert!(merkle_paths.iter().all(|log| log.is_write)); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; - assert_eq!(calculator.updater.tree().block_number(), 2); + assert_eq!( + calculator.updater.tree().next_l1_batch_number(), + L1BatchNumber(2) + ); +} + +async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { + let mut storage = pool.access_storage().await; + let sealed_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await.0; + let mut all_logs = vec![]; + for i in 0..=sealed_l1_batch_number { + let logs = L1BatchWithLogs::new(&mut storage, L1BatchNumber(i)).await; + let logs = logs.unwrap().storage_logs; + all_logs.extend(logs); + } + ZkSyncTree::process_genesis_batch(&all_logs).root_hash } #[db_test] async fn status_receiver_has_correct_states(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; + let (mut calculator, _) = setup_calculator(temp_dir.path(), &pool).await; let tree_health_check = calculator.tree_health_check(); - assert_matches!( - tree_health_check.check_health().await, - CheckHealthStatus::NotReady(msg) if msg.contains("full") - ); + assert_eq!(tree_health_check.name(), "tree"); + let health = tree_health_check.check_health().await; + assert_matches!(health.status(), HealthStatus::NotReady); + let other_tree_health_check = calculator.tree_health_check(); - assert_matches!( - other_tree_health_check.check_health().await, - CheckHealthStatus::NotReady(msg) if msg.contains("full") - ); + assert_eq!(other_tree_health_check.name(), "tree"); + let health = other_tree_health_check.check_health().await; + assert_matches!(health.status(), HealthStatus::NotReady); + reset_db_state(&pool, 1).await; - run_calculator(calculator, pool, prover_pool).await; + let (stop_sx, stop_rx) = watch::channel(false); + let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); + calculator.delayer.delay_notifier = delay_sx; + + let calculator_handle = tokio::spawn(calculator.run(pool, prover_pool, stop_rx)); + delay_rx.recv().await.unwrap(); + assert_eq!( + tree_health_check.check_health().await.status(), + HealthStatus::Ready + ); + assert_eq!( + other_tree_health_check.check_health().await.status(), + HealthStatus::Ready + ); + + stop_sx.send(true).unwrap(); + tokio::time::timeout(RUN_TIMEOUT, calculator_handle) + .await + .expect("timed out waiting for calculator") + .unwrap(); assert_eq!( - tree_health_check.check_health().await, - CheckHealthStatus::Ready + tree_health_check.check_health().await.status(), + HealthStatus::ShutDown ); assert_eq!( - other_tree_health_check.check_health().await, - CheckHealthStatus::Ready + other_tree_health_check.check_health().await.status(), + 
HealthStatus::ShutDown ); } #[db_test] -async fn multi_block_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { - // Run all transactions as a single block +async fn multi_l1_batch_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { + // Collect all storage logs in a single L1 batch let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 1).await; let root_hash = run_calculator(calculator, pool.clone(), prover_pool.clone()).await; - // Run the same transactions as multiple blocks + // Collect the same logs in multiple L1 batches let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 10).await; @@ -115,9 +154,9 @@ async fn multi_block_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) assert_eq!(multi_block_root_hash, root_hash); let mut prev_index = None; - for block_number in 1..=10 { - let block_number = L1BatchNumber(block_number); - let job: PrepareBasicCircuitsJob = object_store.get(block_number).await.unwrap(); + for l1_batch_number in 1..=10 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + let job: PrepareBasicCircuitsJob = object_store.get(l1_batch_number).await.unwrap(); let next_enumeration_index = job.next_enumeration_index(); let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 10); @@ -139,7 +178,6 @@ async fn running_metadata_calculator_with_additional_blocks( prover_pool: ConnectionPool, ) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 5).await; run_calculator(calculator, pool.clone(), prover_pool.clone()).await; @@ -149,34 +187,27 @@ async fn running_metadata_calculator_with_additional_blocks( let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); calculator.delayer.delay_notifier = delay_sx; - let calculator_handle = { - let pool = pool.clone(); - let prover_pool = prover_pool.clone(); - tokio::task::spawn(calculator.run(pool, prover_pool, stop_rx)) - }; + let calculator_handle = + tokio::spawn(calculator.run(pool.clone(), prover_pool.clone(), stop_rx)); // Wait until the calculator has processed initial blocks. - let (block_count, _) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) + let (next_l1_batch, _) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) .await .expect("metadata calculator timed out processing initial blocks") .unwrap(); - assert_eq!(block_count, 6); + assert_eq!(next_l1_batch, L1BatchNumber(6)); // Add some new blocks to the storage. let new_logs = gen_storage_logs(100..200, 10); - extend_db_state( - &mut pool.access_storage_tagged("metadata_calculator").await, - new_logs, - ) - .await; + extend_db_state(&mut pool.access_storage().await, new_logs).await; // Wait until these blocks are processed. The calculator may have spurious delays, // thus we wait in a loop. let updated_root_hash = loop { - let (block_count, root_hash) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) + let (next_l1_batch, root_hash) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) .await .expect("metadata calculator shut down prematurely") .unwrap(); - if block_count == 16 { + if next_l1_batch == L1BatchNumber(16) { stop_sx.send(true).unwrap(); // Shut down the calculator. 
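The wait-for-progress-then-stop dance above recurs in several tests in this file; a helper capturing it might look as follows. This is a sketch against this test module's types: the `(L1BatchNumber, H256)` payload of the delay notifier is inferred from the destructuring in these tests, and `RUN_TIMEOUT` is the module-level constant defined above.

use tokio::sync::{mpsc, watch};

/// Drains delayer notifications until the calculator reports `target` as the next
/// L1 batch to process, then flips the stop signal and returns the root hash from
/// the matching notification.
async fn wait_for_batch_and_stop(
    delay_rx: &mut mpsc::UnboundedReceiver<(L1BatchNumber, H256)>,
    stop_sx: &watch::Sender<bool>,
    target: L1BatchNumber,
) -> H256 {
    loop {
        let (next_l1_batch, root_hash) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv())
            .await
            .expect("metadata calculator timed out")
            .expect("metadata calculator dropped the delay notifier");
        // Notifications may be spurious (no progress made), so loop until `target`.
        if next_l1_batch == target {
            stop_sx.send(true).unwrap();
            break root_hash;
        }
    }
}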
break root_hash; } @@ -192,45 +223,15 @@ async fn running_metadata_calculator_with_additional_blocks( assert_eq!(root_hash_for_full_tree, updated_root_hash); } -#[db_test] -async fn throttling_tree(pool: ConnectionPool, prover_pool: ConnectionPool) { - let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (mut db_config, operation_config, eth) = create_config(temp_dir.path()); - db_config.new_merkle_tree_throttle_ms = 100; - let mut calculator = setup_calculator_with_options( - &db_config, - &operation_config, - ð, - &pool, - MetadataCalculatorModeConfig::Lightweight, - ) - .await; - let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); - calculator.throttler.delay_notifier = delay_sx; - reset_db_state(&pool, 5).await; - - let start = Instant::now(); - run_calculator(calculator, pool, prover_pool).await; - let elapsed = start.elapsed(); - assert!(elapsed >= Duration::from_millis(100), "{:?}", elapsed); - - // Throttling should be enabled only once, when we have no more blocks to process - let (block_count, _) = delay_rx.try_recv().unwrap(); - assert_eq!(block_count, 6); - delay_rx.try_recv().unwrap_err(); -} - #[db_test] async fn shutting_down_calculator(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (mut db_config, mut operation_config, eth) = create_config(temp_dir.path()); + let (db_config, mut operation_config) = create_config(temp_dir.path()); operation_config.delay_interval = 30_000; // ms; chosen to be larger than `RUN_TIMEOUT` - db_config.new_merkle_tree_throttle_ms = 30_000; let calculator = setup_calculator_with_options( &db_config, &operation_config, - ð, &pool, MetadataCalculatorModeConfig::Lightweight, ) @@ -247,38 +248,134 @@ async fn shutting_down_calculator(pool: ConnectionPool, prover_pool: ConnectionP .unwrap(); } +async fn test_postgres_backup_recovery( + pool: ConnectionPool, + prover_pool: ConnectionPool, + sleep_between_batches: bool, + insert_batch_without_metadata: bool, +) { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; + reset_db_state(&pool, 5).await; + run_calculator(calculator, pool.clone(), prover_pool.clone()).await; + + // Simulate recovery from a DB snapshot in which some newer L1 batches are erased. + let last_batch_after_recovery = L1BatchNumber(3); + let mut storage = pool.access_storage().await; + let removed_batches = remove_l1_batches(&mut storage, last_batch_after_recovery).await; + + if insert_batch_without_metadata { + let batches_without_metadata = + remove_l1_batches(&mut storage, last_batch_after_recovery - 1).await; + let [batch_without_metadata] = batches_without_metadata.as_slice() else { + unreachable!() + }; + // Re-insert the last batch without metadata immediately. + storage + .blocks_dal() + .insert_l1_batch(batch_without_metadata, &[], BlockGasCount::default()) + .await; + insert_initial_writes_for_batch(&mut storage, batch_without_metadata.number).await; + } + drop(storage); + + let mut calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; + let (stop_sx, stop_rx) = watch::channel(false); + let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); + calculator.delayer.delay_notifier = delay_sx; + + let calculator_handle = tokio::spawn(calculator.run(pool.clone(), prover_pool, stop_rx)); + // Wait until the calculator has processed initial L1 batches. 
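The recovery scenario set up here exercises the consistency rule the updater applies on startup (see `updater.rs` further below): if the tree is more than one batch ahead of the last L1 batch with metadata in Postgres, the tree is truncated back. A pure-function model of that decision, with illustrative names:

/// Returns the batch number to truncate the Merkle tree to, or `None` if the tree
/// and Postgres are consistent. `next_in_tree` is the next L1 batch the tree would
/// process; `last_with_metadata` is the newest batch with metadata in Postgres.
fn truncation_target(next_in_tree: u32, last_with_metadata: u32) -> Option<u32> {
    // Being exactly one batch ahead is fine: that batch simply hasn't been processed yet.
    (next_in_tree > last_with_metadata + 1).then_some(last_with_metadata)
}

// After `reset_db_state(&pool, 5)` and a restore to batch #3, the tree reports #6
// as next while Postgres only has metadata up to #3, so the tree is truncated back
// to #3 and resumes at #4 (the `last_batch_after_recovery + 1` assertion below):
// assert_eq!(truncation_target(6, 3), Some(3));
// assert_eq!(truncation_target(4, 3), None);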
+ let (next_l1_batch, _) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) + .await + .expect("metadata calculator timed out after recovery") + .unwrap(); + assert_eq!(next_l1_batch, last_batch_after_recovery + 1); + + // Re-insert L1 batches to the storage after recovery. + let mut storage = pool.access_storage().await; + for batch_header in &removed_batches { + storage + .blocks_dal() + .insert_l1_batch(batch_header, &[], BlockGasCount::default()) + .await; + insert_initial_writes_for_batch(&mut storage, batch_header.number).await; + if sleep_between_batches { + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + drop(storage); + + // Wait until these batches are processed. + loop { + let (next_l1_batch, _) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv()) + .await + .expect("metadata calculator shut down prematurely") + .unwrap(); + if next_l1_batch == L1BatchNumber(6) { + stop_sx.send(true).unwrap(); // Shut down the calculator. + break; + } + } + tokio::time::timeout(RUN_TIMEOUT, calculator_handle) + .await + .expect("timed out waiting for calculator") + .unwrap(); +} + +#[db_test] +async fn postgres_backup_recovery(pool: ConnectionPool, prover_pool: ConnectionPool) { + test_postgres_backup_recovery(pool, prover_pool, false, false).await; +} + +#[db_test] +async fn postgres_backup_recovery_with_delay_between_batches( + pool: ConnectionPool, + prover_pool: ConnectionPool, +) { + test_postgres_backup_recovery(pool, prover_pool, true, false).await; +} + +#[db_test] +async fn postgres_backup_recovery_with_excluded_metadata( + pool: ConnectionPool, + prover_pool: ConnectionPool, +) { + test_postgres_backup_recovery(pool, prover_pool, false, true).await; +} + async fn setup_calculator( db_path: &Path, pool: &ConnectionPool, ) -> (MetadataCalculator, Box) { let store_factory = &ObjectStoreFactory::mock(); - let (db_config, operation_manager, eth) = create_config(db_path); + let (db_config, operation_manager) = create_config(db_path); let mode = MetadataCalculatorModeConfig::Full { store_factory }; let calculator = - setup_calculator_with_options(&db_config, &operation_manager, ð, pool, mode).await; + setup_calculator_with_options(&db_config, &operation_manager, pool, mode).await; (calculator, store_factory.create_store().await) } async fn setup_lightweight_calculator(db_path: &Path, pool: &ConnectionPool) -> MetadataCalculator { let mode = MetadataCalculatorModeConfig::Lightweight; - let (db_config, operation_config, eth) = create_config(db_path); - setup_calculator_with_options(&db_config, &operation_config, ð, pool, mode).await + let (db_config, operation_config) = create_config(db_path); + setup_calculator_with_options(&db_config, &operation_config, pool, mode).await } -fn create_config(db_path: &Path) -> (DBConfig, OperationsManagerConfig, NetworkConfig) { +fn create_config(db_path: &Path) -> (DBConfig, OperationsManagerConfig) { let mut db_config = DBConfig::from_env(); - let mut operation_config = OperationsManagerConfig::from_env(); - let eth_config = NetworkConfig::from_env(); - operation_config.delay_interval = 50; // ms - db_config.new_merkle_tree_ssd_path = path_to_string(&db_path.join("new")); + db_config.merkle_tree.path = path_to_string(&db_path.join("new")); db_config.backup_interval_ms = 0; - (db_config, operation_config, eth_config) + + let operation_config = OperationsManagerConfig { + delay_interval: 50, // ms + }; + (db_config, operation_config) } async fn setup_calculator_with_options( db_config: &DBConfig, operation_config: &OperationsManagerConfig, - 
eth: &NetworkConfig, pool: &ConnectionPool, mode: MetadataCalculatorModeConfig<'_>, ) -> MetadataCalculator { @@ -286,27 +383,24 @@ async fn setup_calculator_with_options( MetadataCalculatorConfig::for_main_node(db_config, operation_config, mode); let metadata_calculator = MetadataCalculator::new(&calculator_config).await; - let mut storage = pool.access_storage_tagged("metadata_calculator").await; + let mut storage = pool.access_storage().await; if storage.blocks_dal().is_genesis_needed().await { - let chain_id = L2ChainId(eth.zksync_network_id); + let chain_id = L2ChainId(270); let base_system_contracts = BaseSystemContracts::load_from_disk(); - let block_commitment = BlockCommitment::new( - vec![], - 0, - Default::default(), - vec![], - vec![], - base_system_contracts.bootloader.hash, - base_system_contracts.default_aa.hash, - ); - - let fee_address = Address::repeat_byte(0x01); - create_genesis_block(&mut storage, fee_address, chain_id, base_system_contracts).await; - save_genesis_block_metadata( + let system_contracts = get_system_smart_contracts(); + let first_validator = Address::repeat_byte(0x01); + let first_l1_verifier_config = L1VerifierConfig::default(); + let first_verifier_address = Address::zero(); + ensure_genesis_state( &mut storage, - &block_commitment, - metadata_calculator.updater.tree().root_hash(), - 1, + chain_id, + &GenesisParams { + first_validator, + base_system_contracts, + system_contracts, + first_l1_verifier_config, + first_verifier_address, + }, ) .await; } @@ -326,7 +420,7 @@ async fn run_calculator( let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); calculator.delayer.delay_notifier = delay_sx; let delayer_handle = tokio::spawn(async move { - // Wait until the calculator has processed all initially available blocks, + // Wait until the calculator has processed all initially available L1 batches, // then stop it via signal. let (_, root_hash) = delay_rx .recv() @@ -340,9 +434,9 @@ async fn run_calculator( delayer_handle.await.unwrap() } -async fn reset_db_state(pool: &ConnectionPool, num_blocks: usize) { - let mut storage = pool.access_storage_tagged("metadata_calculator").await; - // Drops all blocks (except the block with number = 0) and their storage logs. +pub(super) async fn reset_db_state(pool: &ConnectionPool, num_batches: usize) { + let mut storage = pool.access_storage().await; + // Drops all L1 batches (except the L1 batch with number 0) and their storage logs. 
storage .storage_logs_dal() .rollback_storage_logs(MiniblockNumber(0)) @@ -356,24 +450,25 @@ async fn reset_db_state(pool: &ConnectionPool, num_blocks: usize) { .delete_l1_batches(L1BatchNumber(0)) .await; - let logs = gen_storage_logs(0..100, num_blocks); + let logs = gen_storage_logs(0..100, num_batches); extend_db_state(&mut storage, logs).await; } -async fn extend_db_state( +pub(super) async fn extend_db_state( storage: &mut StorageProcessor<'_>, new_logs: impl IntoIterator>, ) { - let next_block = storage.blocks_dal().get_sealed_block_number().await.0 + 1; + let next_l1_batch = storage.blocks_dal().get_sealed_l1_batch_number().await.0 + 1; let base_system_contracts = BaseSystemContracts::load_from_disk(); - for (idx, block_logs) in (next_block..).zip(new_logs) { - let block_number = L1BatchNumber(idx); + for (idx, batch_logs) in (next_l1_batch..).zip(new_logs) { + let batch_number = L1BatchNumber(idx); let mut header = L1BatchHeader::new( - block_number, + batch_number, 0, Address::default(), base_system_contracts.hashes(), + Default::default(), ); header.is_finished = true; @@ -389,11 +484,12 @@ async fn extend_db_state( l1_gas_price: 0, l2_fair_gas_price: 0, base_system_contracts_hashes: base_system_contracts.hashes(), + protocol_version: Some(Default::default()), }; storage .blocks_dal() - .insert_l1_batch(&header, BlockGasCount::default()) + .insert_l1_batch(&header, &[], BlockGasCount::default()) .await; storage .blocks_dal() @@ -401,16 +497,51 @@ async fn extend_db_state( .await; storage .storage_logs_dal() - .insert_storage_logs(miniblock_number, &[(H256::zero(), block_logs)]) + .insert_storage_logs(miniblock_number, &[(H256::zero(), batch_logs)]) .await; storage .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(block_number) + .mark_miniblocks_as_executed_in_l1_batch(batch_number) .await; + insert_initial_writes_for_batch(storage, batch_number).await; } } -fn gen_storage_logs(indices: ops::Range, num_blocks: usize) -> Vec> { +async fn insert_initial_writes_for_batch( + connection: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, +) { + let written_non_zero_slots: Vec<_> = connection + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number) + .await + .into_iter() + .filter_map(|(key, value)| (!value.is_zero()).then_some(key)) + .collect(); + let hashed_keys: Vec<_> = written_non_zero_slots + .iter() + .map(|key| key.hashed_key()) + .collect(); + let pre_written_slots = connection + .storage_logs_dedup_dal() + .filter_written_slots(&hashed_keys) + .await; + + let keys_to_insert: Vec<_> = written_non_zero_slots + .into_iter() + .sorted() + .filter(|key| !pre_written_slots.contains(&key.hashed_key())) + .collect(); + connection + .storage_logs_dedup_dal() + .insert_initial_writes(l1_batch_number, &keys_to_insert) + .await; +} + +pub(super) fn gen_storage_logs( + indices: ops::Range, + num_batches: usize, +) -> Vec> { // Addresses and keys of storage logs must be sorted for the `multi_block_workflow` test. 
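`insert_initial_writes_for_batch` above mirrors what sealing does in production: only non-zero writes to keys never written before count as initial writes, and they are inserted in sorted key order. A self-contained model of that selection, with plain `u64` keys standing in for hashed `StorageKey`s and a `HashSet` standing in for the `filter_written_slots` query:

use std::collections::HashSet;

/// Picks the keys that become initial writes in a batch: non-zero values only,
/// never written before, sorted ascending (as `insert_initial_writes` expects).
fn select_initial_writes(
    touched_slots: &[(u64, u64)], // (hashed key, value); zero value == no-op write
    pre_written: &HashSet<u64>,   // stand-in for `filter_written_slots`
) -> Vec<u64> {
    let mut keys: Vec<u64> = touched_slots
        .iter()
        .filter(|(_, value)| *value != 0)
        .map(|(key, _)| *key)
        .filter(|key| !pre_written.contains(key))
        .collect();
    keys.sort_unstable();
    keys
}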
let mut accounts = [ "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", @@ -439,7 +570,127 @@ fn gen_storage_logs(indices: ops::Range, num_blocks: usize) -> Vec::to_vec) .collect() } + +async fn remove_l1_batches( + storage: &mut StorageProcessor<'_>, + last_l1_batch_to_keep: L1BatchNumber, +) -> Vec { + let sealed_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await; + assert!(sealed_l1_batch_number >= last_l1_batch_to_keep); + + let mut batch_headers = vec![]; + for batch_number in (last_l1_batch_to_keep.0 + 1)..=sealed_l1_batch_number.0 { + let header = storage + .blocks_dal() + .get_l1_batch_header(L1BatchNumber(batch_number)) + .await; + batch_headers.push(header.unwrap()); + } + + storage + .blocks_dal() + .delete_l1_batches(last_l1_batch_to_keep) + .await; + batch_headers +} + +#[db_test] +async fn deduplication_works_as_expected(pool: ConnectionPool) { + let mut storage = pool.access_storage().await; + + let first_validator = Address::repeat_byte(0x01); + let base_system_contracts = BaseSystemContracts::load_from_disk(); + let system_contracts = get_system_smart_contracts(); + let first_l1_verifier_config = L1VerifierConfig::default(); + let first_verifier_address = Address::zero(); + ensure_genesis_state( + &mut storage, + L2ChainId(270), + &GenesisParams { + first_validator, + base_system_contracts, + system_contracts, + first_l1_verifier_config, + first_verifier_address, + }, + ) + .await; + + let logs = gen_storage_logs(100..120, 1).pop().unwrap(); + let hashed_keys: Vec<_> = logs.iter().map(|log| log.key.hashed_key()).collect(); + extend_db_state(&mut storage, [logs.clone()]).await; + + let initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&hashed_keys) + .await; + assert_eq!(initial_writes.len(), hashed_keys.len()); + assert!(initial_writes + .values() + .all(|&batch| batch == L1BatchNumber(1))); + + let mut new_logs = gen_storage_logs(120..140, 1).pop().unwrap(); + let new_hashed_keys: Vec<_> = new_logs.iter().map(|log| log.key.hashed_key()).collect(); + let updated_logs = logs.into_iter().step_by(2).map(|mut log| { + log.value = H256::zero(); + log + }); + new_logs.extend(updated_logs); + extend_db_state(&mut storage, [new_logs]).await; + + // Initial writes for previously inserted keys should not change. 
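The assertions in `deduplication_works_as_expected` encode a simple invariant worth stating outright: a key's initial write is attributed to the first L1 batch that writes a non-zero value to it, and neither later updates nor zero-value (no-op) writes ever move or create that attribution. A toy model of the rule, using plain `u64` keys:

use std::collections::HashMap;

/// Given per-batch `(key, value)` logs (index 0 == L1 batch #1), returns the batch
/// number in which each key received its initial (first non-zero) write.
fn initial_write_batches(batches: &[Vec<(u64, u64)>]) -> HashMap<u64, u32> {
    let mut first_writes = HashMap::new();
    for (idx, logs) in batches.iter().enumerate() {
        for &(key, value) in logs {
            if value != 0 {
                // `or_insert` keeps the earliest batch, i.e. the first write wins.
                first_writes.entry(key).or_insert(idx as u32 + 1);
            }
        }
    }
    first_writes
}

// A zero write in batch #3 followed by a non-zero write in batch #4 yields an
// initial write attributed to #4, matching the final assertions of this test.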
+ let initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&hashed_keys) + .await; + assert_eq!(initial_writes.len(), hashed_keys.len()); + assert!(initial_writes + .values() + .all(|&batch| batch == L1BatchNumber(1))); + + let initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&new_hashed_keys) + .await; + assert_eq!(initial_writes.len(), new_hashed_keys.len()); + assert!(initial_writes + .values() + .all(|&batch| batch == L1BatchNumber(2))); + + let mut no_op_logs = gen_storage_logs(140..160, 1).pop().unwrap(); + let no_op_hashed_keys: Vec<_> = no_op_logs.iter().map(|log| log.key.hashed_key()).collect(); + for log in &mut no_op_logs { + log.value = H256::zero(); + } + extend_db_state(&mut storage, [no_op_logs.clone()]).await; + + let initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&no_op_hashed_keys) + .await; + assert!(initial_writes.is_empty()); + + let updated_logs: Vec<_> = no_op_logs + .iter() + .step_by(2) + .map(|log| StorageLog { + value: H256::repeat_byte(0x11), + ..*log + }) + .collect(); + no_op_logs.extend_from_slice(&updated_logs); + extend_db_state(&mut storage, [no_op_logs]).await; + + let initial_writes = storage + .storage_logs_dal() + .get_l1_batches_for_initial_writes(&no_op_hashed_keys) + .await; + assert_eq!(initial_writes.len(), no_op_hashed_keys.len() / 2); + for key in no_op_hashed_keys.iter().step_by(2) { + assert_eq!(initial_writes[key], L1BatchNumber(4)); + } +} diff --git a/core/bin/zksync_core/src/metadata_calculator/updater.rs b/core/bin/zksync_core/src/metadata_calculator/updater.rs index 8b02e85de754..21d937f846e2 100644 --- a/core/bin/zksync_core/src/metadata_calculator/updater.rs +++ b/core/bin/zksync_core/src/metadata_calculator/updater.rs @@ -1,196 +1,205 @@ //! Tree updater trait and its implementations. 
+use futures::{future, FutureExt}; use tokio::sync::watch; -use std::time::Instant; +use std::{ops, time::Instant}; +use zksync_config::configs::database::MerkleTreeMode; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_merkle_tree::domain::ZkSyncTree; +use zksync_health_check::HealthUpdater; +use zksync_merkle_tree::domain::TreeMetadata; use zksync_object_store::ObjectStore; -use zksync_storage::{db::NamedColumnFamily, RocksDB}; -use zksync_types::{block::WitnessBlockWithLogs, L1BatchNumber}; +use zksync_types::{block::L1BatchHeader, writes::InitialStorageWrite, L1BatchNumber, U256}; use super::{ - get_logs_for_l1_batch, - helpers::{AsyncTree, Delayer}, - metrics::TreeUpdateStage, - MetadataCalculator, MetadataCalculatorMode, MetadataCalculatorStatus, + helpers::{AsyncTree, Delayer, L1BatchWithLogs, TreeHealthCheckDetails}, + metrics::{ReportStage, TreeUpdateStage}, + MetadataCalculator, MetadataCalculatorConfig, }; #[derive(Debug)] pub(super) struct TreeUpdater { - mode: MetadataCalculatorMode, + mode: MerkleTreeMode, tree: AsyncTree, - max_block_batch: usize, + max_l1_batches_per_iter: usize, object_store: Option>, } impl TreeUpdater { - pub fn new( - mode: MetadataCalculatorMode, - db_path: &str, - max_block_batch: usize, + pub async fn new( + mode: MerkleTreeMode, + config: &MetadataCalculatorConfig<'_>, object_store: Option>, ) -> Self { assert!( - max_block_batch > 0, - "Maximum block batch is misconfigured to be 0; please update it to positive value" + config.max_l1_batches_per_iter > 0, + "Maximum L1 batches per iteration is misconfigured to be 0; please update it to a positive value" ); - let db = Self::create_db(db_path); - let tree = AsyncTree::new(match mode { - MetadataCalculatorMode::Full => ZkSyncTree::new(db), - MetadataCalculatorMode::Lightweight => ZkSyncTree::new_lightweight(db), - }); - + let db_path = config.db_path.into(); + let tree = AsyncTree::new( + db_path, + mode, + config.multi_get_chunk_size, + config.block_cache_capacity, + ) + .await; Self { mode, tree, - max_block_batch, + max_l1_batches_per_iter: config.max_l1_batches_per_iter, object_store, } } - fn create_db(path: &str) -> RocksDB { - let db = RocksDB::new(path, true); - if cfg!(test) { - // We need sync writes for the unit tests to execute reliably. With the default config, - // some writes to RocksDB may occur, but not be visible to the test code.

- db.with_sync_writes() - } else { - db - } - } - #[cfg(test)] pub fn tree(&self) -> &AsyncTree { &self.tree } - pub fn mode(&self) -> MetadataCalculatorMode { - self.mode + async fn process_l1_batch( + &mut self, + l1_batch: L1BatchWithLogs, + ) -> (L1BatchHeader, TreeMetadata, Option) { + let compute_latency = TreeUpdateStage::Compute.start(); + let mut metadata = self.tree.process_l1_batch(l1_batch.storage_logs).await; + compute_latency.report(); + + let witness_input = metadata.witness.take(); + let l1_batch_number = l1_batch.header.number; + let object_key = if let Some(object_store) = &self.object_store { + let witness_input = + witness_input.expect("No witness input provided by tree; this is a bug"); + let save_witnesses_latency = TreeUpdateStage::SaveWitnesses.start(); + let object_key = object_store + .put(l1_batch_number, &witness_input) + .await + .unwrap(); + save_witnesses_latency.report(); + + vlog::info!( + "Saved witnesses for L1 batch #{l1_batch_number} to object storage at `{object_key}`" + ); + Some(object_key) + } else { + None + }; + + (l1_batch.header, metadata, object_key) } - async fn process_multiple_blocks( + /// Processes a range of L1 batches with a single flushing of the tree updates to RocksDB at the end. + /// This allows to save on RocksDB I/O ops. + /// + /// Returns the number of the next L1 batch to be processed by the tree. + /// + /// # Implementation details + /// + /// We load L1 batch data from Postgres in parallel with updating the tree. (Naturally, we need to load + /// the first L1 batch data beforehand.) This allows saving some time if we actually process + /// multiple L1 batches at once (e.g., during the initial tree syncing), and if loading data from Postgres + /// is slow for whatever reason. + async fn process_multiple_batches( &mut self, storage: &mut StorageProcessor<'_>, prover_storage: &mut StorageProcessor<'_>, - blocks: Vec, - ) { + l1_batch_numbers: ops::RangeInclusive, + ) -> L1BatchNumber { let start = Instant::now(); + vlog::info!("Processing L1 batches #{l1_batch_numbers:?}"); + let first_l1_batch_number = L1BatchNumber(*l1_batch_numbers.start()); + let last_l1_batch_number = L1BatchNumber(*l1_batch_numbers.end()); + let mut l1_batch_data = L1BatchWithLogs::new(storage, first_l1_batch_number).await; - let compute_latency = TreeUpdateStage::Compute.start(); - let total_logs: usize = blocks.iter().map(|block| block.storage_logs.len()).sum(); - if let (Some(first), Some(last)) = (blocks.first(), blocks.last()) { - let l1_batch_numbers = first.header.number.0..=last.header.number.0; - vlog::info!("Processing L1 batches #{l1_batch_numbers:?} with {total_logs} total logs"); - }; - - let (storage_logs, block_headers): (Vec<_>, Vec<_>) = blocks - .into_iter() - .map(|block| (block.storage_logs, block.header)) - .unzip(); let mut previous_root_hash = self.tree.root_hash(); - let metadata = self.tree.process_blocks(storage_logs).await; - compute_latency.report(); + let mut total_logs = 0; + let mut updated_headers = vec![]; + for l1_batch_number in l1_batch_numbers { + let l1_batch_number = L1BatchNumber(l1_batch_number); + let Some(current_l1_batch_data) = l1_batch_data else { + return l1_batch_number; + }; + total_logs += current_l1_batch_data.storage_logs.len(); + + let process_l1_batch_task = self.process_l1_batch(current_l1_batch_data); + let load_next_l1_batch_task = async { + if l1_batch_number < last_l1_batch_number { + L1BatchWithLogs::new(storage, l1_batch_number + 1).await + } else { + None // Don't need to load the next L1 batch 
after the last one we're processing. + } + }; + let ((header, metadata, object_key), next_l1_batch_data) = + future::join(process_l1_batch_task, load_next_l1_batch_task).await; - let mut updated_headers = Vec::with_capacity(block_headers.len()); - for (mut metadata_at_block, block_header) in metadata.into_iter().zip(block_headers) { let prepare_results_latency = TreeUpdateStage::PrepareResults.start(); - let witness_input = metadata_at_block.witness.take(); - - let next_root_hash = metadata_at_block.root_hash; - let metadata = - MetadataCalculator::build_block_metadata(metadata_at_block, &block_header); + Self::check_initial_writes_consistency( + storage, + header.number, + &metadata.initial_writes, + ) + .await; + let metadata = MetadataCalculator::build_l1_batch_metadata(metadata, &header); prepare_results_latency.report(); - let block_with_metadata = - MetadataCalculator::reestimate_block_commit_gas(storage, block_header, metadata) - .await; - let block_number = block_with_metadata.header.number; - - let object_key = if let Some(object_store) = &self.object_store { - let witness_input = - witness_input.expect("No witness input provided by tree; this is a bug"); - let save_witnesses_latency = TreeUpdateStage::SaveWitnesses.start(); - let object_key = object_store - .put(block_number, &witness_input) - .await - .unwrap(); - save_witnesses_latency.report(); - - vlog::info!( - "Saved witnesses for L1 batch #{block_number} to object storage at `{object_key}`" - ); - Some(object_key) - } else { - None - }; + MetadataCalculator::reestimate_l1_batch_commit_gas(storage, &header, &metadata).await; - // Save the metadata in case the lightweight tree is behind / not running - let metadata = &block_with_metadata.metadata; let save_postgres_latency = TreeUpdateStage::SavePostgres.start(); storage .blocks_dal() - .save_blocks_metadata(block_number, metadata, previous_root_hash) + .save_l1_batch_metadata(l1_batch_number, &metadata, previous_root_hash) .await; - // ^ Note that `save_blocks_metadata()` will not blindly overwrite changes if the block + // ^ Note that `save_l1_batch_metadata()` will not blindly overwrite changes if L1 batch // metadata already exists; instead, it'll check that the old and new metadata match. - That is, if we run both tree implementations, we'll get metadata correspondence + That is, if we run multiple tree instances, we'll get metadata correspondence // right away without having to implement dedicated code.
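The compute/load overlap described in the `process_multiple_batches` doc comment boils down to joining the current batch's processing with the next batch's Postgres fetch. Stripped of the tree-specific types, the pattern looks like this (a generic sketch, not the exact code above; `load` stands in for `L1BatchWithLogs::new` and `process` for `process_l1_batch`):

use futures::future;
use std::future::Future;

/// Processes items in a pipeline: while item `i` is being processed, item `i + 1`
/// is already being loaded, so I/O latency overlaps with computation.
async fn run_pipelined<T, L, LF, P, PF>(count: usize, mut load: L, mut process: P)
where
    L: FnMut(usize) -> LF,
    LF: Future<Output = Option<T>>,
    P: FnMut(T) -> PF,
    PF: Future<Output = ()>,
{
    // The first item must be loaded up front; afterwards, each load overlaps
    // with the processing of the previous item.
    let mut current = load(0).await;
    for i in 0..count {
        let Some(item) = current else { return };
        let processing = process(item);
        let load_next = async {
            if i + 1 < count { load(i + 1).await } else { None }
        };
        // Both futures make progress concurrently on the same task.
        let ((), next) = future::join(processing, load_next).await;
        current = next;
    }
}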
if let Some(object_key) = &object_key { prover_storage .witness_generator_dal() - .save_witness_inputs(block_number, object_key) + .save_witness_inputs(l1_batch_number, object_key) .await; prover_storage .fri_witness_generator_dal() - .save_witness_inputs(block_number, object_key) + .save_witness_inputs(l1_batch_number, object_key) .await; } save_postgres_latency.report(); - vlog::info!("Updated metadata for L1 batch #{block_number} in Postgres"); + vlog::info!("Updated metadata for L1 batch #{l1_batch_number} in Postgres"); - previous_root_hash = next_root_hash; - updated_headers.push(block_with_metadata.header); + previous_root_hash = metadata.merkle_root_hash; + updated_headers.push(header); + l1_batch_data = next_l1_batch_data; } let save_rocksdb_latency = TreeUpdateStage::SaveRocksDB.start(); self.tree.save().await; save_rocksdb_latency.report(); MetadataCalculator::update_metrics(self.mode, &updated_headers, total_logs, start); + + last_l1_batch_number + 1 } async fn step( &mut self, mut storage: StorageProcessor<'_>, mut prover_storage: StorageProcessor<'_>, - next_block_to_seal: &mut L1BatchNumber, + next_l1_batch_to_seal: &mut L1BatchNumber, ) { - let load_changes_latency = TreeUpdateStage::LoadChanges.start(); - let last_sealed_block = storage.blocks_dal().get_sealed_block_number().await; - let last_requested_block = next_block_to_seal.0 + self.max_block_batch as u32 - 1; - let last_requested_block = last_requested_block.min(last_sealed_block.0); - let block_numbers = next_block_to_seal.0..=last_requested_block; - if block_numbers.is_empty() { + let last_sealed_l1_batch = storage.blocks_dal().get_sealed_l1_batch_number().await; + let last_requested_l1_batch = + next_l1_batch_to_seal.0 + self.max_l1_batches_per_iter as u32 - 1; + let last_requested_l1_batch = last_requested_l1_batch.min(last_sealed_l1_batch.0); + let l1_batch_numbers = next_l1_batch_to_seal.0..=last_requested_l1_batch; + if l1_batch_numbers.is_empty() { vlog::trace!( - "No blocks to seal: block numbers range to be loaded {block_numbers:?} is empty" + "No L1 batches to seal: batch numbers range to be loaded {l1_batch_numbers:?} is empty" ); } else { - vlog::info!("Loading blocks with numbers {block_numbers:?} to update Merkle tree"); - } - - let mut new_blocks = vec![]; - for block_number in block_numbers { - let logs = get_logs_for_l1_batch(&mut storage, L1BatchNumber(block_number)).await; - new_blocks.extend(logs); - } - load_changes_latency.report(); - - if let Some(last_block) = new_blocks.last() { - *next_block_to_seal = last_block.header.number + 1; - self.process_multiple_blocks(&mut storage, &mut prover_storage, new_blocks) + vlog::info!("Updating Merkle tree with L1 batches #{l1_batch_numbers:?}"); + *next_l1_batch_to_seal = self + .process_multiple_batches(&mut storage, &mut prover_storage, l1_batch_numbers) .await; } } @@ -199,44 +208,70 @@ impl TreeUpdater { pub async fn loop_updating_tree( mut self, delayer: Delayer, - throttler: Delayer, pool: &ConnectionPool, prover_pool: &ConnectionPool, mut stop_receiver: watch::Receiver, - status_sender: watch::Sender, + health_updater: HealthUpdater, ) { let mut storage = pool.access_storage_tagged("metadata_calculator").await; // Ensure genesis creation let tree = &mut self.tree; if tree.is_empty() { - let Some(logs) = get_logs_for_l1_batch(&mut storage, L1BatchNumber(0)).await else { - panic!("Missing storage logs for the genesis block"); + let Some(logs) = L1BatchWithLogs::new(&mut storage, L1BatchNumber(0)).await else { + panic!("Missing storage logs for the 
genesis L1 batch"); }; - tree.process_block(logs.storage_logs).await; + tree.process_l1_batch(logs.storage_logs).await; tree.save().await; } - let mut next_block_to_seal = L1BatchNumber(tree.block_number()); + let mut next_l1_batch_to_seal = tree.next_l1_batch_number(); - let current_db_block = storage.blocks_dal().get_sealed_block_number().await + 1; - let last_block_number_with_metadata = storage + let current_db_batch = storage.blocks_dal().get_sealed_l1_batch_number().await; + let last_l1_batch_with_metadata = storage .blocks_dal() - .get_last_block_number_with_metadata() - .await - + 1; + .get_last_l1_batch_number_with_metadata() + .await; drop(storage); vlog::info!( - "Initialized metadata calculator with {max_block_batch} max batch size. \ - Current RocksDB block: {next_block_to_seal}, current Postgres block: {current_db_block}, \ - last block with metadata: {last_block_number_with_metadata}", - max_block_batch = self.max_block_batch - ); - metrics::gauge!( - "server.metadata_calculator.backup_lag", - (last_block_number_with_metadata - *next_block_to_seal).0 as f64 + "Initialized metadata calculator with {max_batches_per_iter} max L1 batches per iteration. \ + Next L1 batch for Merkle tree: {next_l1_batch_to_seal}, current Postgres L1 batch: {current_db_batch}, \ + last L1 batch with metadata: {last_l1_batch_with_metadata}", + max_batches_per_iter = self.max_l1_batches_per_iter ); - status_sender.send_replace(MetadataCalculatorStatus::Ready); + let backup_lag = + (last_l1_batch_with_metadata.0 + 1).saturating_sub(next_l1_batch_to_seal.0); + metrics::gauge!("server.metadata_calculator.backup_lag", backup_lag as f64); + + let health = TreeHealthCheckDetails { + mode: self.mode, + next_l1_batch_to_seal, + }; + health_updater.update(health.into()); + + if next_l1_batch_to_seal > last_l1_batch_with_metadata + 1 { + // Check stop signal before proceeding with a potentially time-consuming operation. + if *stop_receiver.borrow_and_update() { + vlog::info!("Stop signal received, metadata_calculator is shutting down"); + return; + } + + vlog::warn!( + "Next L1 batch of the tree ({next_l1_batch_to_seal}) is greater than last L1 batch with metadata in Postgres \ + ({last_l1_batch_with_metadata}); this may be a result of restoring Postgres from a snapshot. \ + Truncating Merkle tree versions so that this mismatch is fixed..." 
+ ); + tree.revert_logs(last_l1_batch_with_metadata); + tree.save().await; + next_l1_batch_to_seal = tree.next_l1_batch_number(); + vlog::info!("Truncated Merkle tree to L1 batch #{next_l1_batch_to_seal}"); + + let health = TreeHealthCheckDetails { + mode: self.mode, + next_l1_batch_to_seal, + }; + health_updater.update(health.into()); + } loop { if *stop_receiver.borrow_and_update() { @@ -248,21 +283,26 @@ impl TreeUpdater { .access_storage_tagged("metadata_calculator") .await; - let next_block_snapshot = *next_block_to_seal; - self.step(storage, prover_storage, &mut next_block_to_seal) + let snapshot = *next_l1_batch_to_seal; + self.step(storage, prover_storage, &mut next_l1_batch_to_seal) .await; - let delay = if next_block_snapshot == *next_block_to_seal { + let delay = if snapshot == *next_l1_batch_to_seal { vlog::trace!( - "Metadata calculator (next L1 batch: #{next_block_to_seal}) \ + "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) \ didn't make any progress; delaying it using {delayer:?}" ); - delayer.wait(&self.tree) + delayer.wait(&self.tree).left_future() } else { + let health = TreeHealthCheckDetails { + mode: self.mode, + next_l1_batch_to_seal, + }; + health_updater.update(health.into()); + vlog::trace!( - "Metadata calculator (next L1 batch: #{next_block_to_seal}) \ - made progress from #{next_block_snapshot}; throttling it using {throttler:?}" + "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) made progress from #{snapshot}" ); - throttler.wait(&self.tree) + future::ready(()).right_future() }; // The delays we're operating with are reasonably small, but selecting between the delay @@ -275,5 +315,38 @@ impl TreeUpdater { () = delay => { /* The delay has passed */ } } } + drop(health_updater); // Explicitly mark where the updater should be dropped + } + + async fn check_initial_writes_consistency( + connection: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + tree_initial_writes: &[InitialStorageWrite], + ) { + let pg_initial_writes: Vec<_> = connection + .storage_logs_dedup_dal() + .initial_writes_for_batch(l1_batch_number) + .await; + + let pg_initial_writes: Option> = pg_initial_writes + .into_iter() + .map(|(key, index)| { + let key = U256::from_little_endian(key.as_bytes()); + Some((key, index?)) + }) + .collect(); + let Some(pg_initial_writes) = pg_initial_writes else { + vlog::info!("Skipping indices consistency check as they are missing in Postgres for L1 batch {l1_batch_number}"); + return; + }; + + let tree_initial_writes: Vec<_> = tree_initial_writes + .iter() + .map(|write| (write.key, write.index)) + .collect(); + assert_eq!( + pg_initial_writes, tree_initial_writes, + "Leaf indices are not consistent for L1 batch {l1_batch_number}" + ); } } diff --git a/core/bin/zksync_core/src/proof_data_handler/mod.rs b/core/bin/zksync_core/src/proof_data_handler/mod.rs new file mode 100644 index 000000000000..7c9c02bbad4a --- /dev/null +++ b/core/bin/zksync_core/src/proof_data_handler/mod.rs @@ -0,0 +1,62 @@ +use axum::extract::Path; +use axum::{routing::post, Json, Router}; +use std::net::SocketAddr; +use tokio::sync::watch; + +use zksync_config::configs::ProofDataHandlerConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStore; +use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}; + +use crate::proof_data_handler::request_processor::RequestProcessor; + +mod request_processor; + +pub(crate) async fn run_server( + config: ProofDataHandlerConfig, + blob_store: Box, + pool: 
ConnectionPool, + mut stop_receiver: watch::Receiver, +) { + let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); + vlog::debug!("Starting proof data handler server on {bind_address}"); + + let get_proof_gen_processor = + RequestProcessor::new(blob_store, pool, config.proof_generation_timeout()); + let submit_proof_processor = get_proof_gen_processor.clone(); + let app = Router::new() + .route( + "/proof_generation_data", + post( + // We use the POST method because the returned data is not idempotent, + // i.e., we return a different result on each call. + move |payload: Json| async move { + get_proof_gen_processor + .get_proof_generation_data(payload) + .await + }, + ), + ) + .route( + "/submit_proof/:l1_batch_number", + post( + move |l1_batch_number: Path, payload: Json| async move { + submit_proof_processor + .submit_proof(l1_batch_number, payload) + .await + }, + ), + ); + + axum::Server::bind(&bind_address) + .serve(app.into_make_service()) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + vlog::warn!("Stop signal sender for proof data handler server was dropped without sending a signal"); + } + vlog::info!("Stop signal received, proof data handler server is shutting down"); + }) + .await + .expect("Proof data handler server failed"); + vlog::info!("Proof data handler server shut down"); +} diff --git a/core/bin/zksync_core/src/proof_data_handler/request_processor.rs b/core/bin/zksync_core/src/proof_data_handler/request_processor.rs new file mode 100644 index 000000000000..a868dd6106b4 --- /dev/null +++ b/core/bin/zksync_core/src/proof_data_handler/request_processor.rs @@ -0,0 +1,125 @@ +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::Path; +use axum::response::Response; +use axum::{http::StatusCode, response::IntoResponse, Json}; + +use zksync_dal::{ConnectionPool, SqlxError}; +use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_types::prover_server_api::{ + ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, + SubmitProofRequest, SubmitProofResponse, +}; +use zksync_types::L1BatchNumber; + +#[derive(Clone)] +pub(crate) struct RequestProcessor { + blob_store: Arc, + pool: ConnectionPool, + proof_generation_timeout: Duration, +} + +pub(crate) enum RequestProcessorError { + NoPendingBatches, + ObjectStore(ObjectStoreError), + Sqlx(SqlxError), +} + +impl IntoResponse for RequestProcessorError { + fn into_response(self) -> Response { + let (status_code, message) = match self { + Self::NoPendingBatches => ( + StatusCode::NOT_FOUND, + "No pending batches to process".to_owned(), + ), + RequestProcessorError::ObjectStore(err) => { + vlog::error!("GCS error: {:?}", err); + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from GCS".to_owned(), + ) + } + RequestProcessorError::Sqlx(err) => { + vlog::error!("Sqlx error: {:?}", err); + match err { + SqlxError::RowNotFound => { + (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) + } + _ => ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ), + } + } + }; + (status_code, message).into_response() + } +} + +impl RequestProcessor { + pub(crate) fn new( + blob_store: Box, + pool: ConnectionPool, + proof_generation_timeout: Duration, + ) -> Self { + Self { + blob_store: Arc::from(blob_store), + pool, + proof_generation_timeout, + } + } + + pub(crate) async fn get_proof_generation_data( + &self, + request: Json, + ) -> Result, RequestProcessorError> { + vlog::info!("Received request for proof
generation data: {:?}", request); + + let l1_batch_number = self + .pool + .access_storage() + .await + .proof_generation_dal() + .get_next_block_to_be_proven(self.proof_generation_timeout) + .await + .ok_or(RequestProcessorError::NoPendingBatches)?; + + let blob = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let proof_gen_data = ProofGenerationData { + l1_batch_number, + data: blob, + }; + + Ok(Json(ProofGenerationDataResponse::Success(proof_gen_data))) + } + + pub(crate) async fn submit_proof( + &self, + Path(l1_batch_number): Path, + Json(payload): Json, + ) -> Result, RequestProcessorError> { + vlog::info!("Received proof for block number: {:?}", l1_batch_number); + let l1_batch_number = L1BatchNumber(l1_batch_number); + + let blob_url = self + .blob_store + .put(l1_batch_number, &payload.proof) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut storage = self.pool.access_storage().await; + storage + .proof_generation_dal() + .save_proof_artifacts_metadata(l1_batch_number, &blob_url) + .await + .map_err(RequestProcessorError::Sqlx)?; + + Ok(Json(SubmitProofResponse::Success)) + } +} diff --git a/core/bin/zksync_core/src/reorg_detector/mod.rs b/core/bin/zksync_core/src/reorg_detector/mod.rs index d49ed276cf81..78d5a842efc9 100644 --- a/core/bin/zksync_core/src/reorg_detector/mod.rs +++ b/core/bin/zksync_core/src/reorg_detector/mod.rs @@ -1,4 +1,5 @@ -use std::time::Duration; +use zksync_dal::ConnectionPool; +use zksync_types::L1BatchNumber; use zksync_web3_decl::{ jsonrpsee::core::Error as RpcError, jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, @@ -6,8 +7,7 @@ use zksync_web3_decl::{ RpcResult, }; -use zksync_dal::ConnectionPool; -use zksync_types::L1BatchNumber; +use std::{future::Future, time::Duration}; const SLEEP_INTERVAL: Duration = Duration::from_secs(5); @@ -40,36 +40,39 @@ impl ReorgDetector { } /// Compares root hashes of the latest local batch and of the same batch from the main node. - async fn root_hashes_match(&self, block_number: L1BatchNumber) -> RpcResult { + async fn root_hashes_match(&self, l1_batch_number: L1BatchNumber) -> RpcResult { // Unwrapping is fine since the caller always checks that these root hashes exist. let local_hash = self .pool .access_storage() .await .blocks_dal() - .get_block_state_root(block_number) + .get_l1_batch_state_root(l1_batch_number) .await .unwrap_or_else(|| { - panic!("Root hash does not exist for local batch #{}", block_number) + panic!( + "Root hash does not exist for local batch #{}", + l1_batch_number + ) }); let Some(hash) = self .client - .get_l1_batch_details(block_number) + .get_l1_batch_details(l1_batch_number) .await? - .and_then(|b| b.root_hash) - else { - // Due to reorg, locally we may be ahead of the main node. - // Lack of the root hash on the main node is treated as a hash mismatch, - // so we can continue searching for the last correct block. - return Ok(false); - }; + .and_then(|b| b.base.root_hash) + else { + // Due to reorg, locally we may be ahead of the main node. + // Lack of the root hash on the main node is treated as a hash mismatch, + // so we can continue searching for the last correct block. + return Ok(false); + }; Ok(hash == local_hash) } /// Localizes a reorg: performs binary search to determine the last non-diverged block. 
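The binary search that `detect_reorg` delegates to (see `binary_search_with` near the end of this file) assumes the predicate is monotone, i.e. root hashes match for every batch up to some point and mismatch afterwards, and that it holds for the left bound (batch #1). Under those assumptions it returns the greatest batch for which the predicate holds. A synchronous miniature with the same loop:

/// Returns the largest `n` in `[left, right]` with `matches(n) == true`, assuming
/// `matches(left) == true` and that `matches` never flips back to `true` after
/// turning `false` (which is what a single reorg point guarantees).
fn last_matching(mut left: u32, mut right: u32, matches: impl Fn(u32) -> bool) -> u32 {
    while left + 1 < right {
        let middle = (left + right) / 2;
        if matches(middle) {
            left = middle;
        } else {
            right = middle;
        }
    }
    left
}

// With hashes diverging after batch #6: last_matching(1, 10, |n| n <= 6) == 6.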
- async fn detect_reorg(&self, diverged_block: L1BatchNumber) -> RpcResult { - binary_search_with(1, diverged_block.0, |block_number| { - self.root_hashes_match(L1BatchNumber(block_number)) + async fn detect_reorg(&self, diverged_l1_batch: L1BatchNumber) -> RpcResult { + binary_search_with(1, diverged_l1_batch.0, |number| { + self.root_hashes_match(L1BatchNumber(number)) }) .await .map(L1BatchNumber) @@ -78,9 +81,9 @@ impl ReorgDetector { pub async fn run(self) -> L1BatchNumber { loop { match self.run_inner().await { - Ok(batch_number) => return batch_number, + Ok(l1_batch_number) => return l1_batch_number, Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { - vlog::warn!("Following transport error occurred: {}", err); + vlog::warn!("Following transport error occurred: {err}"); vlog::info!("Trying again after a delay"); tokio::time::sleep(SLEEP_INTERVAL).await; } @@ -99,41 +102,41 @@ impl ReorgDetector { /// both on the main node and on the external node. async fn is_legally_ahead_of_main_node( &self, - sealed_block_number: L1BatchNumber, + sealed_l1_batch_number: L1BatchNumber, ) -> RpcResult { // We must know the latest batch on the main node *before* we ask it for a root hash // to prevent a race condition (asked for root hash, batch sealed on main node, we've got // inconsistent results). - let last_main_node_batch = self.client.get_l1_batch_number().await?; - let main_node_batch_root_hash = self + let last_main_node_l1_batch = self.client.get_l1_batch_number().await?; + let main_node_l1_batch_root_hash = self .client - .get_l1_batch_details(sealed_block_number) + .get_l1_batch_details(sealed_l1_batch_number) .await? - .and_then(|b| b.root_hash); + .and_then(|b| b.base.root_hash); - let en_ahead_for = sealed_block_number + let en_ahead_for = sealed_l1_batch_number .0 - .checked_sub(last_main_node_batch.as_u32()); + .checked_sub(last_main_node_l1_batch.as_u32()); // Theoretically it's possible that the EN would not only calculate the root hash, but also seal the batch // quicker than the main node. So, we allow us to be at most one batch ahead of the main node. // If the gap is bigger, it's certainly a reorg. // Allowing the gap is safe: if reorg has happened, it'll be detected anyway in the future iterations. - Ok(main_node_batch_root_hash.is_none() && en_ahead_for <= Some(1)) + Ok(main_node_l1_batch_root_hash.is_none() && en_ahead_for <= Some(1)) } async fn run_inner(&self) -> RpcResult { loop { - let sealed_block_number = self + let sealed_l1_batch_number = self .pool .access_storage() .await .blocks_dal() - .get_last_block_number_with_metadata() + .get_last_l1_batch_number_with_metadata() .await; // If the main node has to catch up with us, we should not do anything just yet. if self - .is_legally_ahead_of_main_node(sealed_block_number) + .is_legally_ahead_of_main_node(sealed_l1_batch_number) .await? { vlog::trace!( @@ -144,20 +147,23 @@ impl ReorgDetector { } // At this point we're certain that if we detect a reorg, it's real. - vlog::trace!("Checking for reorgs - batch number {}", sealed_block_number); - if self.root_hashes_match(sealed_block_number).await? { + vlog::trace!("Checking for reorgs - L1 batch #{sealed_l1_batch_number}"); + if self.root_hashes_match(sealed_l1_batch_number).await? 
{ metrics::gauge!( "external_node.last_correct_batch", - sealed_block_number.0 as f64, + sealed_l1_batch_number.0 as f64, "component" => "reorg_detector", ); tokio::time::sleep(SLEEP_INTERVAL).await; } else { - vlog::warn!("Reorg detected: last state hash doesn't match the state hash from main node (batch #{sealed_block_number})"); + vlog::warn!( + "Reorg detected: last state hash doesn't match the state hash from main node \ + (L1 batch #{sealed_l1_batch_number})" + ); vlog::info!("Searching for the first diverged batch"); - let last_correct_block = self.detect_reorg(sealed_block_number).await?; - vlog::info!("Reorg localized: last correct batch is #{last_correct_block}",); - return Ok(last_correct_block); + let last_correct_l1_batch = self.detect_reorg(sealed_l1_batch_number).await?; + vlog::info!("Reorg localized: last correct L1 batch is #{last_correct_l1_batch}"); + return Ok(last_correct_l1_batch); } } } @@ -166,7 +172,7 @@ impl ReorgDetector { async fn binary_search_with(mut left: u32, mut right: u32, mut f: F) -> Result where F: FnMut(u32) -> Fut, - Fut: std::future::Future>, + Fut: Future>, { while left + 1 < right { let middle = (left + right) / 2; diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs index 0744229e9157..fce87a613338 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -6,17 +6,17 @@ use tokio::{ use std::{collections::HashSet, fmt, time::Instant}; +use multivm::{ + init_vm, init_vm_with_gas_limit, BlockProperties, OracleTools, VmInstance, VmVersion, +}; use vm::{ vm::{VmPartialExecutionResult, VmTxExecutionResult}, - vm_with_bootloader::{ - init_vm, init_vm_with_gas_limit, push_transaction_to_bootloader_memory, BootloaderJobType, - TxExecutionMode, - }, - HistoryEnabled, HistoryMode, TxRevertReason, VmBlockResult, VmInstance, + vm_with_bootloader::{BootloaderJobType, TxExecutionMode}, + TxRevertReason, VmBlockResult, }; use zksync_dal::ConnectionPool; use zksync_state::{RocksdbStorage, StorageView}; -use zksync_types::{tx::ExecutionMetrics, Transaction, U256}; +use zksync_types::{tx::ExecutionMetrics, L1BatchNumber, Transaction, U256}; use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; #[cfg(test)] @@ -59,6 +59,65 @@ impl TxExecutionResult { } } +/// Configuration for the MultiVM. +/// Currently, represents an ordered sequence of (min_batch_number, vm_version) entries, +/// which will be scanned by the MultiVM on each batch. +#[derive(Debug, Clone)] +pub struct MultiVMConfig { + versions: Vec<(L1BatchNumber, VmVersion)>, +} + +impl MultiVMConfig { + /// Creates a new MultiVM config from the provided sequence of (min_batch_number, vm_version) entries. + /// + /// ## Panics + /// + /// Panics if the provided sequence is not ordered by the batch number, if it's empty or if the first entry + /// doesn't correspond to the batch #1. + pub fn new(versions: Vec<(L1BatchNumber, VmVersion)>) -> Self { + // Must-haves: config is not empty, we start from the first batch, config is ordered. + assert!(!versions.is_empty()); + assert_eq!(versions[0].0 .0, 1); + assert!(versions.windows(2).all(|w| w[0].0 < w[1].0)); + + Self { versions } + } + + /// Finds the appropriate VM version for the provided batch number. 
+ pub fn version_for(&self, batch_number: L1BatchNumber) -> VmVersion { + debug_assert!( + batch_number != L1BatchNumber(0), + "Genesis block doesn't need to be actually executed" + ); + // Find the latest version which is not greater than the provided batch number. + let (_, version) = *self + .versions + .iter() + .rev() + .find(|(version_start, _)| batch_number >= *version_start) + .expect("At least one version must match"); + version + } + + /// Returns the config for mainnet. + /// This method is WIP, and returned config is not guaranteed to be full or correct. + pub fn mainnet_config_wip() -> Self { + Self::new(vec![ + (L1BatchNumber(1), VmVersion::M5WithoutRefunds), + (L1BatchNumber(292), VmVersion::M5WithRefunds), + (L1BatchNumber(360), VmVersion::M6Initial), + (L1BatchNumber(390), VmVersion::M6BugWithCompressionFixed), + (L1BatchNumber(49508), VmVersion::Vm1_3_2), + ]) + } + + /// Returns the config for testnet. + /// This method is WIP, and returned config is not guaranteed to be full or correct. + pub fn testnet_config_wip() -> Self { + Self::new(vec![(L1BatchNumber(1), VmVersion::M5WithoutRefunds)]) + } +} + /// An abstraction that allows us to create different kinds of batch executors. /// The only requirement is to return a [`BatchExecutorHandle`], which does its work /// by communicating with the externally initialized thread. @@ -76,6 +135,7 @@ pub struct MainBatchExecutorBuilder { save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, + multivm_config: Option, } impl MainBatchExecutorBuilder { @@ -85,6 +145,7 @@ impl MainBatchExecutorBuilder { max_allowed_tx_gas_limit: U256, save_call_traces: bool, validation_computational_gas_limit: u32, + multivm_config: Option, ) -> Self { Self { state_keeper_db_path, @@ -92,6 +153,7 @@ impl MainBatchExecutorBuilder { save_call_traces, max_allowed_tx_gas_limit, validation_computational_gas_limit, + multivm_config, } } } @@ -104,13 +166,19 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { secondary_storage.update_from_postgres(&mut conn).await; drop(conn); + let batch_number = l1_batch_params + .context_mode + .inner_block_context() + .context + .block_number; + let vm_version = self + .multivm_config + .as_ref() + .map(|config| config.version_for(L1BatchNumber(batch_number))) + .unwrap_or(VmVersion::latest()); + vlog::info!( - "Secondary storage for batch {} initialized, size is {}", - l1_batch_params - .context_mode - .inner_block_context() - .context - .block_number, + "Secondary storage for batch {batch_number} initialized, size is {}", secondary_storage.estimated_map_size() ); metrics::gauge!( @@ -118,6 +186,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { secondary_storage.estimated_map_size() as f64, ); BatchExecutorHandle::new( + vm_version, self.save_call_traces, self.max_allowed_tx_gas_limit, self.validation_computational_gas_limit, @@ -139,6 +208,7 @@ pub struct BatchExecutorHandle { impl BatchExecutorHandle { pub(super) fn new( + vm_version: VmVersion, save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, @@ -150,6 +220,7 @@ impl BatchExecutorHandle { // until a previous command is processed), capacity 1 is enough for the commands channel. let (commands_sender, commands_receiver) = mpsc::channel(1); let executor = BatchExecutor { + vm_version, save_call_traces, max_allowed_tx_gas_limit, validation_computational_gas_limit, @@ -248,6 +319,7 @@ pub(super) enum Command { /// be constructed. 
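// Aside: a usage sketch of the `MultiVMConfig` lookup introduced above. The types
// below are local stand-ins for the `multivm` crate's types, just to make the
// reverse-scan semantics concrete: a batch runs on the latest version whose
// starting batch number does not exceed it.
#[derive(Debug, Clone, Copy, PartialEq)]
enum VmVersion {
    M5WithoutRefunds,
    M5WithRefunds,
}

#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
struct L1BatchNumber(u32);

struct MultiVMConfig {
    versions: Vec<(L1BatchNumber, VmVersion)>,
}

impl MultiVMConfig {
    fn version_for(&self, batch_number: L1BatchNumber) -> VmVersion {
        self.versions
            .iter()
            .rev()
            .find(|(start, _)| batch_number >= *start)
            .map(|(_, version)| *version)
            .expect("At least one version must match")
    }
}

fn main() {
    let config = MultiVMConfig {
        versions: vec![
            (L1BatchNumber(1), VmVersion::M5WithoutRefunds),
            (L1BatchNumber(292), VmVersion::M5WithRefunds),
        ],
    };
    // Batch 291 still runs the old VM; batch 292 is the first one on the new VM.
    assert_eq!(config.version_for(L1BatchNumber(291)), VmVersion::M5WithoutRefunds);
    assert_eq!(config.version_for(L1BatchNumber(292)), VmVersion::M5WithRefunds);
}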
#[derive(Debug)] pub(super) struct BatchExecutor { + vm_version: VmVersion, save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, @@ -267,20 +339,26 @@ impl BatchExecutor { ); let mut storage_view = StorageView::new(&secondary_storage); - let mut oracle_tools = vm::OracleTools::new(&mut storage_view, HistoryEnabled); + let mut oracle_tools = OracleTools::new(self.vm_version, &mut storage_view); + let block_properties = BlockProperties::new( + self.vm_version, + l1_batch_params.properties.default_aa_code_hash, + ); let mut vm = match self.vm_gas_limit { Some(vm_gas_limit) => init_vm_with_gas_limit( + self.vm_version, &mut oracle_tools, l1_batch_params.context_mode, - &l1_batch_params.properties, + &block_properties, TxExecutionMode::VerifyExecute, &l1_batch_params.base_system_contracts, vm_gas_limit, ), None => init_vm( + self.vm_version, &mut oracle_tools, l1_batch_params.context_mode, - &l1_batch_params.properties, + &block_properties, TxExecutionMode::VerifyExecute, &l1_batch_params.base_system_contracts, ), @@ -321,11 +399,7 @@ impl BatchExecutor { vlog::info!("State keeper exited with an unfinished batch"); } - fn execute_tx( - &self, - tx: &Transaction, - vm: &mut VmInstance<'_, HistoryEnabled>, - ) -> TxExecutionResult { + fn execute_tx(&self, tx: &Transaction, vm: &mut VmInstance<'_>) -> TxExecutionResult { let gas_consumed_before_tx = vm.gas_consumed(); // Save pre-`execute_next_tx` VM snapshot. @@ -391,9 +465,9 @@ impl BatchExecutor { } } - fn rollback_last_tx(&self, vm: &mut VmInstance<'_, HistoryEnabled>) { + fn rollback_last_tx(&self, vm: &mut VmInstance<'_>) { let stage_started_at = Instant::now(); - vm.rollback_to_latest_snapshot_popping(); + vm.rollback_to_snapshot_popping(); metrics::histogram!( "server.state_keeper.tx_execution_time", stage_started_at.elapsed(), @@ -401,7 +475,7 @@ impl BatchExecutor { ); } - fn finish_batch(&self, vm: &mut VmInstance<'_, H>) -> VmBlockResult { + fn finish_batch(&self, vm: &mut VmInstance<'_>) -> VmBlockResult { vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing) } @@ -412,7 +486,7 @@ impl BatchExecutor { fn execute_tx_in_vm( &self, tx: &Transaction, - vm: &mut VmInstance<'_, HistoryEnabled>, + vm: &mut VmInstance<'_>, ) -> Result<(VmTxExecutionResult, Vec), TxRevertReason> { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. @@ -432,13 +506,11 @@ impl BatchExecutor { } else { // Deduplicate and filter factory deps preserving original order. 
let deps = tx.execute.factory_deps.as_deref().unwrap_or_default(); - let storage_ptr = vm.state.storage.storage.get_ptr(); - let mut storage_ptr = storage_ptr.borrow_mut(); let mut deps_hashes = HashSet::with_capacity(deps.len()); let filtered_deps = deps.iter().filter_map(|bytecode| { let bytecode_hash = hash_bytecode(bytecode); - let is_known = !deps_hashes.insert(bytecode_hash) - || storage_ptr.is_bytecode_known(&bytecode_hash); + let is_known = + !deps_hashes.insert(bytecode_hash) || vm.is_bytecode_known(&bytecode_hash); if is_known { None } else { @@ -448,8 +520,7 @@ impl BatchExecutor { filtered_deps.collect() }; - push_transaction_to_bootloader_memory( - vm, + vm.push_transaction_to_bootloader_memory( tx, TxExecutionMode::VerifyExecute, Some(compressed_bytecodes.clone()), @@ -460,18 +531,15 @@ impl BatchExecutor { )?; let at_least_one_unpublished = { - let storage_ptr = vm.state.storage.storage.get_ptr(); - let mut storage_ptr = storage_ptr.borrow_mut(); compressed_bytecodes .iter() - .any(|info| !storage_ptr.is_bytecode_known(&hash_bytecode(&info.original))) + .any(|info| !vm.is_bytecode_known(&hash_bytecode(&info.original))) }; if at_least_one_unpublished { // Rolling back and trying to execute one more time. - vm.rollback_to_latest_snapshot_popping(); - push_transaction_to_bootloader_memory( - vm, + vm.rollback_to_snapshot_popping(); + vm.push_transaction_to_bootloader_memory( tx, TxExecutionMode::VerifyExecute, Some(vec![]), @@ -491,7 +559,7 @@ impl BatchExecutor { fn dryrun_block_tip( &self, - vm: &mut VmInstance<'_, HistoryEnabled>, + vm: &mut VmInstance<'_>, ) -> Result<(VmPartialExecutionResult, ExecutionMetricsForCriteria), TxRevertReason> { let stage_started_at = Instant::now(); let gas_consumed_before = vm.gas_consumed(); @@ -512,7 +580,7 @@ impl BatchExecutor { }; // Rollback to the pre-`execute_till_block_end` state. - vm.rollback_to_latest_snapshot_popping(); + vm.rollback_to_snapshot_popping(); metrics::histogram!( "server.state_keeper.tx_execution_time", @@ -523,8 +591,8 @@ impl BatchExecutor { result } - fn get_execution_metrics( - vm: &VmInstance<'_, H>, + fn get_execution_metrics( + vm: &VmInstance<'_>, tx: Option<&Transaction>, execution_result: &VmPartialExecutionResult, gas_consumed_before: u32, diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 767926e69ad8..6c4199d3e170 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -108,7 +108,8 @@ async fn rollback(connection_pool: ConnectionPool) { tx_metrics: tx_metrics_new, .. }, - ) = (res_old, res_new) else { + ) = (res_old, res_new) + else { unreachable!(); }; @@ -161,7 +162,8 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) { rejection_reason: rejection_reason_new, .. }, - ) = (res_old, res_new) else { + ) = (res_old, res_new) + else { unreachable!(); }; assert_eq!( diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index c0dba17c22da..c9af99c71bbe 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,6 +1,7 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. 
+use multivm::VmVersion; use tempfile::TempDir; use vm::{ @@ -24,14 +25,15 @@ use zksync_types::{ fee::Fee, l1::{L1Tx, OpProcessingType, PriorityQueueType}, l2::L2Tx, + system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, MiniblockNumber, - Nonce, PackedEthSignature, PriorityOpId, StorageLog, Transaction, H256, L2_ETH_TOKEN_ADDRESS, - SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + Nonce, PackedEthSignature, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, + L2_ETH_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::{test_utils::LoadnextContractExecutionParams, u256_to_h256}; -use crate::genesis::create_genesis_block; +use crate::genesis::create_genesis_l1_batch; use crate::state_keeper::{ batch_executor::BatchExecutorHandle, io::L1BatchParams, @@ -107,6 +109,7 @@ impl Tester { // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden // for the test pool (see the doc-comment on `TestPool` for details). BatchExecutorHandle::new( + VmVersion::latest(), self.config.save_call_traces, self.config.max_allowed_tx_gas_limit.into(), self.config.validation_computational_gas_limit, @@ -115,6 +118,7 @@ impl Tester { context_mode: block_context, properties: block_properties, base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + protocol_version: ProtocolVersionId::latest(), }, self.config.vm_gas_limit, ) @@ -151,11 +155,14 @@ impl Tester { pub(super) async fn genesis(&self) { let mut storage = self.pool.access_storage_tagged("state_keeper").await; if storage.blocks_dal().is_genesis_needed().await { - create_genesis_block( + create_genesis_l1_batch( &mut storage, self.fee_account, CHAIN_ID, - BASE_SYSTEM_CONTRACTS.clone(), + &BASE_SYSTEM_CONTRACTS, + &get_system_smart_contracts(), + Default::default(), + Default::default(), ) .await; } diff --git a/core/bin/zksync_core/src/state_keeper/extractors.rs b/core/bin/zksync_core/src/state_keeper/extractors.rs index 5fe1c4e90dba..5da6836ad2d2 100644 --- a/core/bin/zksync_core/src/state_keeper/extractors.rs +++ b/core/bin/zksync_core/src/state_keeper/extractors.rs @@ -61,7 +61,7 @@ async fn wait_for_l1_batch_params_unchecked( loop { let data = storage .blocks_dal() - .get_block_state_root_and_timestamp(number) + .get_l1_batch_state_root_and_timestamp(number) .await; if let Some((root_hash, timestamp)) = data { vlog::trace!( diff --git a/core/bin/zksync_core/src/state_keeper/io/common.rs b/core/bin/zksync_core/src/state_keeper/io/common.rs index b3dc7a16c005..8547f8481c38 100644 --- a/core/bin/zksync_core/src/state_keeper/io/common.rs +++ b/core/bin/zksync_core/src/state_keeper/io/common.rs @@ -6,13 +6,16 @@ use vm::{ }; use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; -use zksync_types::{Address, L1BatchNumber, U256, ZKPORTER_IS_AVAILABLE}; +use zksync_types::{Address, L1BatchNumber, ProtocolVersionId, U256, ZKPORTER_IS_AVAILABLE}; use zksync_utils::h256_to_u256; +use itertools::Itertools; + use super::{L1BatchParams, PendingBatchData}; use crate::state_keeper::extractors; /// Returns the parameters required to initialize the VM for the next L1 batch. 
+#[allow(clippy::too_many_arguments)] pub(crate) fn l1_batch_params( current_l1_batch_number: L1BatchNumber, operator_address: Address, @@ -21,6 +24,7 @@ pub(crate) fn l1_batch_params( l1_gas_price: u64, fair_l2_gas_price: u64, base_system_contracts: BaseSystemContracts, + protocol_version: ProtocolVersionId, ) -> L1BatchParams { let block_properties = BlockProperties { default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash), @@ -39,6 +43,7 @@ pub(crate) fn l1_batch_params( context_mode: BlockContextMode::NewBlock(context.into(), previous_block_hash), properties: block_properties, base_system_contracts, + protocol_version, } } @@ -97,14 +102,80 @@ pub(crate) async fn load_pending_batch( pending_miniblock_header.l1_gas_price, pending_miniblock_header.l2_fair_gas_price, base_system_contracts, + pending_miniblock_header + .protocol_version + .expect("`protocol_version` must be set for pending miniblock"), ); - let txs = storage + let pending_miniblocks = storage .transactions_dal() - .get_transactions_to_reexecute() + .get_miniblocks_to_reexecute() .await; - Some(PendingBatchData { params, txs }) + Some(PendingBatchData { + params, + pending_miniblocks, + }) +} + +/// Sets missing initial writes indices. +pub async fn set_missing_initial_writes_indices(storage: &mut StorageProcessor<'_>) { + // Indices should start from 1, that's why default is (1, 0). + let (mut next_index, start_from_batch) = storage + .storage_logs_dedup_dal() + .max_set_enumeration_index() + .await + .map(|(index, l1_batch_number)| (index + 1, l1_batch_number + 1)) + .unwrap_or((1, L1BatchNumber(0))); + + let sealed_batch = storage.blocks_dal().get_sealed_l1_batch_number().await; + if start_from_batch > sealed_batch { + vlog::info!("All indices for initial writes are already set, no action is needed"); + return; + } else { + let batches_count = sealed_batch.0 - start_from_batch.0 + 1; + if batches_count > 100 { + vlog::warn!("There are {batches_count} batches to set indices for, it may take substantial time."); + } + } + + vlog::info!( + "Last set index {}. Starting migration from batch {start_from_batch}", + next_index - 1 + ); + let mut current_l1_batch = start_from_batch; + loop { + if current_l1_batch > storage.blocks_dal().get_sealed_l1_batch_number().await { + break; + } + vlog::info!("Setting indices for batch {current_l1_batch}"); + + let (hashed_keys, _): (Vec<_>, Vec<_>) = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(current_l1_batch) + .await + .into_iter() + .unzip(); + let storage_keys = storage + .storage_logs_dal() + .resolve_hashed_keys(&hashed_keys) + .await; + + // Sort storage key alphanumerically and assign indices. 
+ let indexed_keys: Vec<_> = storage_keys + .into_iter() + .sorted() + .enumerate() + .map(|(pos, key)| (key.hashed_key(), next_index + pos as u64)) + .collect(); + storage + .storage_logs_dedup_dal() + .set_indices_for_initial_writes(&indexed_keys) + .await; + + next_index += indexed_keys.len() as u64; + current_l1_batch += 1; + } } #[cfg(test)] diff --git a/core/bin/zksync_core/src/state_keeper/io/mempool.rs b/core/bin/zksync_core/src/state_keeper/io/mempool.rs index 8fc0efbe9410..6c431dc6396f 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mempool.rs @@ -12,10 +12,12 @@ use vm::{ VmBlockResult, }; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, U256}; +use zksync_types::{ + protocol_version::ProtocolUpgradeTx, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, Transaction, U256, +}; use zksync_utils::time::millis_since_epoch; use crate::{ @@ -49,7 +51,6 @@ pub(crate) struct MempoolIO { delay_interval: Duration, // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. l1_gas_price_provider: Arc, - base_system_contracts: BaseSystemContracts, l2_erc20_bridge_addr: Address, } @@ -66,9 +67,11 @@ impl StateKeeperIO for MempoolIO< async fn load_pending_batch(&mut self) -> Option { let mut storage = self.pool.access_storage_tagged("state_keeper").await; - let PendingBatchData { params, txs } = - load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account) - .await?; + let PendingBatchData { + params, + pending_miniblocks, + } = load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account) + .await?; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. let context = params.context_mode.inner_block_context().context; @@ -80,7 +83,10 @@ impl StateKeeperIO for MempoolIO< gas_per_pubdata: gas_per_pubdata as u32, }; - Some(PendingBatchData { params, txs }) + Some(PendingBatchData { + params, + pending_miniblocks, + }) } async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option { @@ -98,12 +104,15 @@ impl StateKeeperIO for MempoolIO< continue; } - let (prev_hash, prev_timestamp) = self.load_previous_l1_batch_params().await; - // We cannot create two L1 batches with the same timestamp (forbidden by the bootloader). - // Hence, we wait until the current timestamp is larger. We can use `timeout_at` - // since `sleep_past` is cancel-safe; it only uses `sleep()` async calls. - let current_timestamp = - tokio::time::timeout_at(deadline.into(), sleep_past(prev_timestamp)); + let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await; + let prev_miniblock_timestamp = self.load_previous_miniblock_timestamp().await; + // We cannot create two L1 batches or miniblocks with the same timestamp (forbidden by the bootloader). + // Hence, we wait until the current timestamp is larger than the timestamp of the previous miniblock. + // We can use `timeout_at` since `sleep_past` is cancel-safe; it only uses `sleep()` async calls. 
+ let current_timestamp = tokio::time::timeout_at( + deadline.into(), + sleep_past(prev_miniblock_timestamp, self.current_miniblock_number), + ); let current_timestamp = current_timestamp.await.ok()?; vlog::info!( @@ -112,22 +121,37 @@ impl StateKeeperIO for MempoolIO< self.filter.l1_gas_price, self.fair_l2_gas_price ); + let mut storage = self.pool.access_storage().await; + let (base_system_contracts, protocol_version) = storage + .protocol_versions_dal() + .base_system_contracts_by_timestamp(current_timestamp) + .await; return Some(l1_batch_params( self.current_l1_batch_number, self.fee_account, current_timestamp, - prev_hash, + prev_l1_batch_hash, self.filter.l1_gas_price, self.fair_l2_gas_price, - self.base_system_contracts.clone(), + base_system_contracts, + protocol_version, )); } None } - async fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option { - let new_miniblock_timestamp = (millis_since_epoch() / 1000) as u64; - Some(new_miniblock_timestamp) + async fn wait_for_new_miniblock_params( + &mut self, + max_wait: Duration, + prev_miniblock_timestamp: u64, + ) -> Option { + // We must provide different timestamps for each miniblock. + // If miniblock sealing interval is greater than 1 second then `sleep_past` won't actually sleep. + let current_timestamp = tokio::time::timeout( + max_wait, + sleep_past(prev_miniblock_timestamp, self.current_miniblock_number), + ); + current_timestamp.await.ok() } async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { @@ -220,19 +244,38 @@ impl StateKeeperIO for MempoolIO< self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. self.current_l1_batch_number += 1; } + + async fn load_previous_batch_version_id(&mut self) -> Option { + let mut storage = self.pool.access_storage().await; + storage + .blocks_dal() + .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + .await + } + + async fn load_upgrade_tx( + &mut self, + version_id: ProtocolVersionId, + ) -> Option { + let mut storage = self.pool.access_storage().await; + storage + .protocol_versions_dal() + .get_protocol_upgrade_tx(version_id) + .await + } } /// Sleeps until the current timestamp is larger than the provided `timestamp`. /// /// Returns the current timestamp after the sleep. It is guaranteed to be larger than `timestamp`. -async fn sleep_past(timestamp: u64) -> u64 { +async fn sleep_past(timestamp: u64, miniblock: MiniblockNumber) -> u64 { let mut current_timestamp_millis = millis_since_epoch(); let mut current_timestamp = (current_timestamp_millis / 1_000) as u64; match timestamp.cmp(¤t_timestamp) { cmp::Ordering::Less => return current_timestamp, cmp::Ordering::Equal => { vlog::info!( - "Current timestamp {} is equal to previous L1 batch timestamp; waiting until \ + "Current timestamp {} for miniblock #{miniblock} is equal to previous miniblock timestamp; waiting until \ timestamp increases", extractors::display_timestamp(current_timestamp) ); @@ -242,7 +285,7 @@ async fn sleep_past(timestamp: u64) -> u64 { // system time, or if it is buggy. Thus, a one-time error could require no actions if L1 batches // are expected to be generated frequently. 
vlog::error!( - "Previous L1 batch timestamp {} is larger than the current timestamp {}", + "Previous miniblock timestamp {} is larger than the current timestamp {} for miniblock #{miniblock}", extractors::display_timestamp(timestamp), extractors::display_timestamp(current_timestamp) ); @@ -280,12 +323,8 @@ impl MempoolIO { l2_erc20_bridge_addr: Address, ) -> Self { let mut storage = pool.access_storage_tagged("state_keeper").await; - let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await; + let last_sealed_l1_batch_header = storage.blocks_dal().get_newest_l1_batch_header().await; let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await; - let base_system_contracts = storage - .storage_dal() - .get_base_system_contracts(config.bootloader_hash, config.default_aa_hash) - .await; drop(storage); Self { @@ -293,24 +332,26 @@ impl MempoolIO { pool, filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - current_l1_batch_number: last_sealed_block_header.number + 1, + current_l1_batch_number: last_sealed_l1_batch_header.number + 1, miniblock_sealer_handle, current_miniblock_number: last_miniblock_number + 1, fee_account: config.fee_account_addr, fair_l2_gas_price: config.fair_l2_gas_price, delay_interval, l1_gas_price_provider, - base_system_contracts, l2_erc20_bridge_addr, } } - async fn load_previous_l1_batch_params(&self) -> (U256, u64) { - vlog::info!("Getting previous L1 batch hash"); + async fn load_previous_l1_batch_hash(&self) -> U256 { + vlog::info!( + "Getting previous L1 batch hash for L1 batch #{}", + self.current_l1_batch_number + ); let stage_started_at: Instant = Instant::now(); let mut storage = self.pool.access_storage_tagged("state_keeper").await; - let (batch_hash, batch_timestamp) = + let (batch_hash, _) = extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number) .await; @@ -319,10 +360,27 @@ impl MempoolIO { stage_started_at.elapsed() ); vlog::info!( - "Got previous L1 batch hash: {batch_hash} and timestamp: {}", - extractors::display_timestamp(batch_timestamp) + "Got previous L1 batch hash: {batch_hash:0>64x} for L1 batch #{}", + self.current_l1_batch_number + ); + batch_hash + } + + async fn load_previous_miniblock_timestamp(&self) -> u64 { + let stage_started_at: Instant = Instant::now(); + + let mut storage = self.pool.access_storage_tagged("state_keeper").await; + let miniblock_timestamp = storage + .blocks_dal() + .get_miniblock_timestamp(self.current_miniblock_number - 1) + .await + .expect("Previous miniblock must be sealed and header saved to DB"); + + metrics::histogram!( + "server.state_keeper.get_prev_miniblock_timestamp", + stage_started_at.elapsed() ); - (batch_hash, batch_timestamp) + miniblock_timestamp } } @@ -348,29 +406,39 @@ mod tests { let past_timestamps = [0, 1_000, 1_000_000_000, seconds_since_epoch() - 10]; for timestamp in past_timestamps { let deadline = Instant::now() + Duration::from_secs(1); - timeout_at(deadline.into(), sleep_past(timestamp)) + timeout_at(deadline.into(), sleep_past(timestamp, MiniblockNumber(1))) .await .unwrap(); } let current_timestamp = seconds_since_epoch(); let deadline = Instant::now() + Duration::from_secs(2); - let ts = timeout_at(deadline.into(), sleep_past(current_timestamp)) - .await - .unwrap(); + let ts = timeout_at( + deadline.into(), + sleep_past(current_timestamp, MiniblockNumber(1)), + ) + .await + .unwrap(); assert!(ts > current_timestamp); let future_timestamp = seconds_since_epoch() 
+ 1; let deadline = Instant::now() + Duration::from_secs(3); - let ts = timeout_at(deadline.into(), sleep_past(future_timestamp)) - .await - .unwrap(); + let ts = timeout_at( + deadline.into(), + sleep_past(future_timestamp, MiniblockNumber(1)), + ) + .await + .unwrap(); assert!(ts > future_timestamp); let future_timestamp = seconds_since_epoch() + 1; let deadline = Instant::now() + Duration::from_millis(100); // ^ This deadline is too small (we need at least 1_000ms) - let result = timeout_at(deadline.into(), sleep_past(future_timestamp)).await; + let result = timeout_at( + deadline.into(), + sleep_past(future_timestamp, MiniblockNumber(1)), + ) + .await; assert!(result.is_err()); } } diff --git a/core/bin/zksync_core/src/state_keeper/io/mod.rs b/core/bin/zksync_core/src/state_keeper/io/mod.rs index 359d6738aa8f..6cd6bdab6358 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mod.rs @@ -11,7 +11,10 @@ use vm::zk_evm::block_properties::BlockProperties; use vm::VmBlockResult; use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; -use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction}; +use zksync_types::{ + block::MiniblockReexecuteData, protocol_version::ProtocolUpgradeTx, L1BatchNumber, + MiniblockNumber, ProtocolVersionId, Transaction, +}; pub(crate) mod common; pub(crate) mod mempool; @@ -32,6 +35,7 @@ pub struct L1BatchParams { pub context_mode: BlockContextMode, pub properties: BlockProperties, pub base_system_contracts: BaseSystemContracts, + pub protocol_version: ProtocolVersionId, } /// Contains information about the un-synced execution state: @@ -48,7 +52,7 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) params: L1BatchParams, /// List of miniblocks and corresponding transactions that were executed within batch. - pub(crate) txs: Vec<(MiniblockNumber, Vec)>, + pub(crate) pending_miniblocks: Vec, } /// `StateKeeperIO` provides the interactive layer for the state keeper: @@ -68,7 +72,11 @@ pub trait StateKeeperIO: 'static + Send { async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option; /// Blocks for up to `max_wait` until the parameters for the next miniblock are available. /// Right now it's only a timestamp. - async fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option; + async fn wait_for_new_miniblock_params( + &mut self, + max_wait: Duration, + prev_miniblock_timestamp: u64, + ) -> Option; /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option; @@ -86,6 +94,11 @@ pub trait StateKeeperIO: 'static + Send { updates_manager: UpdatesManager, block_context: DerivedBlockContext, ); + /// Loads protocol version of the previous l1 batch. + async fn load_previous_batch_version_id(&mut self) -> Option; + /// Loads protocol upgrade tx for given version. 
+ async fn load_upgrade_tx(&mut self, version_id: ProtocolVersionId) + -> Option; } impl fmt::Debug for dyn StateKeeperIO { diff --git a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs index b6c793b9050c..8136581108fc 100644 --- a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs @@ -26,13 +26,14 @@ use zksync_types::{ }, zk_evm::aux_structures::LogQuery, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - Address, ExecuteTransactionCommon, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, - StorageLogQuery, StorageValue, Transaction, VmEvent, H256, U256, + AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, MiniblockNumber, StorageKey, + StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, H256, U256, }; -use zksync_utils::{miniblock_hash, time::millis_since_epoch}; +use zksync_utils::{miniblock_hash, time::millis_since_epoch, u256_to_h256}; use crate::state_keeper::{ extractors, + io::common::set_missing_initial_writes_indices, updates::{L1BatchUpdates, MiniblockSealCommand, UpdatesManager}, }; @@ -194,7 +195,6 @@ impl UpdatesManager { extractors::display_timestamp(prev_timestamp), extractors::display_timestamp(timestamp) ); - let block_context_properties = BlockContextMode::NewBlock(block_context, prev_hash); let l1_batch = L1BatchHeader { number: current_l1_batch_number, @@ -207,20 +207,25 @@ impl UpdatesManager { l2_to_l1_logs: full_result.l2_to_l1_logs, l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), bloom: Default::default(), - initial_bootloader_contents: Self::initial_bootloader_memory( - &self.l1_batch, - block_context_properties, - ), used_contract_hashes: full_result.used_contract_hashes, base_fee_per_gas: block_context.base_fee, l1_gas_price: self.l1_gas_price(), l2_fair_gas_price: self.fair_l2_gas_price(), base_system_contracts_hashes: self.base_system_contract_hashes(), + protocol_version: Some(self.protocol_version()), }; + let block_context_properties = BlockContextMode::NewBlock(block_context, prev_hash); + let initial_bootloader_contents = + Self::initial_bootloader_memory(&self.l1_batch, block_context_properties); + transaction .blocks_dal() - .insert_l1_batch(&l1_batch, self.l1_batch.l1_gas_count) + .insert_l1_batch( + &l1_batch, + &initial_bootloader_contents, + self.l1_batch.l1_gas_count, + ) .await; progress.end_stage("insert_l1_batch_header", None); @@ -248,9 +253,36 @@ impl UpdatesManager { .await; progress.end_stage("insert_protective_reads", Some(protective_reads.len())); + let deduplicated_writes_hashed_keys: Vec<_> = deduplicated_writes + .iter() + .map(|log| { + H256(StorageKey::raw_hashed_key( + &log.address, + &u256_to_h256(log.key), + )) + }) + .collect(); + let non_initial_writes = transaction + .storage_logs_dedup_dal() + .filter_written_slots(&deduplicated_writes_hashed_keys) + .await; + progress.end_stage("filter_written_slots", Some(deduplicated_writes.len())); + + let written_storage_keys: Vec<_> = deduplicated_writes + .iter() + .filter_map(|log| { + let key = StorageKey::new(AccountTreeId::new(log.address), u256_to_h256(log.key)); + (!non_initial_writes.contains(&key.hashed_key())).then_some(key) + }) + .collect(); + + // One-time migration completion for initial writes' indices. 
+ set_missing_initial_writes_indices(&mut transaction).await; + progress.end_stage("set_missing_initial_writes_indices", None); + transaction .storage_logs_dedup_dal() - .insert_initial_writes(current_l1_batch_number, &deduplicated_writes) + .insert_initial_writes(current_l1_batch_number, &written_storage_keys) .await; progress.end_stage("insert_initial_writes", Some(deduplicated_writes.len())); @@ -337,7 +369,7 @@ impl UpdatesManager { started_at.elapsed(), ); vlog::debug!( - "sealed l1 batch {current_l1_batch_number} in {:?}", + "Sealed L1 batch {current_l1_batch_number} in {:?}", started_at.elapsed() ); } @@ -387,6 +419,7 @@ impl MiniblockSealCommand { l1_gas_price: self.l1_gas_price, l2_fair_gas_price: self.fair_l2_gas_price, base_system_contracts_hashes: self.base_system_contracts_hashes, + protocol_version: Some(self.protocol_version), }; transaction @@ -529,6 +562,8 @@ impl MiniblockSealCommand { for (key, (_, value)) in unique_updates { if *key.account().address() == ACCOUNT_CODE_STORAGE_ADDRESS { let bytecode_hash = *value; + // For now, we expected that if the `bytecode_hash` is zero, the contract was not deployed + // in the first place, so we don't do anything if bytecode_hash != H256::zero() { count += 1; } @@ -601,7 +636,7 @@ impl MiniblockSealCommand { ); vlog::debug!( - "sealed miniblock {miniblock_number} in {:?}", + "Sealed miniblock {miniblock_number} in {:?}", started_at.elapsed() ); } diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs index ee9b1ac14b51..1968f6b54b7a 100644 --- a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs @@ -9,16 +9,16 @@ use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{ block::BlockGasCount, tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, - MiniblockNumber, StorageKey, VmEvent, H256, U256, + MiniblockNumber, ProtocolVersionId, StorageKey, StorageLog, VmEvent, H256, U256, }; -use zksync_utils::time::millis_since_epoch; +use zksync_utils::time::seconds_since_epoch; use crate::state_keeper::{ - io::{MiniblockSealer, StateKeeperIO}, + io::{common::set_missing_initial_writes_indices, MiniblockSealer, StateKeeperIO}, mempool_actor::l2_tx_filter, tests::{ - create_block_metadata, create_execution_result, create_transaction, create_updates_manager, - default_block_context, default_vm_block_result, Query, + create_execution_result, create_l1_batch_metadata, create_transaction, + create_updates_manager, default_block_context, default_vm_block_result, Query, }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }; @@ -122,17 +122,21 @@ async fn test_filter_with_no_pending_batch(connection_pool: ConnectionPool) { assert_eq!(mempool.filter(), &want_filter); } -async fn test_l1_batch_timestamps_are_distinct( +async fn test_timestamps_are_distinct( connection_pool: ConnectionPool, - prev_l1_batch_timestamp: u64, + prev_miniblock_timestamp: u64, + delay_prev_miniblock_compared_to_batch: bool, ) { let mut tester = Tester::new(); tester.genesis(&connection_pool).await; - tester.set_timestamp(prev_l1_batch_timestamp); + tester.set_timestamp(prev_miniblock_timestamp); tester .insert_miniblock(&connection_pool, 1, 5, 55, 555) .await; + if delay_prev_miniblock_compared_to_batch { + tester.set_timestamp(prev_miniblock_timestamp - 1); + } tester.insert_sealed_batch(&connection_pool, 1).await; let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool, 
1).await; @@ -147,19 +151,33 @@ async fn test_l1_batch_timestamps_are_distinct( .wait_for_new_batch_params(Duration::from_secs(10)) .await .expect("No batch params in the test mempool"); - assert!(batch_params.context_mode.timestamp() > prev_l1_batch_timestamp); + assert!(batch_params.context_mode.timestamp() > prev_miniblock_timestamp); } #[db_test] async fn l1_batch_timestamp_basics(connection_pool: ConnectionPool) { - let current_timestamp = (millis_since_epoch() / 1_000) as u64; - test_l1_batch_timestamps_are_distinct(connection_pool, current_timestamp).await; + let current_timestamp = seconds_since_epoch(); + test_timestamps_are_distinct(connection_pool, current_timestamp, false).await; } #[db_test] async fn l1_batch_timestamp_with_clock_skew(connection_pool: ConnectionPool) { - let current_timestamp = (millis_since_epoch() / 1_000) as u64; - test_l1_batch_timestamps_are_distinct(connection_pool, current_timestamp + 2).await; + let current_timestamp = seconds_since_epoch(); + test_timestamps_are_distinct(connection_pool, current_timestamp + 2, false).await; +} + +#[db_test] +async fn l1_batch_timestamp_respects_prev_miniblock(connection_pool: ConnectionPool) { + let current_timestamp = seconds_since_epoch(); + test_timestamps_are_distinct(connection_pool, current_timestamp, true).await; +} + +#[db_test] +async fn l1_batch_timestamp_respects_prev_miniblock_with_clock_skew( + connection_pool: ConnectionPool, +) { + let current_timestamp = seconds_since_epoch(); + test_timestamps_are_distinct(connection_pool, current_timestamp + 2, true).await; } #[db_test] @@ -215,9 +233,13 @@ async fn processing_storage_logs_when_sealing_miniblock(connection_pool: Connect fair_l2_gas_price: 100, base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: ProtocolVersionId::default(), l2_erc20_bridge_addr: Address::default(), }; let mut conn = connection_pool.access_storage_tagged("state_keeper").await; + conn.protocol_versions_dal() + .save_protocol_version(Default::default()) + .await; seal_command.seal(&mut conn).await; // Manually mark the miniblock as executed so that getting touched slots from it works @@ -280,9 +302,13 @@ async fn processing_events_when_sealing_miniblock(pool: ConnectionPool) { fair_l2_gas_price: 100, base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: ProtocolVersionId::default(), l2_erc20_bridge_addr: Address::default(), }; let mut conn = pool.access_storage_tagged("state_keeper").await; + conn.protocol_versions_dal() + .save_protocol_version(Default::default()) + .await; seal_command.seal(&mut conn).await; let logs = conn @@ -308,9 +334,9 @@ async fn test_miniblock_and_l1_batch_processing( tester.genesis(&pool).await; let mut conn = pool.access_storage_tagged("state_keeper").await; // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. 
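// Aside: the timestamp tests above all check one property: whatever the previous
// miniblock's timestamp is (even slightly in the future due to clock skew), the
// next miniblock's timestamp must be strictly larger. A pure-logic sketch of that
// contract; `next_timestamp` is an illustrative stand-in, not real code:
fn next_timestamp(now: u64, prev_miniblock_timestamp: u64) -> u64 {
    // Take the later of "now" and the previous timestamp, then step past it.
    now.max(prev_miniblock_timestamp) + 1
}

fn main() {
    let now = 1_700_000_000_u64;
    for prev in [0, now - 10, now, now + 2] {
        let next = next_timestamp(now, prev);
        assert!(next > prev && next >= now);
    }
}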
- let block_metadata = create_block_metadata(0); + let metadata = create_l1_batch_metadata(0); conn.blocks_dal() - .save_blocks_metadata(L1BatchNumber(0), &block_metadata, H256::zero()) + .save_l1_batch_metadata(L1BatchNumber(0), &metadata, H256::zero()) .await; drop(conn); @@ -321,8 +347,11 @@ async fn test_miniblock_and_l1_batch_processing( let mut block_context = default_block_context(); block_context.context.block_timestamp = 100; // change timestamp to pass monotonicity check let block_context_mode = BlockContextMode::NewBlock(block_context, 0.into()); - let mut updates = - UpdatesManager::new(&block_context_mode, BaseSystemContractsHashes::default()); + let mut updates = UpdatesManager::new( + &block_context_mode, + BaseSystemContractsHashes::default(), + ProtocolVersionId::default(), + ); let tx = create_transaction(10, 100); updates.extend_from_executed_transaction( @@ -348,7 +377,7 @@ async fn test_miniblock_and_l1_batch_processing( ); let l1_batch_header = conn .blocks_dal() - .get_block_header(L1BatchNumber(1)) + .get_l1_batch_header(L1BatchNumber(1)) .await .unwrap(); assert_eq!(l1_batch_header.l2_tx_count, 1); @@ -441,3 +470,136 @@ async fn miniblock_sealer_handle_parallel_processing(pool: ConnectionPool) { sealer_handle.wait_for_all_commands().await; } + +#[db_test] +async fn initial_writes_index_migration(pool: ConnectionPool) { + let tester = Tester::new(); + + // Genesis is needed for proper mempool initialization. + tester.genesis(&pool).await; + let (last_index, _) = { + let mut storage = pool.access_storage().await; + storage + .storage_logs_dedup_dal() + .max_set_enumeration_index() + .await + .unwrap() + }; + + tester.insert_miniblock(&pool, 1, 100, 100, 100).await; + tester.insert_sealed_batch(&pool, 1).await; + let keys1: Vec<_> = vec![2u64, 3, 5, 7] + .into_iter() + .map(|k| { + StorageKey::new( + AccountTreeId::new(Address::from_low_u64_be(1)), + H256::from_low_u64_be(k), + ) + }) + .collect(); + let storage_logs: Vec<_> = keys1 + .iter() + .map(|k| StorageLog::new_write_log(*k, H256::random())) + .collect(); + { + let mut storage = pool.access_storage().await; + storage + .storage_logs_dal() + .insert_storage_logs(1u32.into(), &[(H256::zero(), storage_logs)]) + .await; + storage + .storage_logs_dedup_dal() + .insert_initial_writes(1u32.into(), &keys1) + .await; + } + + tester.insert_miniblock(&pool, 2, 100, 100, 100).await; + tester.insert_sealed_batch(&pool, 2).await; + let keys2: Vec<_> = vec![1u64, 4, 6, 8] + .into_iter() + .map(|k| { + StorageKey::new( + AccountTreeId::new(Address::from_low_u64_be(1)), + H256::from_low_u64_be(k), + ) + }) + .collect(); + let storage_logs: Vec<_> = keys2 + .iter() + .map(|k| StorageLog::new_write_log(*k, H256::random())) + .collect(); + { + let mut storage = pool.access_storage().await; + storage + .storage_logs_dal() + .insert_storage_logs(2u32.into(), &[(H256::zero(), storage_logs)]) + .await; + storage + .storage_logs_dedup_dal() + .insert_initial_writes(2u32.into(), &keys2) + .await; + } + + let expected: Vec<_> = keys1 + .iter() + .chain(&keys2) + .enumerate() + .map(|(i, k)| (k.hashed_key(), i as u64 + last_index + 1)) + .collect(); + let actual = { + let mut storage = pool.access_storage().await; + let iw1 = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(1u32.into()) + .await; + let iw2 = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(2u32.into()) + .await; + + iw1.into_iter() + .chain(iw2) + .map(|(key, index)| (key, index.unwrap())) + .collect::>() + }; + assert_eq!(expected, actual); 
+ + { + let mut storage = pool.access_storage().await; + storage.storage_logs_dedup_dal().reset_indices().await; + set_missing_initial_writes_indices(&mut storage).await; + }; + let actual = { + let mut storage = pool.access_storage().await; + let iw1 = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(1u32.into()) + .await; + let iw2 = storage + .storage_logs_dedup_dal() + .initial_writes_for_batch(2u32.into()) + .await; + + iw1.into_iter() + .chain(iw2) + .map(|(key, index)| (key, index.unwrap())) + .collect::>() + }; + assert_eq!(expected, actual); +} + +/// Ensure that subsequent miniblocks that belong to the same L1 batch have different timestamps +#[db_test] +async fn different_timestamp_for_miniblocks_in_same_batch(connection_pool: ConnectionPool) { + let tester = Tester::new(); + + // Genesis is needed for proper mempool initialization. + tester.genesis(&connection_pool).await; + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await; + let current_timestamp = seconds_since_epoch(); + let next_timestamp = mempool + .wait_for_new_miniblock_params(Duration::from_secs(10), current_timestamp) + .await + .unwrap(); + assert!(next_timestamp > current_timestamp); +} diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs index 88d5a9401530..80211e012694 100644 --- a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs @@ -9,11 +9,13 @@ use zksync_dal::ConnectionPool; use zksync_eth_client::clients::mock::MockEthereum; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, PriorityOpId, H256, + protocol_version::L1VerifierConfig, + system_contracts::get_system_smart_contracts, + Address, L1BatchNumber, L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, H256, }; use crate::{ - genesis::create_genesis_block, + genesis::create_genesis_l1_batch, l1_gas_price::GasAdjuster, state_keeper::{io::MiniblockSealer, tests::create_transaction, MempoolGuard, MempoolIO}, }; @@ -98,11 +100,14 @@ impl Tester { pub(super) async fn genesis(&self, pool: &ConnectionPool) { let mut storage = pool.access_storage_tagged("state_keeper").await; if storage.blocks_dal().is_genesis_needed().await { - create_genesis_block( + create_genesis_l1_batch( &mut storage, Address::repeat_byte(0x01), L2ChainId(270), - self.base_system_contracts.clone(), + &self.base_system_contracts, + &get_system_smart_contracts(), + L1VerifierConfig::default(), + Address::zero(), ) .await; } @@ -129,6 +134,7 @@ impl Tester { l1_gas_price, l2_fair_gas_price, base_system_contracts_hashes: self.base_system_contracts.hashes(), + protocol_version: Some(ProtocolVersionId::latest()), }) .await; } @@ -139,13 +145,14 @@ impl Tester { self.current_timestamp, Address::default(), self.base_system_contracts.hashes(), + Default::default(), ); batch_header.is_finished = true; let mut storage = pool.access_storage_tagged("state_keeper").await; storage .blocks_dal() - .insert_l1_batch(&batch_header, Default::default()) + .insert_l1_batch(&batch_header, &[], Default::default()) .await; storage .blocks_dal() diff --git a/core/bin/zksync_core/src/state_keeper/keeper.rs b/core/bin/zksync_core/src/state_keeper/keeper.rs index adfc37d4a02d..69b5b7cb7ac1 100644 --- a/core/bin/zksync_core/src/state_keeper/keeper.rs +++ b/core/bin/zksync_core/src/state_keeper/keeper.rs @@ -4,7 +4,9 @@ use std::time::{Duration, Instant}; use 
vm::TxRevertReason; use zksync_types::{ - storage_writes_deduplicator::StorageWritesDeduplicator, MiniblockNumber, Transaction, + block::MiniblockReexecuteData, l2::TransactionType, protocol_version::ProtocolUpgradeTx, + storage_writes_deduplicator::StorageWritesDeduplicator, + tx::tx_execution_info::TxExecutionStatus, Transaction, }; use crate::gas_tracker::gas_count_from_writes; @@ -80,16 +82,16 @@ impl ZkSyncStateKeeper { // Re-execute pending batch if it exists. Otherwise, initialize a new batch. let PendingBatchData { params, - txs: txs_to_reexecute, + pending_miniblocks, } = match self.io.load_pending_batch().await { Some(params) => { vlog::info!( "There exists a pending batch consisting of {} miniblocks, the first one is {}", - params.txs.len(), + params.pending_miniblocks.len(), params - .txs + .pending_miniblocks .first() - .map(|(number, _)| number) + .map(|miniblock| miniblock.number) .expect("Empty pending block represented as Some") ); params @@ -98,7 +100,7 @@ impl ZkSyncStateKeeper { vlog::info!("There is no open pending batch, starting a new empty batch"); PendingBatchData { params: self.wait_for_new_batch_params().await?, - txs: Vec::new(), + pending_miniblocks: Vec::new(), } } }; @@ -107,13 +109,48 @@ impl ZkSyncStateKeeper { let mut updates_manager = UpdatesManager::new( &l1_batch_params.context_mode, l1_batch_params.base_system_contracts.hashes(), + l1_batch_params.protocol_version, ); + let previous_batch_protocol_version = self.io.load_previous_batch_version_id().await; + let version_changed = match previous_batch_protocol_version { + Some(previous_batch_protocol_version) => { + l1_batch_params.protocol_version != previous_batch_protocol_version + } + // None is only the case for old blocks. Match will be removed when migration will be done. + None => false, + }; + + let mut protocol_upgrade_tx = if pending_miniblocks.is_empty() && version_changed { + self.io + .load_upgrade_tx(l1_batch_params.protocol_version) + .await + } else if !pending_miniblocks.is_empty() && version_changed { + // Sanity check: if `txs_to_reexecute` is not empty and upgrade tx is present for this block + // then it must be the first one in `txs_to_reexecute`. + if self + .io + .load_upgrade_tx(l1_batch_params.protocol_version) + .await + .is_some() + { + let first_tx_to_reexecute = &pending_miniblocks[0].txs[0]; + assert_eq!( + first_tx_to_reexecute.tx_format(), + TransactionType::ProtocolUpgradeTransaction + ) + } + + None + } else { + None + }; + let mut batch_executor = self .batch_executor_base .init_batch(l1_batch_params.clone()) .await; - self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute) + self.restore_state(&batch_executor, &mut updates_manager, pending_miniblocks) .await?; let mut l1_batch_seal_delta: Option = None; @@ -121,7 +158,7 @@ impl ZkSyncStateKeeper { self.check_if_cancelled()?; // This function will run until the batch can be sealed. - self.process_l1_batch(&batch_executor, &mut updates_manager) + self.process_l1_batch(&batch_executor, &mut updates_manager, protocol_upgrade_tx) .await?; // Finish current batch. @@ -129,10 +166,13 @@ impl ZkSyncStateKeeper { self.io.seal_miniblock(&updates_manager).await; // We've sealed the miniblock that we had, but we still need to setup the timestamp // for the fictive miniblock. 
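// Aside: the fictive-miniblock step in context. After the last real miniblock is
// sealed, the keeper fetches one more timestamp (strictly past the previous one)
// and pushes an empty miniblock that closes out the batch. Condensed, with a
// stand-in updates manager:
struct UpdatesManagerLike {
    miniblock_timestamps: Vec<u64>,
}

impl UpdatesManagerLike {
    fn push_miniblock(&mut self, timestamp: u64) {
        self.miniblock_timestamps.push(timestamp);
    }
    fn last_timestamp(&self) -> u64 {
        *self.miniblock_timestamps.last().unwrap()
    }
}

fn main() {
    let mut updates = UpdatesManagerLike { miniblock_timestamps: vec![100] };
    // Stand-in for `wait_for_new_miniblock_params(prev_miniblock_timestamp)`.
    let fictive_timestamp = updates.last_timestamp() + 1;
    updates.push_miniblock(fictive_timestamp);
    // Miniblock timestamps within the batch stay strictly increasing.
    assert!(updates.miniblock_timestamps.windows(2).all(|w| w[0] < w[1]));
}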
- let fictive_miniblock_timestamp = self.wait_for_new_miniblock_params().await?; + let fictive_miniblock_timestamp = self + .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) + .await?; updates_manager.push_miniblock(fictive_miniblock_timestamp); } let block_result = batch_executor.finish_batch().await; + let sealed_batch_protocol_version = updates_manager.protocol_version(); self.io .seal_l1_batch( block_result, @@ -147,14 +187,25 @@ impl ZkSyncStateKeeper { // Start the new batch. l1_batch_params = self.wait_for_new_batch_params().await?; + updates_manager = UpdatesManager::new( &l1_batch_params.context_mode, l1_batch_params.base_system_contracts.hashes(), + l1_batch_params.protocol_version, ); batch_executor = self .batch_executor_base .init_batch(l1_batch_params.clone()) .await; + + let version_changed = l1_batch_params.protocol_version != sealed_batch_protocol_version; + protocol_upgrade_tx = if version_changed { + self.io + .load_upgrade_tx(l1_batch_params.protocol_version) + .await + } else { + None + }; } } @@ -175,11 +226,14 @@ impl ZkSyncStateKeeper { Ok(params) } - async fn wait_for_new_miniblock_params(&mut self) -> Result { + async fn wait_for_new_miniblock_params( + &mut self, + prev_miniblock_timestamp: u64, + ) -> Result { let params = loop { if let Some(params) = self .io - .wait_for_new_miniblock_params(POLL_WAIT_DURATION) + .wait_for_new_miniblock_params(POLL_WAIT_DURATION, prev_miniblock_timestamp) .await { break params; @@ -198,27 +252,37 @@ impl ZkSyncStateKeeper { &mut self, batch_executor: &BatchExecutorHandle, updates_manager: &mut UpdatesManager, - txs_to_reexecute: Vec<(MiniblockNumber, Vec)>, + miniblocks_to_reexecute: Vec, ) -> Result<(), Canceled> { - let miniblocks_count = txs_to_reexecute.len(); - for (idx, (miniblock_number, txs)) in txs_to_reexecute.into_iter().enumerate() { + if miniblocks_to_reexecute.is_empty() { + return Ok(()); + } + + for (index, miniblock) in miniblocks_to_reexecute.into_iter().enumerate() { + // Push any non-first miniblock to updates manager. The first one was pushed when `updates_manager` was initialized. + if index > 0 { + updates_manager.push_miniblock(miniblock.timestamp); + } + + let miniblock_number = miniblock.number; vlog::info!( "Starting to reexecute transactions from sealed miniblock {}", miniblock_number ); - for tx in txs { + for tx in miniblock.txs { let result = batch_executor.execute_tx(tx.clone()).await; let TxExecutionResult::Success { - tx_result, - tx_metrics, - compressed_bytecodes, - .. - } = result else { - panic!( - "Re-executing stored tx failed. Tx: {:?}. Err: {:?}", - tx, - result.err() - ); + tx_result, + tx_metrics, + compressed_bytecodes, + .. + } = result + else { + panic!( + "Re-executing stored tx failed. Tx: {:?}. Err: {:?}", + tx, + result.err() + ); }; let ExecutionMetricsForCriteria { @@ -250,17 +314,14 @@ impl ZkSyncStateKeeper { block_execution_metrics = updates_manager.pending_execution_metrics() ); } - - if idx == miniblocks_count - 1 { - // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. - let new_timestamp = self.wait_for_new_miniblock_params().await?; - updates_manager.push_miniblock(new_timestamp); - } else { - // For all the blocks except the last one we pass 0 as a timestamp, since we don't expect it to be used - // anywhere. Using an obviously wrong value would make bugs easier to spot. 
- updates_manager.push_miniblock(0); - } } + + // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. + let new_timestamp = self + .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) + .await?; + updates_manager.push_miniblock(new_timestamp); + Ok(()) } @@ -268,7 +329,13 @@ impl ZkSyncStateKeeper { &mut self, batch_executor: &BatchExecutorHandle, updates_manager: &mut UpdatesManager, + protocol_upgrade_tx: Option, ) -> Result<(), Canceled> { + if let Some(protocol_upgrade_tx) = protocol_upgrade_tx { + self.process_upgrade_tx(batch_executor, updates_manager, protocol_upgrade_tx) + .await; + } + loop { self.check_if_cancelled()?; if self @@ -290,7 +357,9 @@ impl ZkSyncStateKeeper { ); self.io.seal_miniblock(updates_manager).await; - let new_timestamp = self.wait_for_new_miniblock_params().await?; + let new_timestamp = self + .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) + .await?; vlog::debug!( "Initialized new miniblock #{} (L1 batch #{}) with timestamp {}", self.io.current_miniblock_number(), @@ -301,11 +370,16 @@ impl ZkSyncStateKeeper { } let started_waiting = Instant::now(); + let Some(tx) = self.io.wait_for_next_tx(POLL_WAIT_DURATION).await else { - metrics::histogram!("server.state_keeper.waiting_for_tx", started_waiting.elapsed()); + metrics::histogram!( + "server.state_keeper.waiting_for_tx", + started_waiting.elapsed() + ); vlog::trace!("No new transactions. Waiting!"); continue; }; + metrics::histogram!( "server.state_keeper.waiting_for_tx", started_waiting.elapsed(), @@ -323,7 +397,8 @@ impl ZkSyncStateKeeper { tx_metrics, compressed_bytecodes, .. - } = exec_result else { + } = exec_result + else { unreachable!( "Tx inclusion seal resolution must be a result of a successful tx execution", ); @@ -361,6 +436,65 @@ impl ZkSyncStateKeeper { } } + async fn process_upgrade_tx( + &mut self, + batch_executor: &BatchExecutorHandle, + updates_manager: &mut UpdatesManager, + protocol_upgrade_tx: ProtocolUpgradeTx, + ) { + // Sanity check: protocol upgrade tx must be the first one in the batch. + assert_eq!(updates_manager.pending_executed_transactions_len(), 0); + + let tx: Transaction = protocol_upgrade_tx.into(); + let (seal_resolution, exec_result) = self + .process_one_tx(batch_executor, updates_manager, tx.clone()) + .await; + + match &seal_resolution { + SealResolution::NoSeal | SealResolution::IncludeAndSeal => { + let TxExecutionResult::Success { + tx_result, + tx_metrics, + compressed_bytecodes, + .. + } = exec_result else { + panic!( + "Tx inclusion seal resolution must be a result of a successful tx execution", + ); + }; + + // Despite success of upgrade transaction is not enforced by protocol, + // we panic here because failed upgrade tx is not intended in any case. + if tx_result.status != TxExecutionStatus::Success { + panic!("Failed upgrade tx {:?}", tx.hash()); + } + + let ExecutionMetricsForCriteria { + l1_gas: tx_l1_gas_this_tx, + execution_metrics: tx_execution_metrics, + .. 
+ } = tx_metrics; + updates_manager.extend_from_executed_transaction( + tx, + *tx_result, + compressed_bytecodes, + tx_l1_gas_this_tx, + tx_execution_metrics, + ); + } + SealResolution::ExcludeAndSeal => { + unreachable!("First tx in batch cannot result into `ExcludeAndSeal`"); + } + SealResolution::Unexecutable(reason) => { + panic!( + "Upgrade transaction {:?} is unexecutable: {}", + tx.hash(), + reason + ); + } + }; + } + /// Executes one transaction in the batch executor, and then decides whether the batch should be sealed. /// Batch may be sealed because of one of the following reasons: /// 1. The VM entered an incorrect state (e.g. out of gas). In that case, we must revert the transaction and seal diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs index e9fb5268f7b9..3d01aff3926d 100644 --- a/core/bin/zksync_core/src/state_keeper/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/mod.rs @@ -21,7 +21,10 @@ mod types; pub(crate) mod updates; pub use self::{ - batch_executor::MainBatchExecutorBuilder, keeper::ZkSyncStateKeeper, seal_criteria::SealManager, + batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder, MultiVMConfig}, + io::common::set_missing_initial_writes_indices, + keeper::ZkSyncStateKeeper, + seal_criteria::SealManager, }; pub(crate) use self::{io::MiniblockSealer, mempool_actor::MempoolFetcher, types::MempoolGuard}; @@ -56,6 +59,7 @@ where state_keeper_config.max_allowed_l2_tx_gas_limit.into(), state_keeper_config.save_call_traces, state_keeper_config.validation_computational_gas_limit, + None, // MultiVM is not used on the main node, we always use the latest version. ); let io = MempoolIO::new( diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs index 394370c0199f..202f40670ad5 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs @@ -53,7 +53,7 @@ impl ConditionalSealer { block_data: &SealData, tx_data: &SealData, ) -> SealResolution { - vlog::debug!( + vlog::trace!( "Determining seal resolution for L1 batch #{l1_batch_number} with {tx_count} transactions \ and metrics {:?}", block_data.execution_metrics diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs index 75df352a5bb5..47985dc389bb 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs @@ -26,14 +26,14 @@ impl SealCriterion for GasCriterion { let block_bound = (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() as u32; - if (tx_data.gas_count + new_block_gas_count()).has_greater_than(tx_bound) { + if (tx_data.gas_count + new_block_gas_count()).any_field_greater_than(tx_bound) { SealResolution::Unexecutable("Transaction requires too much gas".into()) } else if block_data .gas_count - .has_greater_than(config.max_single_tx_gas) + .any_field_greater_than(config.max_single_tx_gas) { SealResolution::ExcludeAndSeal - } else if block_data.gas_count.has_greater_than(block_bound) { + } else if block_data.gas_count.any_field_greater_than(block_bound) { SealResolution::IncludeAndSeal } else { SealResolution::NoSeal diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs 
b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs index 0402c6e45162..a6f34f90325c 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -13,7 +13,6 @@ use std::fmt; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ block::BlockGasCount, fee::TransactionExecutionMetrics, @@ -158,18 +157,19 @@ impl SealManager { /// Creates a default pre-configured seal manager for the main node. pub(super) fn new(config: StateKeeperConfig) -> Self { let timeout_batch_sealer = Self::timeout_batch_sealer(config.block_commit_deadline_ms); - let code_hash_batch_sealer = Self::code_hash_batch_sealer(BaseSystemContractsHashes { - bootloader: config.bootloader_hash, - default_aa: config.default_aa_hash, - }); let timeout_miniblock_sealer = Self::timeout_miniblock_sealer(config.miniblock_commit_deadline_ms); + // Currently, it's assumed that timeout is the only criterion for miniblock sealing. + // If this doesn't hold and some miniblocks are sealed in less than 1 second, + // then the state keeper will be blocked waiting for the miniblock timestamp to change. + let miniblock_sealers = vec![timeout_miniblock_sealer]; + let conditional_sealer = ConditionalSealer::new(config); Self::custom( Some(conditional_sealer), - vec![timeout_batch_sealer, code_hash_batch_sealer], - vec![timeout_miniblock_sealer], + vec![timeout_batch_sealer], + miniblock_sealers, ) } @@ -207,34 +207,13 @@ impl SealManager { }) } - /// Creates a sealer function that would seal the batch if the provided base system contract hashes are different - /// from ones in the updates manager. - pub(super) fn code_hash_batch_sealer( - base_system_contracts_hashes: BaseSystemContractsHashes, - ) -> Box<SealerFn> { - const RULE_NAME: &str = "different_code_hashes"; - - Box::new(move |manager| { - // Verify code hashes - let should_seal_code_hashes = - base_system_contracts_hashes != manager.base_system_contract_hashes(); - - if should_seal_code_hashes { - metrics::increment_counter!("server.tx_aggregation.reason", "criterion" => RULE_NAME); - vlog::debug!( - "Decided to seal L1 batch using rule `{RULE_NAME}`; L1 batch code hashes: {:?}, \ - expected code hashes: {:?}", - base_system_contracts_hashes, - manager.base_system_contract_hashes() - ); - } - should_seal_code_hashes - }) - } - /// Creates a sealer function that would seal the miniblock because of the timeout. /// Will only trigger for the non-empty miniblocks.
fn timeout_miniblock_sealer(miniblock_commit_deadline_ms: u64) -> Box { + if miniblock_commit_deadline_ms < 1000 { + panic!("`miniblock_commit_deadline_ms` should be at least 1000, because miniblocks must have different timestamps"); + } + Box::new(move |manager| { !manager.miniblock.executed_transactions.is_empty() && millis_since(manager.miniblock.timestamp) > miniblock_commit_deadline_ms diff --git a/core/bin/zksync_core/src/state_keeper/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/tests/mod.rs index 4bb747911901..8365d120682a 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/mod.rs @@ -16,8 +16,10 @@ use vm::{ use zksync_config::{configs::chain::StateKeeperConfig, constants::ZKPORTER_IS_AVAILABLE}; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_types::{ + aggregated_operations::AggregatedActionType, block::BlockGasCount, - commitment::{BlockMetaParameters, BlockMetadata}, + block::MiniblockReexecuteData, + commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, @@ -25,8 +27,8 @@ use zksync_types::{ vm_trace::{VmExecutionTrace, VmTrace}, zk_evm::aux_structures::{LogQuery, Timestamp}, zk_evm::block_properties::BlockProperties, - Address, L2ChainId, MiniblockNumber, Nonce, StorageLogQuery, StorageLogQueryType, Transaction, - H256, U256, + Address, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, StorageLogQuery, + StorageLogQueryType, Transaction, H256, U256, }; use zksync_utils::h256_to_u256; @@ -34,9 +36,7 @@ use self::tester::{ bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec, successful_exec_with_metrics, TestScenario, }; -use crate::gas_tracker::constants::{ - BLOCK_COMMIT_BASE_COST, BLOCK_EXECUTE_BASE_COST, BLOCK_PROVE_BASE_COST, -}; +use crate::gas_tracker::l1_batch_base_cost; use crate::state_keeper::{ keeper::POLL_WAIT_DURATION, seal_criteria::{ @@ -59,8 +59,8 @@ pub(super) fn default_block_properties() -> BlockProperties { } } -pub(super) fn create_block_metadata(number: u32) -> BlockMetadata { - BlockMetadata { +pub(super) fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { + L1BatchMetadata { root_hash: H256::from_low_u64_be(number.into()), rollup_last_leaf_index: u64::from(number) + 20, merkle_root_hash: H256::from_low_u64_be(number.into()), @@ -69,7 +69,7 @@ pub(super) fn create_block_metadata(number: u32) -> BlockMetadata { commitment: H256::from_low_u64_be(number.into()), l2_l1_messages_compressed: vec![], l2_l1_merkle_root: H256::from_low_u64_be(number.into()), - block_meta_params: BlockMetaParameters { + block_meta_params: L1BatchMetaParameters { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: BASE_SYSTEM_CONTRACTS.bootloader.hash, default_aa_code_hash: BASE_SYSTEM_CONTRACTS.default_aa.hash, @@ -121,7 +121,11 @@ pub(super) fn default_block_context() -> DerivedBlockContext { pub(super) fn create_updates_manager() -> UpdatesManager { let block_context = BlockContextMode::NewBlock(default_block_context(), 0.into()); - UpdatesManager::new(&block_context, BaseSystemContractsHashes::default()) + UpdatesManager::new( + &block_context, + BaseSystemContractsHashes::default(), + ProtocolVersionId::default(), + ) } pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L2Tx { @@ -299,11 +303,11 @@ async fn sealed_by_gas() { assert_eq!( updates.l1_batch.l1_gas_count, BlockGasCount { - commit: BLOCK_COMMIT_BASE_COST + 2, - 
prove: BLOCK_PROVE_BASE_COST, - execute: BLOCK_EXECUTE_BASE_COST, + commit: l1_batch_base_cost(AggregatedActionType::Commit) + 2, + prove: l1_batch_base_cost(AggregatedActionType::PublishProofOnchain), + execute: l1_batch_base_cost(AggregatedActionType::Execute), }, - "L1 gas used by a batch should consists of gas used by its txs + basic block gas cost" + "L1 gas used by a batch should consist of gas used by its txs + basic block gas cost" ); }) .run(sealer).await; @@ -497,44 +501,6 @@ async fn bootloader_tip_out_of_gas_flow() { .await; } -#[tokio::test] -async fn bootloader_config_has_been_updated() { - let sealer = SealManager::custom( - None, - vec![SealManager::code_hash_batch_sealer( - BaseSystemContractsHashes { - bootloader: Default::default(), - default_aa: Default::default(), - }, - )], - vec![Box::new(|_| false)], - ); - - let pending_batch = - pending_batch_data(vec![(MiniblockNumber(1), vec![random_tx(1), random_tx(2)])]); - - TestScenario::new() - .load_pending_batch(pending_batch) - .batch_sealed_with("Batch sealed with all 2 tx", |_, updates, _| { - assert_eq!( - updates.l1_batch.executed_transactions.len(), - 2, - "There should be 2 transactions in the batch" - ); - }) - .next_tx("Final tx of batch", random_tx(3), successful_exec()) - .miniblock_sealed("Miniblock with this tx sealed") - .batch_sealed_with("Batch sealed with all 1 tx", |_, updates, _| { - assert_eq!( - updates.l1_batch.executed_transactions.len(), - 1, - "There should be 1 transactions in the batch" - ); - }) - .run(sealer) - .await; -} - #[tokio::test] async fn pending_batch_is_applied() { let config = StateKeeperConfig { @@ -554,8 +520,16 @@ async fn pending_batch_is_applied() { ); let pending_batch = pending_batch_data(vec![ - (MiniblockNumber(1), vec![random_tx(1)]), - (MiniblockNumber(2), vec![random_tx(2)]), + MiniblockReexecuteData { + number: MiniblockNumber(1), + timestamp: 1, + txs: vec![random_tx(1)], + }, + MiniblockReexecuteData { + number: MiniblockNumber(2), + timestamp: 2, + txs: vec![random_tx(2)], + }, ]); // We configured state keeper to use different system contract hashes, so it must seal the pending batch immediately. 
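The tests above now spell out per-miniblock timestamps because the state keeper treats them as strictly increasing: `wait_for_new_miniblock_params` receives the previous miniblock's timestamp, and `miniblock_commit_deadline_ms` must be at least 1000 so that second-granularity timestamps can differ. A minimal sketch of that invariant (the helper name is illustrative; the real IO implementation waits for the wall clock to advance rather than bumping the value):

use std::time::{SystemTime, UNIX_EPOCH};

// Each miniblock must get a timestamp strictly greater than its predecessor's,
// at one-second granularity.
fn next_miniblock_timestamp(prev_miniblock_timestamp: u64) -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before the Unix epoch")
        .as_secs();
    // If the wall clock hasn't advanced past the previous miniblock yet,
    // bump the timestamp; the actual state keeper would wait instead.
    now.max(prev_miniblock_timestamp + 1)
}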
@@ -644,7 +618,11 @@ async fn miniblock_timestamp_after_pending_batch() { })], ); - let pending_batch = pending_batch_data(vec![(MiniblockNumber(1), vec![random_tx(1)])]); + let pending_batch = pending_batch_data(vec![MiniblockReexecuteData { + number: MiniblockNumber(1), + timestamp: 1, + txs: vec![random_tx(1)], + }]); TestScenario::new() .load_pending_batch(pending_batch) @@ -733,3 +711,49 @@ async fn time_is_monotonic() { .run(sealer) .await; } + +#[tokio::test] +async fn protocol_upgrade() { + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let conditional_sealer = Some(ConditionalSealer::with_sealers( + config, + vec![Box::new(SlotsCriterion)], + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], + ); + + TestScenario::new() + .next_tx("First tx", random_tx(1), successful_exec()) + .miniblock_sealed("Miniblock 1") + .increment_protocol_version("Increment protocol version") + .next_tx("Second tx", random_tx(2), successful_exec()) + .miniblock_sealed("Miniblock 2") + .batch_sealed_with("Batch 1", move |_, updates, _| { + assert_eq!( + updates.protocol_version(), + ProtocolVersionId::latest(), + "Should close batch with initial protocol version" + ) + }) + .next_tx("Third tx", random_tx(3), successful_exec()) + .miniblock_sealed_with("Miniblock 3", move |updates| { + assert_eq!( + updates.protocol_version(), + ProtocolVersionId::next(), + "Should open batch with current protocol version" + ) + }) + .next_tx("Fourth tx", random_tx(4), successful_exec()) + .miniblock_sealed("Miniblock 4") + .batch_sealed("Batch 2") + .run(sealer) + .await; +} diff --git a/core/bin/zksync_core/src/state_keeper/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/tests/tester.rs index b3d60438b2f4..44b793bab345 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/tester.rs @@ -3,6 +3,7 @@ use tokio::sync::{mpsc, watch}; use std::{ collections::{HashMap, HashSet, VecDeque}, + convert::TryInto, sync::{Arc, RwLock}, time::{Duration, Instant}, }; @@ -13,8 +14,9 @@ use vm::{ VmBlockResult, }; use zksync_types::{ - tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, Transaction, - H256, U256, + block::MiniblockReexecuteData, protocol_version::ProtocolUpgradeTx, + tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, Transaction, H256, U256, }; use crate::state_keeper::{ @@ -73,6 +75,13 @@ impl TestScenario { self } + /// Increments protocol version returned by IO. + pub(crate) fn increment_protocol_version(mut self, description: &'static str) -> Self { + self.actions + .push_back(ScenarioItem::IncrementProtocolVersion(description)); + self + } + /// Expect the state keeper to request a transaction from IO. /// Adds both a transaction and an outcome of this transaction (that would be returned to the state keeper from the /// batch executor). @@ -267,7 +276,7 @@ pub(crate) fn bootloader_tip_out_of_gas() -> TxExecutionResult { /// Creates a mock `PendingBatchData` object containing the provided sequence of miniblocks. 
pub(crate) fn pending_batch_data( - txs: Vec<(MiniblockNumber, Vec)>, + pending_miniblocks: Vec, ) -> PendingBatchData { let block_properties = default_block_properties(); @@ -287,15 +296,21 @@ pub(crate) fn pending_batch_data( context_mode: BlockContextMode::NewBlock(derived_context, Default::default()), properties: block_properties, base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + protocol_version: ProtocolVersionId::latest(), }; - PendingBatchData { params, txs } + PendingBatchData { + params, + pending_miniblocks, + } } #[allow(clippy::type_complexity, clippy::large_enum_variant)] // It's OK for tests. enum ScenarioItem { /// Configures scenario to repeatedly return `None` to tx requests until the next action from the scenario happens. NoTxsUntilNextAction(&'static str), + /// Increments protocol version in IO state. + IncrementProtocolVersion(&'static str), Tx(&'static str, Transaction, TxExecutionResult), Rollback(&'static str, Transaction), Reject(&'static str, Transaction, Option), @@ -315,6 +330,10 @@ impl std::fmt::Debug for ScenarioItem { Self::NoTxsUntilNextAction(descr) => { f.debug_tuple("NoTxsUntilNextAction").field(descr).finish() } + Self::IncrementProtocolVersion(descr) => f + .debug_tuple("IncrementProtocolVersion") + .field(descr) + .finish(), Self::Tx(descr, tx, result) => f .debug_tuple("Tx") .field(descr) @@ -356,7 +375,11 @@ impl TestBatchExecutorBuilder { // Insert data about the pending batch, if it exists. // All the txs from the pending batch must succeed. if let Some(pending_batch) = &scenario.pending_batch { - for tx in pending_batch.txs.iter().flat_map(|(_, txs)| txs) { + for tx in pending_batch + .pending_miniblocks + .iter() + .flat_map(|miniblock| &miniblock.txs) + { batch_txs.insert(tx.hash(), vec![successful_exec()].into()); } } @@ -500,6 +523,8 @@ pub(crate) struct TestIO { /// Internal flag that is being set if scenario was configured to return `None` to all the transaction /// requests until some other action happens. skipping_txs: bool, + protocol_version: ProtocolVersionId, + previous_batch_protocol_version: ProtocolVersionId, } impl TestIO { @@ -514,6 +539,8 @@ impl TestIO { fee_account: FEE_ACCOUNT, scenario, skipping_txs: false, + protocol_version: ProtocolVersionId::latest(), + previous_batch_protocol_version: ProtocolVersionId::latest(), } } @@ -532,6 +559,14 @@ impl TestIO { return self.pop_next_item(request); } + if matches!(action, ScenarioItem::IncrementProtocolVersion(_)) { + self.protocol_version = (self.protocol_version as u16 + 1) + .try_into() + .expect("Cannot increment latest version"); + // This is a mock item, so pop an actual one for the IO to process. + return self.pop_next_item(request); + } + // If that was a last action, tell the state keeper to stop after that. if self.scenario.actions.is_empty() { self.stop_sender.send(true).unwrap(); @@ -574,10 +609,15 @@ impl StateKeeperIO for TestIO { context_mode: BlockContextMode::NewBlock(derived_context, previous_block_hash), properties: block_properties, base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + protocol_version: self.protocol_version, }) } - async fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option { + async fn wait_for_new_miniblock_params( + &mut self, + _max_wait: Duration, + _prev_miniblock_timestamp: u64, + ) -> Option { Some(self.timestamp) } @@ -658,7 +698,19 @@ impl StateKeeperIO for TestIO { self.miniblock_number += 1; // Seal the fictive miniblock. 
self.batch_number += 1; + self.previous_batch_protocol_version = self.protocol_version; self.timestamp += 1; self.skipping_txs = false; } + + async fn load_previous_batch_version_id(&mut self) -> Option<ProtocolVersionId> { + Some(self.previous_batch_protocol_version) + } + + async fn load_upgrade_tx( + &mut self, + _version_id: ProtocolVersionId, + ) -> Option<ProtocolUpgradeTx> { + None + } } diff --git a/core/bin/zksync_core/src/state_keeper/updates/mod.rs b/core/bin/zksync_core/src/state_keeper/updates/mod.rs index a79d328ba4c1..b537e7eea9c1 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/mod.rs @@ -6,7 +6,7 @@ use zksync_types::{ block::BlockGasCount, storage_writes_deduplicator::StorageWritesDeduplicator, tx::tx_execution_info::{ExecutionMetrics, VmExecutionLogs}, - Address, L1BatchNumber, MiniblockNumber, Transaction, + Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, }; use zksync_utils::bytecode::CompressedBytecodeInfo; @@ -28,6 +28,7 @@ pub struct UpdatesManager { fair_l2_gas_price: u64, base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, + protocol_version: ProtocolVersionId, pub l1_batch: L1BatchUpdates, pub miniblock: MiniblockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, @@ -37,6 +38,7 @@ impl UpdatesManager { pub(crate) fn new( block_context: &BlockContextMode, base_system_contract_hashes: BaseSystemContractsHashes, + protocol_version: ProtocolVersionId, ) -> Self { let batch_timestamp = block_context.timestamp(); let context = block_context.inner_block_context().context; @@ -45,6 +47,7 @@ impl UpdatesManager { l1_gas_price: context.l1_gas_price, fair_l2_gas_price: context.fair_l2_gas_price, base_fee_per_gas: block_context.inner_block_context().base_fee, + protocol_version, base_system_contract_hashes, l1_batch: L1BatchUpdates::new(), miniblock: MiniblockUpdates::new(batch_timestamp), @@ -83,10 +86,15 @@ impl UpdatesManager { fair_l2_gas_price: self.fair_l2_gas_price, base_fee_per_gas: self.base_fee_per_gas, base_system_contracts_hashes: self.base_system_contract_hashes, + protocol_version: self.protocol_version, l2_erc20_bridge_addr, } } + pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + pub(crate) fn extend_from_executed_transaction( &mut self, tx: Transaction, @@ -151,6 +159,7 @@ pub(crate) struct MiniblockSealCommand { pub fair_l2_gas_price: u64, pub base_fee_per_gas: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, + pub protocol_version: ProtocolVersionId, pub l2_erc20_bridge_addr: Address, } diff --git a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs index f77b1b133282..c6488d98daae 100644 --- a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs +++ b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs @@ -5,8 +5,8 @@ use tokio::sync::watch::Receiver; use zksync_dal::ConnectionPool; use zksync_types::{ - aggregated_operations::AggregatedActionType, explorer_api::BlockDetails, L1BatchNumber, - MiniblockNumber, H256, + aggregated_operations::AggregatedActionType, api::BlockDetails, L1BatchNumber, MiniblockNumber, + H256, }; use zksync_web3_decl::{ @@ -67,17 +67,17 @@ impl BatchStatusUpdater { let mut storage = pool.access_storage_tagged("sync_layer").await; let last_executed_l1_batch = storage .blocks_dal() - .get_number_of_last_block_executed_on_eth() + .get_number_of_last_l1_batch_executed_on_eth() .await
.unwrap_or_default(); let last_proven_l1_batch = storage .blocks_dal() - .get_number_of_last_block_proven_on_eth() + .get_number_of_last_l1_batch_proven_on_eth() .await .unwrap_or_default(); let last_committed_l1_batch = storage .blocks_dal() - .get_number_of_last_block_committed_on_eth() + .get_number_of_last_l1_batch_committed_on_eth() .await .unwrap_or_default(); drop(storage); @@ -128,7 +128,7 @@ impl BatchStatusUpdater { .access_storage_tagged("sync_layer") .await .blocks_dal() - .get_newest_block_header() + .get_newest_l1_batch_header() .await .number; @@ -193,13 +193,13 @@ impl BatchStatusUpdater { Self::update_executed_batch(status_changes, &batch_info, &mut last_executed_l1_batch); // Check whether we can skip a part of the range. - if batch_info.commit_tx_hash.is_none() { + if batch_info.base.commit_tx_hash.is_none() { // No committed batches after this one. break; - } else if batch_info.prove_tx_hash.is_none() && batch < last_committed_l1_batch { + } else if batch_info.base.prove_tx_hash.is_none() && batch < last_committed_l1_batch { // The interval between this batch and the last committed one is not proven. batch = last_committed_l1_batch.next(); - } else if batch_info.executed_at.is_none() && batch < last_proven_l1_batch { + } else if batch_info.base.executed_at.is_none() && batch < last_proven_l1_batch { // The interval between this batch and the last proven one is not executed. batch = last_proven_l1_batch.next(); } else { @@ -216,17 +216,17 @@ impl BatchStatusUpdater { batch_info: &BlockDetails, last_committed_l1_batch: &mut L1BatchNumber, ) { - if batch_info.commit_tx_hash.is_some() + if batch_info.base.commit_tx_hash.is_some() && batch_info.l1_batch_number == last_committed_l1_batch.next() { assert!( - batch_info.committed_at.is_some(), + batch_info.base.committed_at.is_some(), "Malformed API response: batch is committed, but has no commit timestamp" ); status_changes.commit.push(BatchStatusChange { number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.commit_tx_hash.unwrap(), - happened_at: batch_info.committed_at.unwrap(), + l1_tx_hash: batch_info.base.commit_tx_hash.unwrap(), + happened_at: batch_info.base.committed_at.unwrap(), }); vlog::info!("Batch {}: committed", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "committed"); @@ -239,17 +239,17 @@ impl BatchStatusUpdater { batch_info: &BlockDetails, last_proven_l1_batch: &mut L1BatchNumber, ) { - if batch_info.prove_tx_hash.is_some() + if batch_info.base.prove_tx_hash.is_some() && batch_info.l1_batch_number == last_proven_l1_batch.next() { assert!( - batch_info.proven_at.is_some(), + batch_info.base.proven_at.is_some(), "Malformed API response: batch is proven, but has no prove timestamp" ); status_changes.prove.push(BatchStatusChange { number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.prove_tx_hash.unwrap(), - happened_at: batch_info.proven_at.unwrap(), + l1_tx_hash: batch_info.base.prove_tx_hash.unwrap(), + happened_at: batch_info.base.proven_at.unwrap(), }); vlog::info!("Batch {}: proven", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "proven"); @@ -262,17 +262,17 @@ impl BatchStatusUpdater { batch_info: &BlockDetails, last_executed_l1_batch: &mut L1BatchNumber, ) { - if batch_info.execute_tx_hash.is_some() + if batch_info.base.execute_tx_hash.is_some() && batch_info.l1_batch_number == last_executed_l1_batch.next() { assert!( - 
batch_info.executed_at.is_some(), + batch_info.base.executed_at.is_some(), "Malformed API response: batch is executed, but has no execute timestamp" ); status_changes.execute.push(BatchStatusChange { number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.execute_tx_hash.unwrap(), - happened_at: batch_info.executed_at.unwrap(), + l1_tx_hash: batch_info.base.execute_tx_hash.unwrap(), + happened_at: batch_info.base.executed_at.unwrap(), }); vlog::info!("Batch {}: executed", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "executed"); @@ -303,7 +303,7 @@ impl BatchStatusUpdater { .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, - AggregatedActionType::CommitBlocks, + AggregatedActionType::Commit, change.l1_tx_hash, change.happened_at, ) @@ -321,7 +321,7 @@ impl BatchStatusUpdater { .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, - AggregatedActionType::PublishProofBlocksOnchain, + AggregatedActionType::PublishProofOnchain, change.l1_tx_hash, change.happened_at, ) @@ -340,7 +340,7 @@ impl BatchStatusUpdater { .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, - AggregatedActionType::ExecuteBlocks, + AggregatedActionType::Execute, change.l1_tx_hash, change.happened_at, ) diff --git a/core/bin/zksync_core/src/sync_layer/external_io.rs b/core/bin/zksync_core/src/sync_layer/external_io.rs index 321b8d0d93bf..01c04db706d3 100644 --- a/core/bin/zksync_core/src/sync_layer/external_io.rs +++ b/core/bin/zksync_core/src/sync_layer/external_io.rs @@ -6,8 +6,8 @@ use async_trait::async_trait; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ - ethabi::Address, l1::L1Tx, l2::L2Tx, L1BatchNumber, L1BlockNumber, MiniblockNumber, - Transaction, H256, U256, + ethabi::Address, l1::L1Tx, l2::L2Tx, protocol_version::ProtocolUpgradeTx, L1BatchNumber, + L1BlockNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, }; use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; @@ -97,13 +97,13 @@ impl ExternalIO { l2_erc20_bridge_addr: Address, ) -> Self { let mut storage = pool.access_storage_tagged("sync_layer").await; - let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await; + let last_sealed_l1_batch_header = storage.blocks_dal().get_newest_l1_batch_header().await; let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await; drop(storage); vlog::info!( "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}", - last_sealed_block_header.number + 1, + last_sealed_l1_batch_header.number + 1, last_miniblock_number + 1, ); @@ -111,7 +111,7 @@ impl ExternalIO { Self { pool, - current_l1_batch_number: last_sealed_block_header.number + 1, + current_l1_batch_number: last_sealed_l1_batch_header.number + 1, current_miniblock_number: last_miniblock_number + 1, actions, sync_state, @@ -184,7 +184,7 @@ impl StateKeeperIO for ExternalIO { let fee_account = storage .blocks_dal() - .get_block_header(self.current_l1_batch_number - 1) + .get_l1_batch_header(self.current_l1_batch_number - 1) .await .unwrap_or_else(|| { panic!( @@ -211,6 +211,7 @@ impl StateKeeperIO for ExternalIO { default_aa, }, operator_address, + protocol_version, }) => { assert_eq!( number, self.current_l1_batch_number, @@ -233,6 +234,7 @@ impl StateKeeperIO for ExternalIO { l1_gas_price, l2_fair_gas_price, base_system_contracts, + 
protocol_version.unwrap_or_default(), )); } Some(other) => { @@ -246,7 +248,11 @@ impl StateKeeperIO for ExternalIO { None } - async fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option { + async fn wait_for_new_miniblock_params( + &mut self, + max_wait: Duration, + _prev_miniblock_timestamp: u64, + ) -> Option { // Wait for the next miniblock to appear in the queue. let actions = &self.actions; for _ in 0..poll_iters(POLL_INTERVAL, max_wait) { @@ -293,7 +299,9 @@ impl StateKeeperIO for ExternalIO { // Whatever item it is, we don't have to poll anymore and may exit, thus double option use. match actions.peek_action() { Some(SyncAction::Tx(_)) => { - let SyncAction::Tx(tx) = actions.pop_action().unwrap() else { unreachable!() }; + let SyncAction::Tx(tx) = actions.pop_action().unwrap() else { + unreachable!() + }; return Some(*tx); } _ => { @@ -307,14 +315,15 @@ impl StateKeeperIO for ExternalIO { async fn rollback(&mut self, tx: Transaction) { // We are replaying the already sealed batches so no rollbacks are expected to occur. - panic!("Rollback requested: {:?}", tx); + panic!("Rollback requested. Transaction hash: {:?}", tx.hash()); } async fn reject(&mut self, tx: &Transaction, error: &str) { // We are replaying the already executed transactions so no rejections are expected to occur. panic!( - "Reject requested because of the following error: {}.\n Transaction is: {:?}", - error, tx + "Reject requested because of the following error: {}.\n Transaction hash is: {:?}", + error, + tx.hash() ); } @@ -410,4 +419,20 @@ impl StateKeeperIO for ExternalIO { self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. self.current_l1_batch_number += 1; } + + async fn load_previous_batch_version_id(&mut self) -> Option { + let mut storage = self.pool.access_storage().await; + storage + .blocks_dal() + .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + .await + } + + async fn load_upgrade_tx( + &mut self, + _version_id: ProtocolVersionId, + ) -> Option { + // External node will fetch upgrade tx from the main node. + None + } } diff --git a/core/bin/zksync_core/src/sync_layer/fetcher.rs b/core/bin/zksync_core/src/sync_layer/fetcher.rs index 826aa45746c6..f55201409655 100644 --- a/core/bin/zksync_core/src/sync_layer/fetcher.rs +++ b/core/bin/zksync_core/src/sync_layer/fetcher.rs @@ -34,7 +34,7 @@ impl MainNodeFetcher { stop_receiver: Receiver, ) -> Self { let mut storage = pool.access_storage_tagged("sync_layer").await; - let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await; + let last_sealed_l1_batch_header = storage.blocks_dal().get_newest_l1_batch_header().await; let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await; // It's important to know whether we have opened a new batch already or just sealed the previous one. @@ -46,10 +46,10 @@ impl MainNodeFetcher { // Decide whether the next batch should be explicitly opened or not. let current_l1_batch = if was_new_batch_open { // No `OpenBatch` action needed. - last_sealed_block_header.number + 1 + last_sealed_l1_batch_header.number + 1 } else { // We need to open the next batch. - last_sealed_block_header.number + last_sealed_l1_batch_header.number }; let client = CachedMainNodeClient::build_client(main_node_url); @@ -133,13 +133,9 @@ impl MainNodeFetcher { let start = Instant::now(); let request_start = Instant::now(); - let Some(block) = self - .client - .sync_l2_block(self.current_miniblock) - .await? 
- else { - return Ok(false); - }; + let Some(block) = self.client.sync_l2_block(self.current_miniblock).await? else { + return Ok(false); + }; metrics::histogram!( "external_node.fetcher.requests", request_start.elapsed(), @@ -168,6 +164,7 @@ impl MainNodeFetcher { l2_fair_gas_price: block.l2_fair_gas_price, base_system_contracts_hashes: block.base_system_contracts_hashes, operator_address: block.operator_address, + protocol_version: None, }); metrics::gauge!("external_node.fetcher.l1_batch", block.l1_batch_number.0 as f64, "status" => "open"); self.current_l1_batch += 1; diff --git a/core/bin/zksync_core/src/sync_layer/genesis.rs b/core/bin/zksync_core/src/sync_layer/genesis.rs index 688b8973eb88..6843d12757ee 100644 --- a/core/bin/zksync_core/src/sync_layer/genesis.rs +++ b/core/bin/zksync_core/src/sync_layer/genesis.rs @@ -1,33 +1,121 @@ use crate::genesis::{ensure_genesis_state, GenesisParams}; +use anyhow::Context; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::StorageProcessor; -use zksync_types::{L1BatchNumber, L2ChainId, H256}; +use zksync_types::{ + api, block::DeployedContract, get_code_key, protocol_version::L1VerifierConfig, + system_contracts::get_system_smart_contracts, AccountTreeId, Address, L1BatchNumber, L2ChainId, + MiniblockNumber, ACCOUNT_CODE_STORAGE_ADDRESS, H256, U64, +}; +use zksync_utils::h256_to_u256; use zksync_web3_decl::{ jsonrpsee::{core::error::Error, http_client::HttpClientBuilder}, - namespaces::ZksNamespaceClient, + namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, }; pub async fn perform_genesis_if_needed( storage: &mut StorageProcessor<'_>, zksync_chain_id: L2ChainId, - base_system_contracts_hashes: BaseSystemContractsHashes, main_node_url: String, -) { +) -> anyhow::Result<()> { let mut transaction = storage.start_transaction().await; - - let genesis_block_hash = ensure_genesis_state( - &mut transaction, - zksync_chain_id, - &GenesisParams::ExternalNode { - base_system_contracts_hashes, - main_node_url: main_node_url.clone(), - }, - ) - .await; + // We check whether genesis is needed before creating the genesis params, so as not to + // slow down the node startup. + let genesis_block_hash = if transaction.blocks_dal().is_genesis_needed().await { + let genesis_params = create_genesis_params(&main_node_url).await?; + ensure_genesis_state(&mut transaction, zksync_chain_id, &genesis_params).await + } else { + transaction + .blocks_dal() + .get_l1_batch_state_root(L1BatchNumber(0)) + .await + .expect("genesis block hash is empty") + }; validate_genesis_state(&main_node_url, genesis_block_hash).await; transaction.commit().await; + + Ok(()) +} + +async fn create_genesis_params(main_node_url: &str) -> anyhow::Result<GenesisParams> { + let base_system_contracts_hashes = fetch_genesis_system_contracts(main_node_url) + .await + .context("Unable to fetch genesis system contracts hashes")?; + + // Load the list of addresses that are known to contain system contracts at any point in time. + // Not all of these addresses are guaranteed to be present in the genesis state, but we'll iterate through + // them and try to fetch the contract bytecode for each of them.
+ let system_contract_addresses: Vec<_> = get_system_smart_contracts() + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); + + // These have to be the *initial* base contract hashes of the main node + // (those that were used during genesis), not necessarily the current ones. + let base_system_contracts = + fetch_base_system_contracts(main_node_url, base_system_contracts_hashes) + .await + .context("Failed to fetch base system contracts from main node")?; + + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + let first_validator = client + .get_block_details(MiniblockNumber(0)) + .await + .context("Unable to fetch genesis block details")? + .context("Failed to fetch genesis miniblock")? + .operator_address; + + // In the EN, we don't know what the system contracts were at the genesis state. + // We know the list of addresses where these contracts *may have been* deployed. + // So, to collect the list of system contracts, we compute the corresponding storage slots and request + // the state at genesis block to fetch the hash of the corresponding contract. + // Then, we can fetch the factory dependency bytecode to fully recover the contract. + let mut system_contracts: Vec<DeployedContract> = + Vec::with_capacity(system_contract_addresses.len()); + const GENESIS_BLOCK: api::BlockIdVariant = + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(U64([0]))); + for system_contract_address in system_contract_addresses { + let code_key = get_code_key(&system_contract_address); + let code_hash = client + .get_storage_at( + ACCOUNT_CODE_STORAGE_ADDRESS, + h256_to_u256(*code_key.key()), + Some(GENESIS_BLOCK), + ) + .await + .context("Unable to query storage at genesis state")?; + let Some(bytecode) = client + .get_bytecode_by_hash(code_hash) + .await + .context("Unable to query system contract bytecode")? + else { + // It's OK for some of the contracts to be absent. + // If this is a bug, the genesis root hash won't match. + vlog::debug!("System contract with address {system_contract_address:?} is absent at genesis state"); + continue; + }; + let contract = DeployedContract::new(AccountTreeId::new(system_contract_address), bytecode); + system_contracts.push(contract); + } + assert!( + !system_contracts.is_empty(), + "No system contracts were fetched: this is a bug" + ); + + // Use default L1 verifier config and verifier address for genesis as they are not used by EN. + let first_l1_verifier_config = L1VerifierConfig::default(); + let first_verifier_address = Address::default(); + Ok(GenesisParams { + base_system_contracts, + system_contracts, + first_validator, + first_l1_verifier_config, + first_verifier_address, + }) } // When running an external node, we want to make sure we have the same @@ -40,7 +128,10 @@ async fn validate_genesis_state(main_node_url: &str, root_hash: H256) { .expect("couldn't get genesis block from the main node") .expect("main node did not return a genesis block"); - let genesis_block_hash = genesis_block.root_hash.expect("empty genesis block hash"); + let genesis_block_hash = genesis_block + .base + .root_hash + .expect("empty genesis block hash"); if genesis_block_hash != root_hash { panic!( @@ -50,6 +141,18 @@ async fn validate_genesis_state(main_node_url: &str, root_hash: H256) { } } +async fn fetch_genesis_system_contracts( + main_node_url: &str, +) -> Result<BaseSystemContractsHashes, Error> { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + let hashes = client + .sync_l2_block(zksync_types::MiniblockNumber(0), false) + .await?
+ .expect("No genesis block on the main node") + .base_system_contracts_hashes; + Ok(hashes) +} + pub async fn fetch_system_contract_by_hash( main_node_url: &str, hash: H256, diff --git a/core/bin/zksync_core/src/sync_layer/sync_action.rs b/core/bin/zksync_core/src/sync_layer/sync_action.rs index d11c14fba6d8..24ccecc7bdfe 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_action.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_action.rs @@ -5,7 +5,7 @@ use std::{ }; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction}; /// Action queue is used to communicate between the fetcher and the rest of the external node /// by collecting the fetched data in memory until it gets processed by the different entities. @@ -130,6 +130,7 @@ pub(crate) enum SyncAction { l2_fair_gas_price: u64, base_system_contracts_hashes: BaseSystemContractsHashes, operator_address: Address, + protocol_version: Option, }, Miniblock { number: MiniblockNumber, @@ -165,6 +166,7 @@ mod tests { l2_fair_gas_price: 1, base_system_contracts_hashes: BaseSystemContractsHashes::default(), operator_address: Default::default(), + protocol_version: Some(ProtocolVersionId::latest()), } } @@ -262,7 +264,10 @@ mod tests { ]; for (idx, (sequence, expected_err)) in test_vector.into_iter().enumerate() { let Err(err) = ActionQueue::check_action_sequence(&sequence) else { - panic!("Invalid sequence passed the test. Sequence #{}, expected error: {}", idx, expected_err); + panic!( + "Invalid sequence passed the test. Sequence #{}, expected error: {}", + idx, expected_err + ); }; assert!( err.starts_with(expected_err), diff --git a/core/bin/zksync_core/src/sync_layer/sync_state.rs b/core/bin/zksync_core/src/sync_layer/sync_state.rs index 6cac8bd70b76..83027a446723 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_state.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_state.rs @@ -81,13 +81,10 @@ impl SyncState { if let (Some(main_node_block), Some(local_block)) = (inner.main_node_block, inner.local_block) { - let Some(block_diff) = main_node_block - .0 - .checked_sub(local_block.0) - else { - // We're ahead of the main node, this situation is handled by the reorg detector. - return (true, Some(0)); - }; + let Some(block_diff) = main_node_block.0.checked_sub(local_block.0) else { + // We're ahead of the main node, this situation is handled by the reorg detector. 
+ return (true, Some(0)); + }; (block_diff <= SYNC_MINIBLOCK_DELTA, Some(block_diff)) } else { (false, None) diff --git a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs index 0c772221feb5..d2a814fa48db 100644 --- a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs +++ b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs @@ -28,7 +28,7 @@ use zksync_types::{ witness::oracle::VmWitnessOracle, SchedulerCircuitInstanceWitness, }, - Address, L1BatchNumber, U256, + Address, L1BatchNumber, ProtocolVersionId, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -61,6 +61,7 @@ pub struct BasicWitnessGeneratorJob { pub struct BasicWitnessGenerator { config: WitnessGeneratorConfig, object_store: Arc<dyn ObjectStore>, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, } @@ -69,12 +70,14 @@ impl BasicWitnessGenerator { pub async fn new( config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, ) -> Self { Self { config, object_store: store_factory.create_store().await.into(), + protocol_versions, connection_pool, prover_connection_pool, } } @@ -156,6 +159,7 @@ impl JobProcessor for BasicWitnessGenerator { self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, + &self.protocol_versions, ) .await { @@ -261,7 +265,16 @@ async fn update_database( ) { let mut prover_connection = prover_connection_pool.access_storage().await; let mut transaction = prover_connection.start_transaction().await; - + let protocol_version = transaction + .witness_generator_dal() + .protocol_version_for_l1_batch(block_number) + .await + .unwrap_or_else(|| { + panic!( + "No protocol version exists for l1 batch {} for basic circuits", + block_number.0 + ) + }); transaction .witness_generator_dal() .create_aggregation_jobs( @@ -270,6 +283,7 @@ async fn update_database( &blob_urls.basic_circuits_inputs_url, blob_urls.circuit_types_and_urls.len(), &blob_urls.scheduler_witness_url, + protocol_version, ) .await; transaction @@ -278,6 +292,7 @@ async fn update_database( block_number, blob_urls.circuit_types_and_urls, AggregationRound::BasicCircuits, + protocol_version, ) .await; transaction @@ -338,31 +353,36 @@ async fn save_artifacts( pub async fn build_basic_circuits_witness_generator_input( connection_pool: ConnectionPool, witness_merkle_input: PrepareBasicCircuitsJob, - block_number: L1BatchNumber, + l1_batch_number: L1BatchNumber, ) -> BasicCircuitWitnessGeneratorInput { let mut connection = connection_pool.access_storage().await; let block_header = connection .blocks_dal() - .get_block_header(block_number) + .get_l1_batch_header(l1_batch_number) .await .unwrap(); - let previous_block_header = connection + let initial_heap_content = connection .blocks_dal() - .get_block_header(block_number - 1) + .get_initial_bootloader_heap(l1_batch_number) + .await + .unwrap(); + let (_, previous_block_timestamp) = connection + .blocks_dal() + .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) .await .unwrap(); let previous_block_hash = connection .blocks_dal() - .get_block_state_root(block_number - 1) + .get_l1_batch_state_root(l1_batch_number - 1) .await .expect("cannot generate witness before the root hash is computed"); BasicCircuitWitnessGeneratorInput { - block_number, - previous_block_timestamp: previous_block_header.timestamp, +
block_number: l1_batch_number, + previous_block_timestamp, previous_block_hash, block_timestamp: block_header.timestamp, used_bytecodes_hashes: block_header.used_contract_hashes, - initial_heap_content: block_header.initial_bootloader_contents, + initial_heap_content, merkle_paths_input: witness_merkle_input, } } @@ -380,7 +400,7 @@ pub async fn generate_witness( let mut connection = connection_pool.access_storage().await; let header = connection .blocks_dal() - .get_block_header(input.block_number) + .get_l1_batch_header(input.block_number) .await .unwrap(); let bootloader_code_bytes = connection diff --git a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs index e41786353e41..62280926733d 100644 --- a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs @@ -16,7 +16,7 @@ use zksync_types::{ encodings::recursion_request::RecursionRequest, encodings::QueueSimulator, witness, witness::oracle::VmWitnessOracle, LeafAggregationOutputDataWitness, }, - L1BatchNumber, + L1BatchNumber, ProtocolVersionId, }; use zksync_verification_key_server::{ get_ordered_vks_for_basic_circuits, get_vks_for_basic_circuits, get_vks_for_commitment, @@ -48,6 +48,7 @@ pub struct LeafAggregationWitnessGeneratorJob { pub struct LeafAggregationWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box<dyn ObjectStore>, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, } @@ -56,12 +57,14 @@ impl LeafAggregationWitnessGenerator { pub async fn new( config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, ) -> Self { Self { config, object_store: store_factory.create_store().await, + protocol_versions, connection_pool, prover_connection_pool, } } @@ -100,6 +103,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, + &self.protocol_versions, ) .await { @@ -245,12 +249,23 @@ async fn update_database( &blob_urls.aggregation_outputs_url, ) .await; + let system_version = transaction + .witness_generator_dal() + .protocol_version_for_l1_batch(block_number) + .await + .unwrap_or_else(|| { + panic!( + "No protocol version exists for l1 batch {} for leaf agg", + block_number.0 + ) + }); transaction .prover_dal() .insert_prover_jobs( block_number, blob_urls.circuit_types_and_urls, AggregationRound::LeafAggregation, + system_version, ) .await; transaction diff --git a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs index 0f7f67d982ca..ac9009a1ea8e 100644 --- a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs @@ -25,7 +25,7 @@ use zksync_types::{ }, NodeAggregationOutputDataWitness, }, - L1BatchNumber, + L1BatchNumber, ProtocolVersionId, }; use zksync_verification_key_server::{ get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, @@ -55,6 +55,7 @@ pub struct NodeAggregationWitnessGeneratorJob { pub struct NodeAggregationWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box<dyn ObjectStore>, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, } @@ -63,12 +64,14 @@ impl NodeAggregationWitnessGenerator { pub async
fn new( config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, ) -> Self { Self { config, object_store: store_factory.create_store().await, + protocol_versions, connection_pool, prover_connection_pool, } } @@ -108,6 +111,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, + &self.protocol_versions, ) .await { @@ -285,12 +289,23 @@ async fn update_database( .witness_generator_dal() .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url) .await; + let protocol_version = transaction + .witness_generator_dal() + .protocol_version_for_l1_batch(block_number) + .await + .unwrap_or_else(|| { + panic!( + "No protocol version exists for l1 batch {} for node agg", + block_number.0 + ) + }); transaction .prover_dal() .insert_prover_jobs( block_number, blob_urls.circuit_types_and_urls, AggregationRound::NodeAggregation, + protocol_version, ) .await; transaction diff --git a/core/bin/zksync_core/src/witness_generator/scheduler.rs b/core/bin/zksync_core/src/witness_generator/scheduler.rs index 377c05ec4635..f4dce3d2c353 100644 --- a/core/bin/zksync_core/src/witness_generator/scheduler.rs +++ b/core/bin/zksync_core/src/witness_generator/scheduler.rs @@ -19,7 +19,7 @@ use zksync_types::{ sync_vm::scheduler::BlockApplicationWitness, witness::{self, oracle::VmWitnessOracle, recursive_aggregation::erase_vk_type}, }, - L1BatchNumber, + L1BatchNumber, ProtocolVersionId, }; use zksync_verification_key_server::{ get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, @@ -43,6 +43,7 @@ pub struct SchedulerWitnessGeneratorJob { pub struct SchedulerWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box<dyn ObjectStore>, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, } @@ -51,12 +52,14 @@ impl SchedulerWitnessGenerator { pub async fn new( config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory, + protocol_versions: Vec<ProtocolVersionId>, connection_pool: ConnectionPool, prover_connection_pool: ConnectionPool, ) -> Self { Self { config, object_store: store_factory.create_store().await, + protocol_versions, connection_pool, prover_connection_pool, } } @@ -96,13 +99,14 @@ impl JobProcessor for SchedulerWitnessGenerator { self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, + &self.protocol_versions, ) .await { Some(metadata) => { let prev_metadata = connection .blocks_dal() - .get_block_metadata(metadata.block_number - 1) + .get_l1_batch_metadata(metadata.block_number - 1) .await; let previous_aux_hash = prev_metadata .as_ref() @@ -257,7 +261,7 @@ pub async fn update_database( let mut connection = connection_pool.access_storage().await; let block = connection .blocks_dal() - .get_block_metadata(block_number) + .get_l1_batch_metadata(block_number) .await .expect("L1 batch should exist"); @@ -283,12 +287,23 @@ pub async fn update_database( let mut prover_connection = prover_connection_pool.access_storage().await; let mut transaction = prover_connection.start_transaction().await; + let protocol_version = transaction + .witness_generator_dal() + .protocol_version_for_l1_batch(block_number) + .await + .unwrap_or_else(|| { + panic!( + "No protocol version exists for l1 batch {} for scheduler", + block_number.0 + ) + }); transaction .prover_dal() .insert_prover_jobs( block_number,
circuit_types_and_urls, AggregationRound::Scheduler, + protocol_version, ) .await; diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index fcd4ecbdc8e8..29ef4032142d 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -10,5 +10,5 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -web3 = { version= "0.18.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] } +web3 = { version= "0.19.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] } serde = { version = "1.0", features = ["derive"] } diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index 17eb3a95add6..3b4ae3d3fef8 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -15,10 +15,7 @@ zksync_config = { path = "../config", version = "1.0" } zksync_contracts = { path = "../contracts", version = "1.0" } zksync_dal = { path = "../dal", version = "1.0" } zksync_eth_client = { path = "../eth_client", version = "1.0" } -zksync_utils = { path = "../utils", version = "1.0" } -zksync_verification_key_generator_and_server = { path = "../../bin/verification_key_generator_and_server", version = "1.0" } thiserror = "1.0" -serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" futures = { version = "0.3", features = ["compat"] } tokio = { version = "1", features = ["time"] } @@ -26,4 +23,6 @@ async-trait = "0.1" hex = "0.4" convert_case = "0.6.0" backon = "0.4.0" + +[dev-dependencies] assert_matches = "1.5.0" diff --git a/core/lib/circuit_breaker/src/facet_selectors.rs b/core/lib/circuit_breaker/src/facet_selectors.rs index eca1ff1cc81f..3fe2594e70a6 100644 --- a/core/lib/circuit_breaker/src/facet_selectors.rs +++ b/core/lib/circuit_breaker/src/facet_selectors.rs @@ -59,6 +59,7 @@ impl FacetSelectorsChecker { let contract = zksync_contracts::load_contract( format!("{0}/{1}.sol/{1}Facet.json", path_str, facet_name).as_str(), ); + // Filter out `getName` function. 
It's part of the common interface and could otherwise mess up the selectors. let selectors = contract .functions .into_values() diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index 7baafeec914e..ce7fa30686d2 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -7,12 +7,10 @@ use tokio::sync::watch; use zksync_config::configs::chain::CircuitBreakerConfig; use crate::facet_selectors::MismatchedFacetSelectorsError; -use crate::vks::VerifierError; pub mod facet_selectors; pub mod l1_txs; pub mod utils; -pub mod vks; #[cfg(test)] mod tests; @@ -21,8 +19,6 @@ mod tests; pub enum CircuitBreakerError { #[error("System has failed L1 transaction")] FailedL1Transaction, - #[error("Verifier error: {0}")] - Verifier(VerifierError), #[error("Mismatched facet selectors: {0}")] MismatchedFacetSelectors(MismatchedFacetSelectorsError), } diff --git a/core/lib/circuit_breaker/src/tests/mod.rs b/core/lib/circuit_breaker/src/tests/mod.rs index 978406b6f2a9..6cd84c569aa5 100644 --- a/core/lib/circuit_breaker/src/tests/mod.rs +++ b/core/lib/circuit_breaker/src/tests/mod.rs @@ -248,28 +248,6 @@ impl BoundEthInterface for ETHDirectClientMock { } } -#[tokio::test] -async fn retries_for_contract_vk() { - let eth_client = ETHDirectClientMock::new(); - let result: Result = eth_client - .call_main_contract_function("facets", (), None, Default::default(), None) - .await; - - assert_matches!( - result, - Err(Error::EthereumGateway(web3::error::Error::Transport( - TransportError::Code(503), - ))) - ); - - let contracts = ContractsConfig::from_env(); - let config = get_test_circuit_breaker_config(); - let vks_checker = - crate::vks::VksChecker::new(&config, eth_client, contracts.diamond_proxy_addr); - - assert_matches!(vks_checker.get_vk_token_with_retries().await, Ok(_)); -} - #[tokio::test] async fn retries_for_facet_selectors() { let eth_client = ETHDirectClientMock::new(); diff --git a/core/lib/circuit_breaker/src/vks.rs b/core/lib/circuit_breaker/src/vks.rs deleted file mode 100644 index a69f7edf9011..000000000000 --- a/core/lib/circuit_breaker/src/vks.rs +++ /dev/null @@ -1,342 +0,0 @@ -use backon::{ConstantBuilder, Retryable}; -use serde::{Deserialize, Serialize}; -use std::{ - convert::TryInto, - fmt::Debug, - {env, str::FromStr}, -}; -use thiserror::Error; - -use zksync_config::configs::chain::CircuitBreakerConfig; -use zksync_contracts::zksync_contract; -use zksync_eth_client::{types::Error as EthClientError, EthInterface}; -use zksync_types::{ - ethabi::Token, - zkevm_test_harness::bellman::{ - bn256::{Fq, Fq2, Fr, G1Affine, G2Affine}, - CurveAffine, PrimeField, - }, - Address, H160, H256, -}; -use zksync_verification_key_server::get_vk_for_circuit_type; - -// local imports -use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; - -#[derive(Debug, Error)] -pub enum VerifierError { - #[error("Verifier address from the env var is different from the one in Diamond Proxy contract, from env: {address_from_env:?}, from contract: {address_from_contract:?}")] - VerifierAddressMismatch { - address_from_env: Address, - address_from_contract: Address, - }, - #[error("Server has different vks commitment from the one on L1 contract, server: {server_vks:?}, contract: {contract_vks:?}")] - VksCommitment { - server_vks: VksCommitment, - contract_vks: VksCommitment, - }, - #[error("Server has different Scheduler VK from the one on L1 contract, server: {server_vk}, contract: {contract_vk}")] - SchedulerVk { - server_vk: String, -
contract_vk: String, - }, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct VksCommitment { - pub node: H256, - pub leaf: H256, - pub basic_circuits: H256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VerificationKey { - pub n: usize, - pub num_inputs: usize, - - pub gate_setup_commitments: Vec, - pub gate_selectors_commitments: Vec, - pub permutation_commitments: Vec, - - pub lookup_selector_commitment: Option, - pub lookup_tables_commitments: Vec, - pub lookup_table_type_commitment: Option, - - pub non_residues: Vec, - pub g2_elements: [G2Affine; 2], -} - -#[derive(Debug)] -pub struct VksChecker { - pub eth_client: E, - pub config: CircuitBreakerConfig, - pub main_contract: Address, -} - -impl VksChecker { - pub fn new(config: &CircuitBreakerConfig, eth_client: E, main_contract: H160) -> Self { - Self { - eth_client, - config: config.clone(), - main_contract, - } - } - - async fn check_verifier_address(&self) -> Result<(), CircuitBreakerError> { - let address_from_env = - Address::from_str(&env::var("CONTRACTS_VERIFIER_ADDR").unwrap()).unwrap(); - - let address_from_contract: Address = (|| async { - let result: Result = self - .eth_client - .call_contract_function( - "getVerifier", - (), - None, - Default::default(), - None, - self.main_contract, - zksync_contract(), - ) - .await; - result - }) - .retry( - &ConstantBuilder::default() - .with_max_times(self.config.http_req_max_retry_number) - .with_delay(self.config.http_req_retry_interval()), - ) - .await - .unwrap(); - - if address_from_env != address_from_contract { - return Err(CircuitBreakerError::Verifier( - VerifierError::VerifierAddressMismatch { - address_from_env, - address_from_contract, - }, - )); - } - Ok(()) - } - - async fn check_commitments(&self) -> Result<(), CircuitBreakerError> { - let verifier_params_token: Token = (|| async { - let result: Result = self - .eth_client - .call_contract_function( - "getVerifierParams", - (), - None, - Default::default(), - None, - self.main_contract, - zksync_contract(), - ) - .await; - result - }) - .retry( - &ConstantBuilder::default() - .with_max_times(self.config.http_req_max_retry_number) - .with_delay(self.config.http_req_retry_interval()), - ) - .await - .unwrap(); - - let vks_vec: Vec = unwrap_tuple(verifier_params_token) - .into_iter() - .map(|token| H256::from_slice(&token.into_fixed_bytes().unwrap())) - .collect(); - let contract_vks = VksCommitment { - node: vks_vec[0], - leaf: vks_vec[1], - basic_circuits: vks_vec[2], - }; - - let server_vks = VksCommitment { - node: H256::from_str(&env::var("CONTRACTS_VK_COMMITMENT_NODE").unwrap()).unwrap(), - leaf: H256::from_str(&env::var("CONTRACTS_VK_COMMITMENT_LEAF").unwrap()).unwrap(), - basic_circuits: H256::from_str( - &env::var("CONTRACTS_VK_COMMITMENT_BASIC_CIRCUITS").unwrap(), - ) - .unwrap(), - }; - - if contract_vks != server_vks { - return Err(CircuitBreakerError::Verifier( - VerifierError::VksCommitment { - contract_vks, - server_vks, - }, - )); - } - Ok(()) - } - - async fn check_scheduler_vk(&self) -> Result<(), CircuitBreakerError> { - let server_vk = get_vk_for_circuit_type(0); - let server_vk = VerificationKey { - n: server_vk.n, - num_inputs: server_vk.num_inputs, - gate_setup_commitments: server_vk.gate_setup_commitments, - gate_selectors_commitments: server_vk.gate_selectors_commitments, - permutation_commitments: server_vk.permutation_commitments, - lookup_selector_commitment: server_vk.lookup_selector_commitment, - lookup_tables_commitments: 
server_vk.lookup_tables_commitments, - lookup_table_type_commitment: server_vk.lookup_table_type_commitment, - non_residues: server_vk.non_residues, - g2_elements: server_vk.g2_elements, - }; - - let contract_vk = self.get_contract_vk().await; - - if server_vk != contract_vk { - return Err(CircuitBreakerError::Verifier(VerifierError::SchedulerVk { - server_vk: serde_json::to_string_pretty(&server_vk).unwrap(), - contract_vk: serde_json::to_string_pretty(&contract_vk).unwrap(), - })); - } - Ok(()) - } - - async fn get_contract_vk(&self) -> VerificationKey { - let vk_token = self.get_vk_token_with_retries().await.unwrap(); - - parse_vk_token(vk_token) - } - - pub(super) async fn get_vk_token_with_retries(&self) -> Result { - let verifier_contract_address = - Address::from_str(&env::var("CONTRACTS_VERIFIER_ADDR").unwrap()).unwrap(); - let verifier_contract_abi = zksync_contracts::verifier_contract(); - (|| async { - let result: Result = self - .eth_client - .call_contract_function( - "get_verification_key", - (), - None, - Default::default(), - None, - verifier_contract_address, - verifier_contract_abi.clone(), - ) - .await; - - result - }) - .retry( - &ConstantBuilder::default() - .with_max_times(self.config.http_req_max_retry_number) - .with_delay(self.config.http_req_retry_interval()), - ) - .await - } -} - -#[async_trait::async_trait] -impl CircuitBreaker for VksChecker { - async fn check(&self) -> Result<(), CircuitBreakerError> { - self.check_verifier_address().await?; - self.check_commitments().await?; - self.check_scheduler_vk().await?; - Ok(()) - } -} - -fn g1_affine_from_token(token: Token) -> G1Affine { - let tokens = unwrap_tuple(token); - G1Affine::from_xy_unchecked( - Fq::from_str(&tokens[0].clone().into_uint().unwrap().to_string()).unwrap(), - Fq::from_str(&tokens[1].clone().into_uint().unwrap().to_string()).unwrap(), - ) -} - -fn fr_from_token(token: Token) -> Fr { - let tokens = unwrap_tuple(token); - Fr::from_str(&tokens[0].clone().into_uint().unwrap().to_string()).unwrap() -} - -fn g2_affine_from_token(token: Token) -> G2Affine { - let tokens = unwrap_tuple(token); - let tokens0 = tokens[0].clone().into_fixed_array().unwrap(); - let tokens1 = tokens[1].clone().into_fixed_array().unwrap(); - G2Affine::from_xy_unchecked( - Fq2 { - c1: Fq::from_str(&tokens0[0].clone().into_uint().unwrap().to_string()).unwrap(), - c0: Fq::from_str(&tokens0[1].clone().into_uint().unwrap().to_string()).unwrap(), - }, - Fq2 { - c1: Fq::from_str(&tokens1[0].clone().into_uint().unwrap().to_string()).unwrap(), - c0: Fq::from_str(&tokens1[1].clone().into_uint().unwrap().to_string()).unwrap(), - }, - ) -} - -fn parse_vk_token(vk_token: Token) -> VerificationKey { - let tokens = unwrap_tuple(vk_token); - let n = tokens[0].clone().into_uint().unwrap().as_usize() - 1; - let num_inputs = tokens[1].clone().into_uint().unwrap().as_usize(); - let gate_selectors_commitments = tokens[3] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let gate_setup_commitments = tokens[4] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let permutation_commitments = tokens[5] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let lookup_selector_commitment = g1_affine_from_token(tokens[6].clone()); - let lookup_tables_commitments = tokens[7] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let 
lookup_table_type_commitment = g1_affine_from_token(tokens[8].clone()); - let non_residues = tokens[9] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(fr_from_token) - .collect(); - - let g2_elements = tokens[10] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g2_affine_from_token) - .collect::>(); - - VerificationKey { - n, - num_inputs, - - gate_setup_commitments, - gate_selectors_commitments, - permutation_commitments, - - lookup_selector_commitment: Some(lookup_selector_commitment), - lookup_tables_commitments, - lookup_table_type_commitment: Some(lookup_table_type_commitment), - - non_residues, - g2_elements: g2_elements.try_into().unwrap(), - } -} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 7770e34d0c60..d6e88c4e4c1e 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -20,4 +20,4 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" envy = "0.4" once_cell = "1.13.0" -bigdecimal = "0.2.0" +bigdecimal = "0.2.2" diff --git a/core/lib/config/src/configs/alerts.rs b/core/lib/config/src/configs/alerts.rs index d9f9d191a804..59b6d0ebddf6 100644 --- a/core/lib/config/src/configs/alerts.rs +++ b/core/lib/config/src/configs/alerts.rs @@ -20,7 +20,9 @@ impl AlertsConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> AlertsConfig { AlertsConfig { @@ -34,10 +36,12 @@ mod tests { #[test] fn test_from_env() { + let mut lock = MUTEX.lock(); let config = r#" - ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS=EventDestroyErr,Can't free memory of DeviceBuf,called `Result::unwrap()` on an `Err` value: PoisonError + ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS=EventDestroyErr,Can't free memory of DeviceBuf,called `Result::unwrap()` on an `Err` value: PoisonError "#; - set_env(config); + lock.set_env(config); + assert_eq!(AlertsConfig::from_env(), expected_config()); } } diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 9c40bb8acfda..271793ee52ad 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -14,7 +14,7 @@ pub struct ApiConfig { /// Configuration options for the Web3 JSON RPC servers. pub web3_json_rpc: Web3JsonRpcConfig, /// Configuration options for the REST servers. - pub explorer: ExplorerApiConfig, + pub contract_verification: ContractVerificationApiConfig, /// Configuration options for the Prometheus exporter. pub prometheus: PrometheusConfig, /// Configuration options for the Health check. @@ -25,7 +25,7 @@ impl ApiConfig { pub fn from_env() -> Self { Self { web3_json_rpc: Web3JsonRpcConfig::from_env(), - explorer: ExplorerApiConfig::from_env(), + contract_verification: ContractVerificationApiConfig::from_env(), prometheus: PrometheusConfig::from_env(), healthcheck: HealthCheckConfig::from_env(), } @@ -76,14 +76,25 @@ pub struct Web3JsonRpcConfig { /// This option can be tweaked down if the API server is running out of memory. /// If not set, the VM concurrency limit will be efficiently disabled. pub vm_concurrency_limit: Option, - /// Smart contract cache size in MBs + /// Smart contract cache size in MiBs. The default value is 128 MiB. pub factory_deps_cache_size_mb: Option, + /// Initial writes cache size in MiBs. The default value is 32 MiB. + pub initial_writes_cache_size_mb: Option, + /// Latest values cache size in MiBs. The default value is 128 MiB. 
If set to 0, the latest + /// values cache will be disabled. + pub latest_values_cache_size_mb: Option, /// Override value for the amount of threads used for HTTP RPC server. /// If not set, the value from `threads_per_server` is used. pub http_threads: Option, /// Override value for the amount of threads used for WebSocket RPC server. /// If not set, the value from `threads_per_server` is used. pub ws_threads: Option, + /// Limit for fee history block range. + pub fee_history_limit: Option, + /// Maximum number of requests in a single batch JSON RPC request. Default is 500. + pub max_batch_request_size: Option, + /// Maximum response body size in MiBs. Default is 10 MiB. + pub max_response_body_size_mb: Option, } impl Web3JsonRpcConfig { @@ -123,9 +134,26 @@ impl Web3JsonRpcConfig { self.account_pks.clone().unwrap_or_default() } - pub fn factory_deps_cache_size_mb(&self) -> usize { - // 128MB is the default smart contract code cache size. - self.factory_deps_cache_size_mb.unwrap_or(128) + pub fn vm_concurrency_limit(&self) -> usize { + // The default limit is large so that it does not create a bottleneck on its own. + // VM execution can still be limited by Tokio runtime parallelism and/or the number + // of DB connections in a pool. + self.vm_concurrency_limit.unwrap_or(2_048) + } + + /// Returns the size of factory dependencies cache in bytes. + pub fn factory_deps_cache_size(&self) -> usize { + self.factory_deps_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE + } + + /// Returns the size of initial writes cache in bytes. + pub fn initial_writes_cache_size(&self) -> usize { + self.initial_writes_cache_size_mb.unwrap_or(32) * super::BYTES_IN_MEGABYTE + } + + /// Returns the size of latest values cache in bytes. + pub fn latest_values_cache_size(&self) -> usize { + self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE } pub fn http_server_threads(&self) -> usize { @@ -135,6 +163,19 @@ impl Web3JsonRpcConfig { pub fn ws_server_threads(&self) -> usize { self.ws_threads.unwrap_or(self.threads_per_server) as usize } + + pub fn fee_history_limit(&self) -> u64 { + self.fee_history_limit.unwrap_or(1024) + } + + pub fn max_batch_request_size(&self) -> usize { + // The default limit is chosen to be reasonably permissive. + self.max_batch_request_size.unwrap_or(500) + } + + pub fn max_response_body_size(&self) -> usize { + self.max_response_body_size_mb.unwrap_or(10) * super::BYTES_IN_MEGABYTE + } } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -154,49 +195,33 @@ impl HealthCheckConfig { } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct ExplorerApiConfig { +pub struct ContractVerificationApiConfig { /// Port to which the REST server is listening. pub port: u16, /// URL to access REST server. pub url: String, - /// Interval between polling db for network stats (in ms). - pub network_stats_polling_interval: Option, - /// Max possible limit of entities to be requested once. - pub req_entities_limit: Option, - /// Max possible value of (offset + limit) in pagination endpoints. 
- pub offset_limit: Option, /// number of threads per server pub threads_per_server: u32, } -impl ExplorerApiConfig { +impl ContractVerificationApiConfig { pub fn bind_addr(&self) -> SocketAddr { SocketAddr::new("0.0.0.0".parse().unwrap(), self.port) } - pub fn network_stats_interval(&self) -> Duration { - Duration::from_millis(self.network_stats_polling_interval.unwrap_or(1000)) - } - - pub fn req_entities_limit(&self) -> usize { - self.req_entities_limit.unwrap_or(100) as usize - } - - pub fn offset_limit(&self) -> usize { - self.offset_limit.unwrap_or(10000) as usize - } - pub fn from_env() -> Self { - envy_load("explorer", "API_EXPLORER_") + envy_load("contract_verification", "API_CONTRACT_VERIFICATION_") } } #[cfg(test)] mod tests { - use super::*; - use crate::configs::test_utils::set_env; use std::net::IpAddr; - use std::str::FromStr; + + use super::*; + use crate::configs::test_utils::{hash, EnvMutex}; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ApiConfig { ApiConfig { @@ -214,14 +239,8 @@ mod tests { transactions_per_sec_limit: Some(1000), request_timeout: Some(10), account_pks: Some(vec![ - H256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - H256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000002", - ) - .unwrap(), + hash("0x0000000000000000000000000000000000000000000000000000000000000001"), + hash("0x0000000000000000000000000000000000000000000000000000000000000002"), ]), estimate_gas_scale_factor: 1.0f64, gas_price_scale_factor: 1.2, @@ -230,15 +249,17 @@ mod tests { vm_execution_cache_misses_limit: None, vm_concurrency_limit: Some(512), factory_deps_cache_size_mb: Some(128), + initial_writes_cache_size_mb: Some(32), + latest_values_cache_size_mb: Some(256), http_threads: Some(128), ws_threads: Some(256), + fee_history_limit: Some(100), + max_batch_request_size: Some(200), + max_response_body_size_mb: Some(10), }, - explorer: ExplorerApiConfig { + contract_verification: ContractVerificationApiConfig { port: 3070, url: "http://127.0.0.1:3070".into(), - network_stats_polling_interval: Some(1000), - req_entities_limit: Some(100), - offset_limit: Some(10000), threads_per_server: 128, }, prometheus: PrometheusConfig { @@ -252,40 +273,43 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -API_WEB3_JSON_RPC_HTTP_PORT="3050" -API_WEB3_JSON_RPC_HTTP_URL="http://127.0.0.1:3050" -API_WEB3_JSON_RPC_WS_PORT="3051" -API_WEB3_JSON_RPC_WS_URL="ws://127.0.0.1:3051" -API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 -API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 -API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 -API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 -API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 -API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=5 -API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 -API_WEB3_JSON_RPC_TRANSACTIONS_PER_SEC_LIMIT=1000 -API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 -API_WEB3_JSON_RPC_ACCOUNT_PKS=0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002 -API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 -API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 -API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 -API_WEB3_JSON_RPC_VM_CONCURRENCY_LIMIT=512 -API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 -API_WEB3_JSON_RPC_HTTP_THREADS=128 -API_WEB3_JSON_RPC_WS_THREADS=256 -API_EXPLORER_PORT="3070" -API_EXPLORER_URL="http://127.0.0.1:3070" -API_EXPLORER_NETWORK_STATS_POLLING_INTERVAL="1000" 
-API_EXPLORER_REQ_ENTITIES_LIMIT=100 -API_EXPLORER_OFFSET_LIMIT=10000 -API_EXPLORER_THREADS_PER_SERVER=128 -API_PROMETHEUS_LISTENER_PORT="3312" -API_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" -API_PROMETHEUS_PUSH_INTERVAL_MS=100 -API_HEALTHCHECK_PORT=8081 + API_WEB3_JSON_RPC_HTTP_PORT="3050" + API_WEB3_JSON_RPC_HTTP_URL="http://127.0.0.1:3050" + API_WEB3_JSON_RPC_WS_PORT="3051" + API_WEB3_JSON_RPC_WS_URL="ws://127.0.0.1:3051" + API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 + API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 + API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 + API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 + API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 + API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=5 + API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 + API_WEB3_JSON_RPC_TRANSACTIONS_PER_SEC_LIMIT=1000 + API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 + API_WEB3_JSON_RPC_ACCOUNT_PKS=0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002 + API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 + API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 + API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 + API_WEB3_JSON_RPC_VM_CONCURRENCY_LIMIT=512 + API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 + API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32 + API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256 + API_WEB3_JSON_RPC_HTTP_THREADS=128 + API_WEB3_JSON_RPC_WS_THREADS=256 + API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100 + API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200 + API_CONTRACT_VERIFICATION_PORT="3070" + API_CONTRACT_VERIFICATION_URL="http://127.0.0.1:3070" + API_CONTRACT_VERIFICATION_THREADS_PER_SERVER=128 + API_WEB3_JSON_RPC_MAX_RESPONSE_BODY_SIZE_MB=10 + API_PROMETHEUS_LISTENER_PORT="3312" + API_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" + API_PROMETHEUS_PUSH_INTERVAL_MS=100 + API_HEALTHCHECK_PORT=8081 "#; - set_env(config); + lock.set_env(config); let actual = ApiConfig::from_env(); assert_eq!(actual, expected_config()); @@ -302,8 +326,8 @@ API_HEALTHCHECK_PORT=8081 Duration::from_millis(200) ); assert_eq!( - config.explorer.bind_addr(), - SocketAddr::new(bind_broadcast_addr, config.explorer.port) + config.contract_verification.bind_addr(), + SocketAddr::new(bind_broadcast_addr, config.contract_verification.port) ); } } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index be23b69b6c02..ceeb2cf75d1a 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -182,7 +182,9 @@ impl MempoolConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::{addr, set_env}; + use crate::configs::test_utils::{addr, EnvMutex}; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ChainConfig { ChainConfig { @@ -232,40 +234,41 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -CHAIN_ETH_NETWORK="localhost" -CHAIN_ETH_ZKSYNC_NETWORK="localhost" -CHAIN_ETH_ZKSYNC_NETWORK_ID=270 -CHAIN_STATE_KEEPER_TRANSACTION_SLOTS="50" -CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" -CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS="1000000" -CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT="2000000000" -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE="0.5" -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE="0.8" -CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE="0.2" -CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE="0.3" -CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE="0.8" 
-CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE="0.5" -CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS="2500" -CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS="1000" -CHAIN_STATE_KEEPER_MINIBLOCK_SEAL_QUEUE_CAPACITY="10" -CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE="250000000" -CHAIN_STATE_KEEPER_BOOTLOADER_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" -CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" -CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" -CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" -CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" -CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" -CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" -CHAIN_MEMPOOL_STUCK_TX_TIMEOUT="10" -CHAIN_MEMPOOL_REMOVE_STUCK_TXS="true" -CHAIN_MEMPOOL_DELAY_INTERVAL="100" -CHAIN_MEMPOOL_CAPACITY="1000000" -CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS="1000" -CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER="5" -CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC="2" + CHAIN_ETH_NETWORK="localhost" + CHAIN_ETH_ZKSYNC_NETWORK="localhost" + CHAIN_ETH_ZKSYNC_NETWORK_ID=270 + CHAIN_STATE_KEEPER_TRANSACTION_SLOTS="50" + CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" + CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS="1000000" + CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT="2000000000" + CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE="0.5" + CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE="0.8" + CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE="0.2" + CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE="0.3" + CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE="0.8" + CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE="0.5" + CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS="2500" + CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS="1000" + CHAIN_STATE_KEEPER_MINIBLOCK_SEAL_QUEUE_CAPACITY="10" + CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE="250000000" + CHAIN_STATE_KEEPER_BOOTLOADER_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" + CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" + CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" + CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" + CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" + CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" + CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" + CHAIN_MEMPOOL_STUCK_TX_TIMEOUT="10" + CHAIN_MEMPOOL_REMOVE_STUCK_TXS="true" + CHAIN_MEMPOOL_DELAY_INTERVAL="100" + CHAIN_MEMPOOL_CAPACITY="1000000" + CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS="1000" + CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER="5" + CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC="2" "#; - set_env(config); + lock.set_env(config); let actual = ChainConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/circuit_synthesizer.rs b/core/lib/config/src/configs/circuit_synthesizer.rs index d0959304ebcf..d4599e3cb634 100644 --- a/core/lib/config/src/configs/circuit_synthesizer.rs +++ b/core/lib/config/src/configs/circuit_synthesizer.rs @@ -49,9 +49,10 @@ impl CircuitSynthesizerConfig { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> CircuitSynthesizerConfig { CircuitSynthesizerConfig { @@ -69,18 +70,20 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" - CIRCUIT_SYNTHESIZER_GENERATION_TIMEOUT_IN_SECS=1000 - 
CIRCUIT_SYNTHESIZER_MAX_ATTEMPTS=2 - CIRCUIT_SYNTHESIZER_GPU_PROVER_QUEUE_TIMEOUT_IN_SECS=1000 - CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=1000 - CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=250 - CIRCUIT_SYNTHESIZER_PROMETHEUS_LISTENER_PORT=3314 - CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" - CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSH_INTERVAL_MS=100 - CIRCUIT_SYNTHESIZER_PROVER_GROUP_ID=0 + CIRCUIT_SYNTHESIZER_GENERATION_TIMEOUT_IN_SECS=1000 + CIRCUIT_SYNTHESIZER_MAX_ATTEMPTS=2 + CIRCUIT_SYNTHESIZER_GPU_PROVER_QUEUE_TIMEOUT_IN_SECS=1000 + CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=1000 + CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=250 + CIRCUIT_SYNTHESIZER_PROMETHEUS_LISTENER_PORT=3314 + CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" + CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSH_INTERVAL_MS=100 + CIRCUIT_SYNTHESIZER_PROVER_GROUP_ID=0 "#; - set_env(config); + lock.set_env(config); + let actual = CircuitSynthesizerConfig::from_env(); assert_eq!(actual, expected_config()); } diff --git a/core/lib/config/src/configs/contract_verifier.rs b/core/lib/config/src/configs/contract_verifier.rs index 4c42ae45cc46..145fdab040e8 100644 --- a/core/lib/config/src/configs/contract_verifier.rs +++ b/core/lib/config/src/configs/contract_verifier.rs @@ -32,7 +32,9 @@ impl ContractVerifierConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ContractVerifierConfig { ContractVerifierConfig { @@ -44,12 +46,13 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" CONTRACT_VERIFIER_COMPILATION_TIMEOUT=30 CONTRACT_VERIFIER_POLLING_INTERVAL=1000 CONTRACT_VERIFIER_PROMETHEUS_PORT=3314 "#; - set_env(config); + lock.set_env(config); let actual = ContractVerifierConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index 5775953a65d4..f6530e18ee3f 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -26,6 +26,11 @@ pub struct ContractsConfig { pub l2_weth_bridge_addr: Option
<Address>, pub l1_allow_list_addr: Address, pub l2_testnet_paymaster_addr: Option<Address>
, + pub recursion_scheduler_level_vk_hash: H256, + pub recursion_node_level_vk_hash: H256, + pub recursion_leaf_level_vk_hash: H256, + pub recursion_circuits_set_vks_hash: H256, + pub l1_multicall3_addr: Address, } impl ContractsConfig { @@ -37,7 +42,9 @@ impl ContractsConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::{addr, hash, set_env}; + use crate::configs::test_utils::{addr, hash, EnvMutex}; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ContractsConfig { ContractsConfig { @@ -61,11 +68,25 @@ mod tests { l1_weth_bridge_proxy_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_weth_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_testnet_paymaster_addr: Some(addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF")), + recursion_scheduler_level_vk_hash: hash( + "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8", + ), + recursion_node_level_vk_hash: hash( + "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8", + ), + recursion_leaf_level_vk_hash: hash( + "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210", + ), + recursion_circuits_set_vks_hash: hash( + "0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0", + ), + l1_multicall3_addr: addr("0xcA11bde05977b3631167028862bE2a173976CA11"), } } #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" CONTRACTS_MAILBOX_FACET_ADDR="0x0f6Fa881EF414Fc6E818180657c2d5CD7Ac6cCAd" CONTRACTS_EXECUTOR_FACET_ADDR="0x18B631537801963A964211C0E86645c1aBfbB2d3" @@ -85,8 +106,13 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_RECURSION_SCHEDULER_LEVEL_VK_HASH="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" +CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" +CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" +CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH="0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0" +CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" "#; - set_env(config); + lock.set_env(config); let actual = ContractsConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index 67be709d3b3e..088e88f2b64e 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -1,138 +1,214 @@ -use serde::Deserialize; - -use std::{env, str::FromStr, time::Duration}; +use serde::{Deserialize, Serialize}; + +use std::time::Duration; + +use super::envy_load; + +/// Mode of operation for the Merkle tree. +/// +/// The mode does not influence how tree data is stored; i.e., a mode can be switched on the fly. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum MerkleTreeMode { + /// In this mode, `MetadataCalculator` will compute witness inputs for all storage operations + /// and put them into the object store as provided by `store_factory` (e.g., GCS). 
+ #[default] + Full, + /// In this mode, `MetadataCalculator` computes Merkle tree root hashes and some auxiliary information + /// for L1 batches, but not witness inputs. + Lightweight, +} -/// Database configuration. #[derive(Debug, Clone, PartialEq, Deserialize)] -pub struct DBConfig { - /// Path to the database data directory that serves state cache. - pub state_keeper_db_path: String, +pub struct MerkleTreeConfig { + /// Path to the RocksDB data directory for Merkle tree. + #[serde(default = "MerkleTreeConfig::default_path")] + pub path: String, /// Path to merkle tree backup directory. - pub merkle_tree_backup_path: String, - /// Fast SSD path. Used as a RocksDB dir for the Merkle tree. - pub new_merkle_tree_ssd_path: String, - /// Throttle interval for the Merkle tree in milliseconds. This interval will be applied after - /// each time the tree makes progress. - pub new_merkle_tree_throttle_ms: u64, - /// Number of backups to keep. - pub backup_count: usize, - /// Time interval between performing backups. - pub backup_interval_ms: u64, - /// Maximum number of blocks to be processed by the Merkle tree at a time. - pub max_block_batch: usize, + #[serde(default = "MerkleTreeConfig::default_backup_path")] + pub backup_path: String, + /// Operation mode for the Merkle tree. If not specified, the full mode will be used. + #[serde(default)] + pub mode: MerkleTreeMode, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + #[serde(default = "MerkleTreeConfig::default_multi_get_chunk_size")] + pub multi_get_chunk_size: usize, + /// Capacity of the block cache for the Merkle tree RocksDB. Reasonable values range from ~100 MB to several GB. + /// The default value is 128 MB. + #[serde(default = "MerkleTreeConfig::default_block_cache_size_mb")] + pub block_cache_size_mb: usize, + /// Maximum number of L1 batches to be processed by the Merkle tree at a time. 
+ #[serde(default = "MerkleTreeConfig::default_max_l1_batches_per_iter")] + pub max_l1_batches_per_iter: usize, } -impl Default for DBConfig { +impl Default for MerkleTreeConfig { fn default() -> Self { Self { - state_keeper_db_path: "./db/state_keeper".to_owned(), - merkle_tree_backup_path: "./db/backups".to_owned(), - new_merkle_tree_ssd_path: "./db/lightweight-new".to_owned(), - new_merkle_tree_throttle_ms: 0, - backup_count: 5, - backup_interval_ms: 60_000, - max_block_batch: 100, + path: Self::default_path(), + backup_path: Self::default_backup_path(), + mode: MerkleTreeMode::default(), + multi_get_chunk_size: Self::default_multi_get_chunk_size(), + block_cache_size_mb: Self::default_block_cache_size_mb(), + max_l1_batches_per_iter: Self::default_max_l1_batches_per_iter(), } } } -impl DBConfig { - pub fn from_env() -> Self { - let mut config = DBConfig::default(); - if let Ok(path) = env::var("DATABASE_STATE_KEEPER_DB_PATH") { - config.state_keeper_db_path = path; - } - if let Ok(path) = env::var("DATABASE_MERKLE_TREE_BACKUP_PATH") { - config.merkle_tree_backup_path = path; - } - if let Ok(path) = env::var("DATABASE_NEW_MERKLE_TREE_SSD_PATH") { - config.new_merkle_tree_ssd_path = path; - } - if let Some(interval) = Self::parse_env_var("DATABASE_NEW_MERKLE_TREE_THROTTLE_MS") { - config.new_merkle_tree_throttle_ms = interval; - } - if let Some(count) = Self::parse_env_var("DATABASE_BACKUP_COUNT") { - config.backup_count = count; - } - if let Some(interval) = Self::parse_env_var("DATABASE_BACKUP_INTERVAL_MS") { - config.backup_interval_ms = interval; - } - if let Some(size) = Self::parse_env_var("DATABASE_MAX_BLOCK_BATCH") { - config.max_block_batch = size; - } - config +impl MerkleTreeConfig { + fn default_path() -> String { + "./db/lightweight-new".to_owned() // named this way for legacy reasons } - fn parse_env_var(key: &str) -> Option { - let env_var = env::var(key).ok()?; - env_var.parse().ok() + fn default_backup_path() -> String { + "./db/backups".to_owned() } - /// Path to the database data directory that serves state cache. - pub fn state_keeper_db_path(&self) -> &str { - &self.state_keeper_db_path + const fn default_multi_get_chunk_size() -> usize { + 500 } - /// Path to the merkle tree backup directory. - pub fn merkle_tree_backup_path(&self) -> &str { - &self.merkle_tree_backup_path + const fn default_block_cache_size_mb() -> usize { + 128 } - /// Throttle interval for the Merkle tree. - pub fn new_merkle_tree_throttle_interval(&self) -> Duration { - Duration::from_millis(self.new_merkle_tree_throttle_ms) + const fn default_max_l1_batches_per_iter() -> usize { + 20 } - /// Number of backups to keep - pub fn backup_count(&self) -> usize { - self.backup_count + /// Returns the size of block cache size for Merkle tree in bytes. + pub fn block_cache_size(&self) -> usize { + self.block_cache_size_mb * super::BYTES_IN_MEGABYTE } +} - pub fn backup_interval(&self) -> Duration { - Duration::from_millis(self.backup_interval_ms) +/// Database configuration. +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct DBConfig { + /// Statement timeout in seconds for Postgres connections. Applies only to the replica + /// connection pool used by the API servers. + pub statement_timeout_sec: Option, + /// Path to the RocksDB data directory that serves state cache. + #[serde(default = "DBConfig::default_state_keeper_db_path")] + pub state_keeper_db_path: String, + /// Merkle tree configuration. + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. 
We cannot use `serde(flatten)` because it + // doesn't work with 'envy`. + pub merkle_tree: MerkleTreeConfig, + /// Number of backups to keep. + #[serde(default = "DBConfig::default_backup_count")] + pub backup_count: usize, + /// Time interval between performing backups. + #[serde(default = "DBConfig::default_backup_interval_ms")] + pub backup_interval_ms: u64, +} + +impl DBConfig { + fn default_state_keeper_db_path() -> String { + "./db/state_keeper".to_owned() + } + + const fn default_backup_count() -> usize { + 5 + } + + const fn default_backup_interval_ms() -> u64 { + 60_000 + } + + pub fn from_env() -> Self { + Self { + merkle_tree: envy_load("database_merkle_tree", "DATABASE_MERKLE_TREE_"), + ..envy_load("database", "DATABASE_") + } } - pub fn max_block_batch(&self) -> usize { - self.max_block_batch + /// Returns the Postgres statement timeout. + pub fn statement_timeout(&self) -> Option { + self.statement_timeout_sec.map(Duration::from_secs) + } + + pub fn backup_interval(&self) -> Duration { + Duration::from_millis(self.backup_interval_ms) } } #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -DATABASE_STATE_KEEPER_DB_PATH="./db/state_keeper" -DATABASE_MERKLE_TREE_BACKUP_PATH="./db/backups" -DATABASE_NEW_MERKLE_TREE_SSD_PATH="./db/lightweight-new" -DATABASE_NEW_MERKLE_TREE_THROTTLE_MS=0 -DATABASE_BACKUP_COUNT=5 -DATABASE_BACKUP_INTERVAL_MS=60000 -DATABASE_MAX_BLOCK_BATCH=100 + DATABASE_STATE_KEEPER_DB_PATH="/db/state_keeper" + DATABASE_MERKLE_TREE_BACKUP_PATH="/db/backups" + DATABASE_MERKLE_TREE_PATH="/db/tree" + DATABASE_MERKLE_TREE_MODE=lightweight + DATABASE_MERKLE_TREE_MULTI_GET_CHUNK_SIZE=250 + DATABASE_MERKLE_TREE_MAX_L1_BATCHES_PER_ITER=50 + DATABASE_BACKUP_COUNT=5 + DATABASE_BACKUP_INTERVAL_MS=60000 "#; - set_env(config); - - let actual = DBConfig::from_env(); - assert_eq!(actual, DBConfig::default()); + lock.set_env(config); + + let db_config = DBConfig::from_env(); + assert_eq!(db_config.state_keeper_db_path, "/db/state_keeper"); + assert_eq!(db_config.merkle_tree.path, "/db/tree"); + assert_eq!(db_config.merkle_tree.backup_path, "/db/backups"); + assert_eq!(db_config.merkle_tree.mode, MerkleTreeMode::Lightweight); + assert_eq!(db_config.merkle_tree.multi_get_chunk_size, 250); + assert_eq!(db_config.merkle_tree.max_l1_batches_per_iter, 50); + assert_eq!(db_config.backup_count, 5); + assert_eq!(db_config.backup_interval().as_secs(), 60); } - /// Checks the correctness of the config helper methods. 
#[test] - fn methods() { - let db_config = DBConfig::default(); - - assert_eq!( - db_config.state_keeper_db_path(), - &db_config.state_keeper_db_path - ); - assert_eq!( - db_config.merkle_tree_backup_path(), - &db_config.merkle_tree_backup_path - ); - assert_eq!(db_config.backup_count(), db_config.backup_count); + fn from_empty_env() { + let mut lock = MUTEX.lock(); + lock.remove_env(&[ + "DATABASE_STATE_KEEPER_DB_PATH", + "DATABASE_MERKLE_TREE_BACKUP_PATH", + "DATABASE_MERKLE_TREE_PATH", + "DATABASE_MERKLE_TREE_MODE", + "DATABASE_MERKLE_TREE_MULTI_GET_CHUNK_SIZE", + "DATABASE_MERKLE_TREE_BLOCK_CACHE_SIZE_MB", + "DATABASE_MERKLE_TREE_MAX_L1_BATCHES_PER_ITER", + "DATABASE_BACKUP_COUNT", + "DATABASE_BACKUP_INTERVAL_MS", + ]); + + let db_config = DBConfig::from_env(); + assert_eq!(db_config.state_keeper_db_path, "./db/state_keeper"); + assert_eq!(db_config.merkle_tree.path, "./db/lightweight-new"); + assert_eq!(db_config.merkle_tree.backup_path, "./db/backups"); + assert_eq!(db_config.merkle_tree.mode, MerkleTreeMode::Full); + assert_eq!(db_config.merkle_tree.multi_get_chunk_size, 500); + assert_eq!(db_config.merkle_tree.max_l1_batches_per_iter, 20); + assert_eq!(db_config.merkle_tree.block_cache_size_mb, 128); + assert_eq!(db_config.backup_count, 5); assert_eq!(db_config.backup_interval().as_secs(), 60); + + // Check that new env variable for Merkle tree path is supported + lock.set_env("DATABASE_MERKLE_TREE_PATH=/db/tree/main"); + let db_config = DBConfig::from_env(); + assert_eq!(db_config.merkle_tree.path, "/db/tree/main"); + + lock.set_env("DATABASE_MERKLE_TREE_MULTI_GET_CHUNK_SIZE=200"); + let db_config = DBConfig::from_env(); + assert_eq!(db_config.merkle_tree.multi_get_chunk_size, 200); + + lock.set_env("DATABASE_MERKLE_TREE_BLOCK_CACHE_SIZE_MB=256"); + let db_config = DBConfig::from_env(); + assert_eq!(db_config.merkle_tree.block_cache_size_mb, 256); + + lock.set_env("DATABASE_MERKLE_TREE_MAX_L1_BATCHES_PER_ITER=50"); + let db_config = DBConfig::from_env(); + assert_eq!(db_config.merkle_tree.max_l1_batches_per_iter, 50); } } diff --git a/core/lib/config/src/configs/eth_client.rs b/core/lib/config/src/configs/eth_client.rs index 6e293eac26b5..46427613f89b 100644 --- a/core/lib/config/src/configs/eth_client.rs +++ b/core/lib/config/src/configs/eth_client.rs @@ -28,7 +28,9 @@ impl ETHClientConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ETHClientConfig { ETHClientConfig { @@ -39,11 +41,12 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -ETH_CLIENT_CHAIN_ID="9" -ETH_CLIENT_WEB3_URL="http://127.0.0.1:8545" + ETH_CLIENT_CHAIN_ID="9" + ETH_CLIENT_WEB3_URL="http://127.0.0.1:8545" "#; - set_env(config); + lock.set_env(config); let actual = ETHClientConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 0cb316fb4621..d4651d60a04c 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -124,7 +124,9 @@ impl GasAdjusterConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::{hash, set_env}; + use crate::configs::test_utils::{hash, EnvMutex}; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ETHSenderConfig { ETHSenderConfig { @@ -162,34 +164,35 @@ mod tests { #[test] fn from_env() { + let mut lock = 
MUTEX.lock(); let config = r#" -ETH_SENDER_SENDER_WAIT_CONFIRMATIONS="1" -ETH_SENDER_SENDER_TX_POLL_PERIOD="3" -ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD="3" -ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT="3" -ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be" -ETH_SENDER_SENDER_PROOF_SENDING_MODE="SkipEveryProof" -ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS="20000000000" -ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES="10000" -ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A="1.5" -ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B="1.0005" -ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER="0.8" -ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD="15" -ETH_SENDER_GAS_ADJUSTER_MAX_L1_GAS_PRICE="100000000" -ETH_SENDER_WAIT_FOR_PROOFS="false" -ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES="1,5" -ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT="3" -ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE="4" -ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE="30" -ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE="3000" -ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE="4000" -ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" -ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" -ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" -ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" -ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" + ETH_SENDER_SENDER_WAIT_CONFIRMATIONS="1" + ETH_SENDER_SENDER_TX_POLL_PERIOD="3" + ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD="3" + ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT="3" + ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be" + ETH_SENDER_SENDER_PROOF_SENDING_MODE="SkipEveryProof" + ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS="20000000000" + ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES="10000" + ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A="1.5" + ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B="1.0005" + ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER="0.8" + ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD="15" + ETH_SENDER_GAS_ADJUSTER_MAX_L1_GAS_PRICE="100000000" + ETH_SENDER_WAIT_FOR_PROOFS="false" + ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES="1,5" + ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT="3" + ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE="4" + ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE="30" + ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE="3000" + ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE="4000" + ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" + ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" + ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" + ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" "#; - set_env(config); + lock.set_env(config); let actual = ETHSenderConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/eth_watch.rs b/core/lib/config/src/configs/eth_watch.rs index 0ae24f672d0c..3ae617f1daa0 100644 --- a/core/lib/config/src/configs/eth_watch.rs +++ b/core/lib/config/src/configs/eth_watch.rs @@ -30,7 +30,9 @@ impl ETHWatchConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ETHWatchConfig { ETHWatchConfig { @@ -41,11 +43,12 @@ mod tests { #[test] fn from_env() { + let mut lock 
= MUTEX.lock(); let config = r#" -ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT="0" -ETH_WATCH_ETH_NODE_POLL_INTERVAL="300" + ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT="0" + ETH_WATCH_ETH_NODE_POLL_INTERVAL="300" "#; - set_env(config); + lock.set_env(config); let actual = ETHWatchConfig::from_env(); assert_eq!(actual, expected_config()); @@ -55,7 +58,6 @@ ETH_WATCH_ETH_NODE_POLL_INTERVAL="300" #[test] fn methods() { let config = expected_config(); - assert_eq!( config.poll_interval(), Duration::from_millis(config.eth_node_poll_interval) diff --git a/core/lib/config/src/configs/fetcher.rs b/core/lib/config/src/configs/fetcher.rs index 98d6c859058b..5ed2612d944c 100644 --- a/core/lib/config/src/configs/fetcher.rs +++ b/core/lib/config/src/configs/fetcher.rs @@ -66,7 +66,9 @@ impl FetcherConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> FetcherConfig { FetcherConfig { @@ -90,18 +92,19 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -FETCHER_TOKEN_LIST_SOURCE="OneInch" -FETCHER_TOKEN_LIST_URL="http://127.0.0.1:1020" -FETCHER_TOKEN_LIST_FETCHING_INTERVAL="10" -FETCHER_TOKEN_PRICE_SOURCE="CoinGecko" -FETCHER_TOKEN_PRICE_URL="http://127.0.0.1:9876" -FETCHER_TOKEN_PRICE_FETCHING_INTERVAL="7" -FETCHER_TOKEN_TRADING_VOLUME_SOURCE="Uniswap" -FETCHER_TOKEN_TRADING_VOLUME_URL="http://127.0.0.1:9975/graphql" -FETCHER_TOKEN_TRADING_VOLUME_FETCHING_INTERVAL="5" + FETCHER_TOKEN_LIST_SOURCE="OneInch" + FETCHER_TOKEN_LIST_URL="http://127.0.0.1:1020" + FETCHER_TOKEN_LIST_FETCHING_INTERVAL="10" + FETCHER_TOKEN_PRICE_SOURCE="CoinGecko" + FETCHER_TOKEN_PRICE_URL="http://127.0.0.1:9876" + FETCHER_TOKEN_PRICE_FETCHING_INTERVAL="7" + FETCHER_TOKEN_TRADING_VOLUME_SOURCE="Uniswap" + FETCHER_TOKEN_TRADING_VOLUME_URL="http://127.0.0.1:9975/graphql" + FETCHER_TOKEN_TRADING_VOLUME_FETCHING_INTERVAL="5" "#; - set_env(config); + lock.set_env(config); let actual = FetcherConfig::from_env(); assert_eq!(actual, expected_config()); diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index 057c1572516d..e996affacb86 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -19,6 +19,9 @@ pub struct FriProverConfig { pub recursive_layer_circuit_ids_to_be_verified: Vec, pub setup_load_mode: SetupLoadMode, pub specialized_group_id: u8, + pub witness_vector_generator_thread_count: Option, + pub queue_capacity: usize, + pub witness_vector_receiver_port: u16, } impl FriProverConfig { @@ -33,9 +36,10 @@ impl FriProverConfig { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> FriProverConfig { FriProverConfig { @@ -47,22 +51,30 @@ mod tests { recursive_layer_circuit_ids_to_be_verified: vec![1, 2, 3], setup_load_mode: SetupLoadMode::FromDisk, specialized_group_id: 10, + witness_vector_generator_thread_count: Some(5), + queue_capacity: 10, + witness_vector_receiver_port: 3316, } } #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -FRI_PROVER_SETUP_DATA_PATH="/usr/src/setup-data" -FRI_PROVER_PROMETHEUS_PORT="3315" -FRI_PROVER_MAX_ATTEMPTS="10" -FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300" -FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,5" 
-FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,2,3" -FRI_PROVER_SETUP_LOAD_MODE="FromDisk" -FRI_PROVER_SPECIALIZED_GROUP_ID="10" + FRI_PROVER_SETUP_DATA_PATH="/usr/src/setup-data" + FRI_PROVER_PROMETHEUS_PORT="3315" + FRI_PROVER_MAX_ATTEMPTS="10" + FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300" + FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,5" + FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,2,3" + FRI_PROVER_SETUP_LOAD_MODE="FromDisk" + FRI_PROVER_SPECIALIZED_GROUP_ID="10" + FRI_PROVER_WITNESS_VECTOR_GENERATOR_THREAD_COUNT="5" + FRI_PROVER_QUEUE_CAPACITY="10" + FRI_PROVER_WITNESS_VECTOR_RECEIVER_PORT="3316" "#; - set_env(config); + lock.set_env(config); + let actual = FriProverConfig::from_env(); assert_eq!(actual, expected_config()); } diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 57b89489ce0e..6d3223d2e247 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -119,6 +119,12 @@ impl FriProverGroupConfig { } } + pub fn get_all_circuit_ids(&self) -> Vec<u8> { + (0..13) + .filter_map(|group_id| self.get_circuit_ids_for_group_id(group_id)) + .flatten() + .collect() + } /// Check that all circuit ids are present exactly once, /// and for each aggregation round, check that the circuit ids are in the correct range. /// For example, in aggregation round 0, the circuit ids should be 1 to 13. diff --git a/core/lib/config/src/configs/fri_witness_generator.rs b/core/lib/config/src/configs/fri_witness_generator.rs index bfb9e2e8b481..a1d429f507f3 100644 --- a/core/lib/config/src/configs/fri_witness_generator.rs +++ b/core/lib/config/src/configs/fri_witness_generator.rs @@ -42,9 +42,10 @@ impl FriWitnessGeneratorConfig { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - - use super::*; + use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> FriWitnessGeneratorConfig { FriWitnessGeneratorConfig { @@ -59,14 +60,16 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" - FRI_WITNESS_GENERATION_TIMEOUT_IN_SECS=900 - FRI_WITNESS_MAX_ATTEMPTS=4 - FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" - FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30" - FRI_WITNESS_FORCE_PROCESS_BLOCK="1" + FRI_WITNESS_GENERATION_TIMEOUT_IN_SECS=900 + FRI_WITNESS_MAX_ATTEMPTS=4 + FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" + FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30" + FRI_WITNESS_FORCE_PROCESS_BLOCK="1" "#; - set_env(config); + lock.set_env(config); + let actual = FriWitnessGeneratorConfig::from_env(); assert_eq!(actual, expected_config()); } diff --git a/core/lib/config/src/configs/fri_witness_vector_generator.rs b/core/lib/config/src/configs/fri_witness_vector_generator.rs new file mode 100644 index 000000000000..5550550a5e12 --- /dev/null +++ b/core/lib/config/src/configs/fri_witness_vector_generator.rs @@ -0,0 +1,84 @@ +use std::time::Duration; + +use serde::Deserialize; + +use super::envy_load; + +/// Configuration for the witness vector generator +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct FriWitnessVectorGeneratorConfig { + /// Max time before a `reserved` prover instance is considered `available` + pub max_prover_reservation_duration_in_secs: u16, + /// Max time to wait to get a free prover instance + pub prover_instance_wait_timeout_in_secs: u16, + // Time to wait between two consecutive polls to get a new prover instance.
+ pub prover_instance_poll_time_in_milli_secs: u16, + + /// Configurations for prometheus + pub prometheus_listener_port: u16, + pub prometheus_pushgateway_url: String, + pub prometheus_push_interval_ms: Option, + + // specialized group id for this witness vector generator. + // witness vector generator running the same (circuit id, round) shall have same group id. + pub specialized_group_id: u8, +} + +impl FriWitnessVectorGeneratorConfig { + pub fn from_env() -> Self { + envy_load( + "fri_witness_vector_generator", + "FRI_WITNESS_VECTOR_GENERATOR_", + ) + } + + pub fn prover_instance_wait_timeout(&self) -> Duration { + Duration::from_secs(self.prover_instance_wait_timeout_in_secs as u64) + } + + pub fn prover_instance_poll_time(&self) -> Duration { + Duration::from_millis(self.prover_instance_poll_time_in_milli_secs as u64) + } + + pub fn max_prover_reservation_duration(&self) -> Duration { + Duration::from_secs(self.max_prover_reservation_duration_in_secs as u64) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> FriWitnessVectorGeneratorConfig { + FriWitnessVectorGeneratorConfig { + max_prover_reservation_duration_in_secs: 1000u16, + prover_instance_wait_timeout_in_secs: 1000u16, + prover_instance_poll_time_in_milli_secs: 250u16, + prometheus_listener_port: 3316, + prometheus_pushgateway_url: "http://127.0.0.1:9091".to_string(), + prometheus_push_interval_ms: Some(100), + specialized_group_id: 1, + } + } + + #[test] + fn from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + FRI_WITNESS_VECTOR_GENERATOR_MAX_PROVER_RESERVATION_DURATION_IN_SECS=1000 + FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=1000 + FRI_WITNESS_VECTOR_GENERATOR_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=250 + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3316 + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" + FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_PUSH_INTERVAL_MS=100 + FRI_WITNESS_VECTOR_GENERATOR_SPECIALIZED_GROUP_ID=1 + "#; + lock.set_env(config); + + let actual = FriWitnessVectorGeneratorConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index f54d4f84703c..f941c2a9f251 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -27,9 +27,10 @@ impl HouseKeeperConfig { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> HouseKeeperConfig { HouseKeeperConfig { @@ -50,21 +51,23 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS="10000" -HOUSE_KEEPER_BLOB_CLEANING_INTERVAL_MS="60000" -HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS="10000" -HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="300000" -HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" -HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" -HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" -HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS="40000" -HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS="30000" -HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" -HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" 
-HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS="30000" + HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS="10000" + HOUSE_KEEPER_BLOB_CLEANING_INTERVAL_MS="60000" + HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS="10000" + HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="300000" + HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" + HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" + HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" + HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS="40000" + HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS="30000" + HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" + HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" + HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS="30000" "#; - set_env(config); + lock.set_env(config); + let actual = HouseKeeperConfig::from_env(); assert_eq!(actual, expected_config()); } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 0ac90de3020f..292ed1161a81 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,8 +5,9 @@ pub use self::{ contracts::ContractsConfig, database::DBConfig, eth_client::ETHClientConfig, eth_sender::ETHSenderConfig, eth_sender::GasAdjusterConfig, eth_watch::ETHWatchConfig, fetcher::FetcherConfig, fri_prover::FriProverConfig, - fri_witness_generator::FriWitnessGeneratorConfig, nfs::NfsConfig, - object_store::ObjectStoreConfig, prover::ProverConfig, prover::ProverConfigs, + fri_witness_generator::FriWitnessGeneratorConfig, + fri_witness_vector_generator::FriWitnessVectorGeneratorConfig, object_store::ObjectStoreConfig, + proof_data_handler::ProofDataHandlerConfig, prover::ProverConfig, prover::ProverConfigs, prover_group::ProverGroupConfig, utils::PrometheusConfig, witness_generator::WitnessGeneratorConfig, }; @@ -27,9 +28,10 @@ pub mod fetcher; pub mod fri_prover; pub mod fri_prover_group; pub mod fri_witness_generator; +pub mod fri_witness_vector_generator; pub mod house_keeper; -pub mod nfs; pub mod object_store; +pub mod proof_data_handler; pub mod prover; pub mod prover_group; pub mod utils; @@ -38,6 +40,8 @@ pub mod witness_generator; #[cfg(test)] pub(crate) mod test_utils; +const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024; + /// Convenience function that loads the structure from the environment variable given the prefix. /// Panics if the config cannot be loaded from the environment variables. pub fn envy_load(name: &str, prefix: &str) -> T { diff --git a/core/lib/config/src/configs/nfs.rs b/core/lib/config/src/configs/nfs.rs deleted file mode 100644 index a01a870ef88d..000000000000 --- a/core/lib/config/src/configs/nfs.rs +++ /dev/null @@ -1,35 +0,0 @@ -use super::envy_load; -use serde::Deserialize; -/// Configuration for the Network file system. 
-#[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct NfsConfig { - pub setup_key_mount_path: String, -} - -impl NfsConfig { - pub fn from_env() -> Self { - envy_load("nfs", "NFS_") - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::configs::test_utils::set_env; - - fn expected_config() -> NfsConfig { - NfsConfig { - setup_key_mount_path: "/path/to/setup_keys".to_string(), - } - } - - #[test] - fn from_env() { - let config = r#" -NFS_SETUP_KEY_MOUNT_PATH="/path/to/setup_keys" - "#; - set_env(config); - let actual = NfsConfig::from_env(); - assert_eq!(actual, expected_config()); - } -} diff --git a/core/lib/config/src/configs/object_store.rs b/core/lib/config/src/configs/object_store.rs index afca2cf7fb34..12793594813b 100644 --- a/core/lib/config/src/configs/object_store.rs +++ b/core/lib/config/src/configs/object_store.rs @@ -1,6 +1,7 @@ -use super::envy_load; use serde::Deserialize; +use super::envy_load; + #[derive(Debug, Deserialize, Eq, PartialEq, Clone, Copy)] pub enum ObjectStoreMode { GCS, @@ -31,7 +32,9 @@ impl ObjectStoreConfig { #[cfg(test)] mod tests { use super::*; - use crate::configs::test_utils::set_env; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config(bucket_base_url: &str) -> ObjectStoreConfig { ObjectStoreConfig { @@ -45,28 +48,30 @@ mod tests { #[test] fn from_env() { + let mut lock = MUTEX.lock(); let config = r#" -OBJECT_STORE_BUCKET_BASE_URL="/base/url" -OBJECT_STORE_MODE="FileBacked" -OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" -OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" -OBJECT_STORE_MAX_RETRIES="5" + OBJECT_STORE_BUCKET_BASE_URL="/base/url" + OBJECT_STORE_MODE="FileBacked" + OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" + OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" + OBJECT_STORE_MAX_RETRIES="5" "#; - set_env(config); + lock.set_env(config); let actual = ObjectStoreConfig::from_env(); assert_eq!(actual, expected_config("/base/url")); } #[test] fn public_bucket_config_from_env() { + let mut lock = MUTEX.lock(); let config = r#" -PUBLIC_OBJECT_STORE_BUCKET_BASE_URL="/public_base_url" -PUBLIC_OBJECT_STORE_MODE="FileBacked" -PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" -PUBLIC_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" -PUBLIC_OBJECT_STORE_MAX_RETRIES="5" + PUBLIC_OBJECT_STORE_BUCKET_BASE_URL="/public_base_url" + PUBLIC_OBJECT_STORE_MODE="FileBacked" + PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" + PUBLIC_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" + PUBLIC_OBJECT_STORE_MAX_RETRIES="5" "#; - set_env(config); + lock.set_env(config); let actual = ObjectStoreConfig::public_from_env(); assert_eq!(actual, expected_config("/public_base_url")); } diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs new file mode 100644 index 000000000000..d15fed3c8821 --- /dev/null +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -0,0 +1,46 @@ +use super::envy_load; +use serde::Deserialize; +use std::time::Duration; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct ProofDataHandlerConfig { + pub http_port: u16, + pub proof_generation_timeout_in_secs: u16, +} + +impl ProofDataHandlerConfig { + pub fn from_env() -> Self { + envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + } + + pub fn proof_generation_timeout(&self) -> Duration { + Duration::from_secs(self.proof_generation_timeout_in_secs as u64) + } +} + 
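A note on the mechanics, since the pattern repeats across every config in this PR: `from_env()` goes through `envy_load(name, prefix)`, a convenience wrapper over the `envy` crate that strips the prefix from matching environment variables and deserializes the remainder into the struct via serde, which is why the test fixtures are plain `PREFIX_FIELD=value` lines. Below is a minimal self-contained sketch of that mapping and of how the new timeout accessor might be consumed; `DemoProofDataHandlerConfig` and the requeue comment are illustrative assumptions, not code from this PR.

    use std::time::Duration;

    use serde::Deserialize;

    // Illustrative mirror of the `ProofDataHandlerConfig` added above; the
    // fields map onto PROOF_DATA_HANDLER_* environment variables.
    #[derive(Debug, Deserialize, PartialEq)]
    struct DemoProofDataHandlerConfig {
        http_port: u16,
        proof_generation_timeout_in_secs: u16,
    }

    fn main() {
        std::env::set_var("PROOF_DATA_HANDLER_HTTP_PORT", "3320");
        std::env::set_var("PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS", "18000");

        // `envy::prefixed` strips the prefix, so PROOF_DATA_HANDLER_HTTP_PORT
        // deserializes into `http_port`; `envy_load` wraps exactly this call.
        let config: DemoProofDataHandlerConfig =
            envy::prefixed("PROOF_DATA_HANDLER_").from_env().unwrap();
        assert_eq!(config.http_port, 3320);

        // Assumed usage: a proof generation job picked up longer ago than this
        // timeout can be treated as stuck and handed out to another prover.
        let timeout = Duration::from_secs(config.proof_generation_timeout_in_secs.into());
        assert_eq!(timeout, Duration::from_secs(18_000));
    }
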
+#[cfg(test)] +mod tests { + use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> ProofDataHandlerConfig { + ProofDataHandlerConfig { + http_port: 3320, + proof_generation_timeout_in_secs: 18000, + } + } + + #[test] + fn from_env() { + let config = r#" + PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" + PROOF_DATA_HANDLER_HTTP_PORT="3320" + "#; + let mut lock = MUTEX.lock(); + lock.set_env(config); + let actual = ProofDataHandlerConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/prover.rs b/core/lib/config/src/configs/prover.rs index 38e038839d97..03e0f630e881 100644 --- a/core/lib/config/src/configs/prover.rs +++ b/core/lib/config/src/configs/prover.rs @@ -88,9 +88,10 @@ impl ProverConfigs { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> ProverConfigs { ProverConfigs { @@ -173,132 +174,82 @@ mod tests { } const CONFIG: &str = r#" -PROVER_NON_GPU_PROMETHEUS_PORT="3313" -PROVER_NON_GPU_INITIAL_SETUP_KEY_PATH="key" -PROVER_NON_GPU_KEY_DOWNLOAD_URL="value" -PROVER_NON_GPU_GENERATION_TIMEOUT_IN_SECS=2700 -PROVER_NON_GPU_NUMBER_OF_THREADS="2" -PROVER_NON_GPU_MAX_ATTEMPTS="4" -PROVER_NON_GPU_POLLING_DURATION_IN_MILLIS=5 -PROVER_NON_GPU_SETUP_KEYS_PATH="/usr/src/setup-keys" -PROVER_NON_GPU_NUMBER_OF_SETUP_SLOTS=2 -PROVER_NON_GPU_ASSEMBLY_RECEIVER_PORT=17791 -PROVER_NON_GPU_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 -PROVER_NON_GPU_ASSEMBLY_QUEUE_CAPACITY=5 -PROVER_NON_GPU_SPECIALIZED_PROVER_GROUP_ID=0 - -PROVER_TWO_GPU_FORTY_GB_MEM_PROMETHEUS_PORT="3313" -PROVER_TWO_GPU_FORTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" -PROVER_TWO_GPU_FORTY_GB_MEM_KEY_DOWNLOAD_URL="value" -PROVER_TWO_GPU_FORTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 -PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_THREADS="2" -PROVER_TWO_GPU_FORTY_GB_MEM_MAX_ATTEMPTS="4" -PROVER_TWO_GPU_FORTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 -PROVER_TWO_GPU_FORTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" -PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=5 -PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 -PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 -PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 -PROVER_TWO_GPU_FORTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=1 - -PROVER_ONE_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313" -PROVER_ONE_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" -PROVER_ONE_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value" -PROVER_ONE_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 -PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="4" -PROVER_ONE_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4" -PROVER_ONE_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 -PROVER_ONE_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" -PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=5 -PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 -PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 -PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 -PROVER_ONE_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=2 - -PROVER_TWO_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313" -PROVER_TWO_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" -PROVER_TWO_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value" -PROVER_TWO_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 -PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="9" -PROVER_TWO_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4" 
-PROVER_TWO_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 -PROVER_TWO_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" -PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=9 -PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 -PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 -PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 -PROVER_TWO_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=3 - -PROVER_FOUR_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="18" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" -PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=18 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 -PROVER_FOUR_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=4 - "#; + PROVER_NON_GPU_PROMETHEUS_PORT="3313" + PROVER_NON_GPU_INITIAL_SETUP_KEY_PATH="key" + PROVER_NON_GPU_KEY_DOWNLOAD_URL="value" + PROVER_NON_GPU_GENERATION_TIMEOUT_IN_SECS=2700 + PROVER_NON_GPU_NUMBER_OF_THREADS="2" + PROVER_NON_GPU_MAX_ATTEMPTS="4" + PROVER_NON_GPU_POLLING_DURATION_IN_MILLIS=5 + PROVER_NON_GPU_SETUP_KEYS_PATH="/usr/src/setup-keys" + PROVER_NON_GPU_NUMBER_OF_SETUP_SLOTS=2 + PROVER_NON_GPU_ASSEMBLY_RECEIVER_PORT=17791 + PROVER_NON_GPU_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 + PROVER_NON_GPU_ASSEMBLY_QUEUE_CAPACITY=5 + PROVER_NON_GPU_SPECIALIZED_PROVER_GROUP_ID=0 + + PROVER_TWO_GPU_FORTY_GB_MEM_PROMETHEUS_PORT="3313" + PROVER_TWO_GPU_FORTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" + PROVER_TWO_GPU_FORTY_GB_MEM_KEY_DOWNLOAD_URL="value" + PROVER_TWO_GPU_FORTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 + PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_THREADS="2" + PROVER_TWO_GPU_FORTY_GB_MEM_MAX_ATTEMPTS="4" + PROVER_TWO_GPU_FORTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 + PROVER_TWO_GPU_FORTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" + PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=5 + PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 + PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 + PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 + PROVER_TWO_GPU_FORTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=1 + + PROVER_ONE_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313" + PROVER_ONE_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" + PROVER_ONE_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value" + PROVER_ONE_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700 + PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="4" + PROVER_ONE_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4" + PROVER_ONE_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5 + PROVER_ONE_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys" + PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=5 + PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791 + PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250 + PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5 + PROVER_ONE_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=2 + + PROVER_TWO_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313" + PROVER_TWO_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key" + PROVER_TWO_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value" + 
PROVER_TWO_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="9"
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4"
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys"
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=9
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5
+            PROVER_TWO_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=3
+
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT="3313"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH="key"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL="value"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=2700
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS="18"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS="4"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=5
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH="/usr/src/setup-keys"
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=18
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=17791
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=250
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=5
+            PROVER_FOUR_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=4
+        "#;
 
     #[test]
     fn from_env() {
-        set_env(CONFIG);
+        let mut lock = MUTEX.lock();
+        lock.set_env(CONFIG);
         let actual = ProverConfigs::from_env();
         assert_eq!(actual, expected_config());
     }
-
-    fn convert<'a, T: IntoIterator<Item = &'a (&'a str, &'a str)>>(
-        iter: T,
-        prefix: &str,
-    ) -> ProverConfig {
-        let iter = iter
-            .into_iter()
-            .map(|(x, y)| (x.to_string(), y.to_string()));
-
-        envy::prefixed(prefix).from_iter(iter).unwrap()
-    }
-
-    #[test]
-    fn from_env_some() {
-        let expected_config = ProverConfig {
-            prometheus_port: 3313,
-            initial_setup_key_path: "key".to_owned(),
-            key_download_url: "value".to_owned(),
-            generation_timeout_in_secs: 2700u16,
-            number_of_threads: 2,
-            max_attempts: 4,
-            polling_duration_in_millis: 5,
-            setup_keys_path: "/usr/src/setup-keys".to_string(),
-            specialized_prover_group_id: 0,
-            number_of_setup_slots: 11,
-            assembly_receiver_port: 17791,
-            assembly_receiver_poll_time_in_millis: 250,
-            assembly_queue_capacity: 5,
-        };
-
-        let config = [
-            ("PROVER_PROMETHEUS_PORT", "3313"),
-            ("PROVER_INITIAL_SETUP_KEY_PATH", "key"),
-            ("PROVER_KEY_DOWNLOAD_URL", "value"),
-            ("PROVER_GENERATION_TIMEOUT_IN_SECS", "2700"),
-            ("PROVER_NUMBER_OF_THREADS", "2"),
-            ("PROVER_MAX_ATTEMPTS", "4"),
-            ("PROVER_POLLING_DURATION_IN_MILLIS", "5"),
-            ("PROVER_SETUP_KEYS_PATH", "/usr/src/setup-keys"),
-            ("PROVER_NUMBER_OF_SETUP_SLOTS", "11"),
-            ("PROVER_ASSEMBLY_RECEIVER_PORT", "17791"),
-            ("PROVER_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS", "250"),
-            ("PROVER_ASSEMBLY_QUEUE_CAPACITY", "5"),
-            ("PROVER_SPECIALIZED_PROVER_GROUP_ID", "0"),
-        ]
-        .iter()
-        .chain(vec![&("PROVER_CIRCUIT_TYPES", "1,2")]);
-
-        let actual = convert(config, "PROVER_");
-        assert_eq!(actual, expected_config);
-    }
 }
diff --git a/core/lib/config/src/configs/prover_group.rs b/core/lib/config/src/configs/prover_group.rs
index 4e1d402fd550..86d57897898b 100644
--- a/core/lib/config/src/configs/prover_group.rs
+++ b/core/lib/config/src/configs/prover_group.rs
@@ -73,9 +73,10 @@ impl ProverGroupConfig {
 #[cfg(test)]
 mod tests {
-    use crate::configs::test_utils::set_env;
-    use super::*;
+    use super::*;
+    use crate::configs::test_utils::EnvMutex;
+
+    static MUTEX: EnvMutex = EnvMutex::new();
 
     fn expected_config() -> ProverGroupConfig {
        ProverGroupConfig {
@@ -117,15 +118,15 @@ mod tests {
 
     #[test]
     fn from_env() {
-        set_env(CONFIG);
+        let mut lock = MUTEX.lock();
+        lock.set_env(CONFIG);
         let actual = ProverGroupConfig::from_env();
         assert_eq!(actual, expected_config());
     }
 
     #[test]
     fn get_group_id_for_circuit_id() {
-        set_env(CONFIG);
-        let prover_group_config = ProverGroupConfig::from_env();
+        let prover_group_config = expected_config();
 
         assert_eq!(Some(0), prover_group_config.get_group_id_for_circuit_id(0));
         assert_eq!(Some(0), prover_group_config.get_group_id_for_circuit_id(18));
@@ -162,8 +163,8 @@ mod tests {
 
     #[test]
     fn get_circuit_ids_for_group_id() {
-        set_env(CONFIG);
-        let prover_group_config = ProverGroupConfig::from_env();
+        let prover_group_config = expected_config();
+
         assert_eq!(
             Some(vec![0, 18]),
             prover_group_config.get_circuit_ids_for_group_id(0)
diff --git a/core/lib/config/src/configs/test_utils.rs b/core/lib/config/src/configs/test_utils.rs
index 8581b81853f8..013d12493ae4 100644
--- a/core/lib/config/src/configs/test_utils.rs
+++ b/core/lib/config/src/configs/test_utils.rs
@@ -1,38 +1,143 @@
 // Built-in uses.
-use std::{env, str::FromStr};
+use std::{
+    collections::HashMap,
+    env,
+    ffi::{OsStr, OsString},
+    mem,
+    sync::{Mutex, MutexGuard, PoisonError},
+};
 // Workspace uses
 use zksync_basic_types::{Address, H256};
 
-/// Parses the provided fixture in a form of `VARIABLE_NAME=variable_value` lines and
-/// sets the corresponding environment variables.
-pub fn set_env(fixture: &str) {
-    for line in fixture.split('\n').map(str::trim) {
-        if line.is_empty() {
-            // Skip empty lines.
-            continue;
+/// Mutex that allows to modify certain env variables and roll them back to initial values when
+/// the corresponding [`EnvMutexGuard`] is dropped. This is useful for having multiple tests
+/// that parse the same config from the environment.
+#[derive(Debug)]
+pub(crate) struct EnvMutex(Mutex<()>);
+
+impl EnvMutex {
+    /// Creates a new mutex. Separate mutexes can be used for changing env vars that do not intersect
+    /// (e.g., env vars for different configs).
+    pub const fn new() -> Self {
+        Self(Mutex::new(()))
+    }
+
+    pub fn lock(&self) -> EnvMutexGuard<'_> {
+        let guard = self.0.lock().unwrap_or_else(PoisonError::into_inner);
+        EnvMutexGuard {
+            _inner: guard,
+            redefined_vars: HashMap::new(),
         }
+    }
+}
+
+/// Guard provided by [`EnvMutex`] that allows mutating env variables. All changes are rolled back
+/// when the guard is dropped.
+#[must_use = "Environment will be reset when the guard is dropped"]
+#[derive(Debug)]
+pub(crate) struct EnvMutexGuard<'a> {
+    _inner: MutexGuard<'a, ()>,
+    redefined_vars: HashMap<OsString, Option<OsString>>,
+}
+
+impl Drop for EnvMutexGuard<'_> {
+    fn drop(&mut self) {
+        for (env_name, value) in mem::take(&mut self.redefined_vars) {
+            if let Some(value) = value {
+                env::set_var(env_name, value);
+            } else {
+                env::remove_var(env_name);
+            }
+        }
+    }
+}
 
-        let elements: Vec<_> = line.split('=').collect();
-        assert_eq!(
-            elements.len(),
-            2,
-            "Incorrect line for setting environment variable: {}",
-            line
-        );
+impl EnvMutexGuard<'_> {
+    /// Sets env vars specified in `.env`-like format.
+    pub fn set_env(&mut self, fixture: &str) {
+        for line in fixture.split('\n').map(str::trim) {
+            if line.is_empty() {
+                // Skip empty lines.
+ continue; + } - let variable_name = elements[0]; - let variable_value = elements[1].trim_matches('"'); + let elements: Vec<_> = line.split('=').collect(); + assert_eq!( + elements.len(), + 2, + "Incorrect line for setting environment variable: {}", + line + ); - env::set_var(variable_name, variable_value); + let variable_name: &OsStr = elements[0].as_ref(); + let variable_value: &OsStr = elements[1].trim_matches('"').as_ref(); + + if !self.redefined_vars.contains_key(variable_name) { + let prev_value = env::var_os(variable_name); + self.redefined_vars + .insert(variable_name.to_os_string(), prev_value); + } + env::set_var(variable_name, variable_value); + } + } + + /// Removes the specified env vars. + pub fn remove_env(&mut self, var_names: &[&str]) { + for &var_name in var_names { + let variable_name: &OsStr = var_name.as_ref(); + if !self.redefined_vars.contains_key(variable_name) { + let prev_value = env::var_os(variable_name); + self.redefined_vars + .insert(variable_name.to_os_string(), prev_value); + } + env::remove_var(variable_name); + } } } /// Parses the address panicking upon deserialization failure. pub fn addr(addr_str: &str) -> Address { - Address::from_str(addr_str).expect("Incorrect address string") + addr_str.parse().expect("Incorrect address string") } /// Parses the H256 panicking upon deserialization failure. pub fn hash(addr_str: &str) -> H256 { - H256::from_str(addr_str).expect("Incorrect hash string") + addr_str.parse().expect("Incorrect hash string") +} + +#[test] +fn env_mutex_basics() { + const TEST_VARIABLE_NAME: &str = "TEST_VARIABLE_THAT_WILL_CERTAINLY_NOT_BE_SET"; + const REDEFINED_VARIABLE_NAME: &str = "REDEFINED_VARIABLE_THAT_WILL_CERTAINLY_NOT_BE_SET"; + + assert!(env::var_os(TEST_VARIABLE_NAME).is_none()); + assert!(env::var_os(REDEFINED_VARIABLE_NAME).is_none()); + env::set_var(REDEFINED_VARIABLE_NAME, "initial"); + + let mutex = EnvMutex::new(); + let mut lock = mutex.lock(); + lock.set_env(&format!("{TEST_VARIABLE_NAME}=test")); + assert!(lock.redefined_vars[OsStr::new(TEST_VARIABLE_NAME)].is_none()); + assert_eq!(env::var_os(TEST_VARIABLE_NAME).unwrap(), "test"); + lock.set_env(&format!("{REDEFINED_VARIABLE_NAME}=redefined")); + assert_eq!( + lock.redefined_vars[OsStr::new(REDEFINED_VARIABLE_NAME)] + .as_ref() + .unwrap(), + "initial" + ); + assert_eq!(env::var_os(REDEFINED_VARIABLE_NAME).unwrap(), "redefined"); + + lock.remove_env(&[REDEFINED_VARIABLE_NAME]); + assert!(env::var_os(REDEFINED_VARIABLE_NAME).is_none()); + assert_eq!( + lock.redefined_vars[OsStr::new(REDEFINED_VARIABLE_NAME)] + .as_ref() + .unwrap(), + "initial" + ); + + drop(lock); + assert!(env::var_os(TEST_VARIABLE_NAME).is_none()); + assert_eq!(env::var_os(REDEFINED_VARIABLE_NAME).unwrap(), "initial"); } diff --git a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs index b44b925a9b4b..8af1a04b1ccb 100644 --- a/core/lib/config/src/configs/witness_generator.rs +++ b/core/lib/config/src/configs/witness_generator.rs @@ -44,13 +44,14 @@ impl WitnessGeneratorConfig { #[cfg(test)] mod tests { - use crate::configs::test_utils::set_env; - use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); fn expected_config() -> WitnessGeneratorConfig { WitnessGeneratorConfig { - generation_timeout_in_secs: 900u16, + generation_timeout_in_secs: 900_u16, initial_setup_key_path: "key".to_owned(), key_download_url: "value".to_owned(), max_attempts: 4, @@ -62,15 +63,17 @@ mod tests { #[test] fn from_env() { + 
let mut lock = MUTEX.lock();
         let config = r#"
-        WITNESS_GENERATION_TIMEOUT_IN_SECS=900
-        WITNESS_INITIAL_SETUP_KEY_PATH="key"
-        WITNESS_KEY_DOWNLOAD_URL="value"
-        WITNESS_MAX_ATTEMPTS=4
-        WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3"
-        WITNESS_BLOCKS_PROVING_PERCENTAGE="30"
+            WITNESS_GENERATION_TIMEOUT_IN_SECS=900
+            WITNESS_INITIAL_SETUP_KEY_PATH="key"
+            WITNESS_KEY_DOWNLOAD_URL="value"
+            WITNESS_MAX_ATTEMPTS=4
+            WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3"
+            WITNESS_BLOCKS_PROVING_PERCENTAGE="30"
         "#;
-        set_env(config);
+        lock.set_env(config);
+
         let actual = WitnessGeneratorConfig::from_env();
         assert_eq!(actual, expected_config());
     }
diff --git a/core/lib/config/src/constants/contracts.rs b/core/lib/config/src/constants/contracts.rs
index 5d81ca907407..15b9c9c08207 100644
--- a/core/lib/config/src/constants/contracts.rs
+++ b/core/lib/config/src/constants/contracts.rs
@@ -78,6 +78,11 @@ pub const BYTECODE_COMPRESSOR_ADDRESS: Address = H160([
     0x00, 0x00, 0x80, 0x0e,
 ]);
 
+pub const COMPLEX_UPGRADER_ADDRESS: Address = H160([
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x80, 0x0f,
+]);
+
 /// The `ecrecover` system contract address.
 pub const ECRECOVER_PRECOMPILE_ADDRESS: Address = H160([
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml
index cd15964c9345..380aeddf3a5b 100644
--- a/core/lib/contracts/Cargo.toml
+++ b/core/lib/contracts/Cargo.toml
@@ -12,7 +12,7 @@ categories = ["cryptography"]
 [dependencies]
 zksync_utils = { path = "../utils", version = "1.0" }
 
-ethabi = "16.0.0"
+ethabi = "18.0.0"
 serde_json = "1.0"
 serde = "1.0"
 once_cell = "1.7"
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index c7a7b5d68314..be67ceb9131b 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -20,6 +20,8 @@ pub enum ContractLanguage {
 
 const ZKSYNC_CONTRACT_FILE: &str =
     "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/interfaces/IZkSync.sol/IZkSync.json";
+const MULTICALL3_CONTRACT_FILE: &str =
+    "contracts/ethereum/artifacts/cache/solpp-generated-contracts/dev-contracts/Multicall3.sol/Multicall3.json";
 const VERIFIER_CONTRACT_FILE: &str =
     "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/Verifier.sol/Verifier.json";
 const IERC20_CONTRACT_FILE: &str =
@@ -65,6 +67,10 @@ pub fn zksync_contract() -> Contract {
     load_contract(ZKSYNC_CONTRACT_FILE)
 }
 
+pub fn multicall_contract() -> Contract {
+    load_contract(MULTICALL3_CONTRACT_FILE)
+}
+
 pub fn erc20_contract() -> Contract {
     load_contract(IERC20_CONTRACT_FILE)
 }
@@ -196,7 +202,6 @@ pub fn read_zbin_bytecode(zbin_path: impl AsRef<Path>) -> Vec<u8> {
     fs::read(&bytecode_path)
         .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err))
 }
-
 /// Hash of code and code which consists of 32 bytes words
 #[derive(Debug, Clone)]
 pub struct SystemContractCode {
@@ -271,12 +276,18 @@ impl BaseSystemContracts {
         BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
     }
 
-    /// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'.
+    /// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'.
    pub fn playground() -> Self {
         let bootloader_bytecode = read_playground_block_bootloader_bytecode();
         BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
     }
 
+    /// BaseSystemContracts with the fee estimation bootloader - used for handling `eth_estimateGas`.
+    pub fn estimate_gas() -> Self {
+        let bootloader_bytecode = read_bootloader_code("fee_estimate");
+        BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
+    }
+
     pub fn hashes(&self) -> BaseSystemContractsHashes {
         BaseSystemContractsHashes {
             bootloader: self.bootloader.hash,
diff --git a/core/lib/crypto/Cargo.toml b/core/lib/crypto/Cargo.toml
index f79058d3a000..838247374c48 100644
--- a/core/lib/crypto/Cargo.toml
+++ b/core/lib/crypto/Cargo.toml
@@ -11,11 +11,6 @@ categories = ["cryptography"]
 readme = "README.md"
 
 [dependencies]
-#franklin-crypto = {git = "https://github.com/matter-labs/franklin-crypto", branch = "dev", features = ["multicore", "plonk"]}
-#recursive_aggregation_circuit = { version = "1.0.0", git = "https://github.com/matter-labs/recursive_aggregation_circuit.git"}
-#rescue_poseidon = { version = "0.4.0", git = "https://github.com/matter-labs/rescue-poseidon.git", branch="dev-dep" }
-rand = "0.4"
-
 zksync_basic_types = { path = "../basic_types", version = "1.0" }
 serde = "1.0"
 thiserror = "1.0"
@@ -27,4 +22,3 @@ blake2 = "0.10"
 
 [dev-dependencies]
 serde_json = "1.0"
-
diff --git a/core/lib/crypto/src/convert.rs b/core/lib/crypto/src/convert.rs
deleted file mode 100644
index 9fb8ac5ba408..000000000000
--- a/core/lib/crypto/src/convert.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-// use crate::{
-//     error::ConversionError,
-//     franklin_crypto::bellman::pairing::ff::{PrimeField, PrimeFieldRepr},
-// };
-//
-// /// Extension trait denoting common conversion method for field elements.
-// pub trait FeConvert: PrimeField {
-//     /// Converts the field element into a byte array.
-//     fn to_bytes(&self) -> Vec<u8> {
-//         let mut buf: Vec<u8> = Vec::with_capacity(32);
-//         self.into_repr().write_be(&mut buf).unwrap();
-//
-//         buf
-//     }
-//
-//     /// Reads a field element from its byte sequence representation.
-//     fn from_bytes(value: &[u8]) -> Result<Self, ConversionError> {
-//         let mut repr = Self::Repr::default();
-//
-//         // `repr.as_ref()` converts `repr` to a list of `u64`. Each element has 8 bytes,
-//         // so to obtain size in bytes, we multiply the array size with the size of `u64`.
-//         let expected_input_size = repr.as_ref().len() * 8;
-//         if value.len() != expected_input_size {
-//             return Err(ConversionError::IncorrectInputSize {
-//                 size: value.len(),
-//                 expected_size: expected_input_size,
-//             });
-//         }
-//         repr.read_be(value).map_err(ConversionError::ParsingError)?;
-//         Self::from_repr(repr).map_err(From::from)
-//     }
-//
-//     /// Returns hex representation of the field element without `0x` prefix.
-//     fn to_hex(&self) -> String {
-//         let mut buf: Vec<u8> = Vec::with_capacity(32);
-//         self.into_repr().write_be(&mut buf).unwrap();
-//         hex::encode(&buf)
-//     }
-//
-//     /// Reads a field element from its hexadecimal representation.
-//     fn from_hex(value: &str) -> Result<Self, ConversionError> {
-//         let value = if let Some(value) = value.strip_prefix("0x") {
-//             value
-//         } else {
-//             value
-//         };
-//
-//         // Buffer is reversed and read as little endian, since we pad it with zeros to
-//         // match the expected length.
-//         let mut buf = hex::decode(&value)?;
-//         buf.reverse();
-//         let mut repr = Self::Repr::default();
-//
-//         // `repr.as_ref()` converts `repr` to a list of `u64`. Each element has 8 bytes,
-//         // so to obtain size in bytes, we multiply the array size with the size of `u64`.
-// buf.resize(repr.as_ref().len() * 8, 0); -// repr.read_le(&buf[..]) -// .map_err(ConversionError::ParsingError)?; -// Self::from_repr(repr).map_err(From::from) -// } -// } -// -// impl FeConvert for T where T: PrimeField {} -// -// #[cfg(test)] -// mod tests { -// use super::*; -// -// use crate::{ -// rand::{Rand, SeedableRng, XorShiftRng}, -// Fr, -// }; -// -// /// Checks that converting FE to the hex form and back results -// /// in the same FE. -// #[test] -// fn fe_hex_roundtrip() { -// let mut rng = XorShiftRng::from_seed([1, 2, 3, 4]); -// -// let fr = Fr::rand(&mut rng); -// -// let encoded_fr = fr.to_hex(); -// let decoded_fr = Fr::from_hex(&encoded_fr).expect("Can't decode encoded fr"); -// -// assert_eq!(fr, decoded_fr); -// } -// -// /// Checks that converting FE to the bytes form and back results -// /// in the same FE. -// #[test] -// fn fe_bytes_roundtrip() { -// let mut rng = XorShiftRng::from_seed([1, 2, 3, 4]); -// -// let fr = Fr::rand(&mut rng); -// -// let encoded_fr = fr.to_bytes(); -// let decoded_fr = Fr::from_bytes(&encoded_fr).expect("Can't decode encoded fr"); -// -// assert_eq!(fr, decoded_fr); -// } -// } diff --git a/core/lib/crypto/src/error.rs b/core/lib/crypto/src/error.rs deleted file mode 100644 index e7ecbd4c4ac3..000000000000 --- a/core/lib/crypto/src/error.rs +++ /dev/null @@ -1,21 +0,0 @@ -// use crate::franklin_crypto::bellman::pairing::ff; -// use hex::FromHexError; -// use thiserror::Error; -// -// #[derive(Debug, Error, PartialEq)] -// pub enum PackingError { -// #[error("Input integer is too big for packing. Actual: {integer}, limit: {limit}")] -// IntegerTooBig { integer: u128, limit: u128 }, -// } -// -// #[derive(Debug, Error)] -// pub enum ConversionError { -// #[error("Incorrect input size. Actual: {size}, expected: {expected_size}")] -// IncorrectInputSize { size: usize, expected_size: usize }, -// #[error("Cannot decode hex: {0}")] -// HexDecodingError(#[from] FromHexError), -// #[error("Cannot parse value {0}")] -// ParsingError(std::io::Error), -// #[error("Cannot convert into prime field value: {0}")] -// PrimeFieldDecodingError(#[from] ff::PrimeFieldDecodingError), -// } diff --git a/core/lib/crypto/src/lib.rs b/core/lib/crypto/src/lib.rs index e3955fddfd52..f437e48ef7b3 100644 --- a/core/lib/crypto/src/lib.rs +++ b/core/lib/crypto/src/lib.rs @@ -1,52 +1 @@ -//! `zksync_crypto` is a crate containing essential zkSync cryptographic primitives, such as private keys and hashers. - -#![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] - -// use crate::franklin_crypto::{ -// bellman::{pairing::bn256, plonk::better_cs::cs::PlonkCsWidth4WithNextStepParams}, -// // eddsa::{PrivateKey as PrivateKeyImport, PublicKey as PublicKeyImport}, -// jubjub::JubjubEngine, -// }; - -mod crypto_exports { - // pub use crate::franklin_crypto::{ - // bellman, - // bellman::{pairing, pairing::ff}, - // }; - // pub use franklin_crypto; - pub use rand; - // pub use recursive_aggregation_circuit; -} - -pub use crypto_exports::*; - -pub mod convert; -pub mod error; pub mod hasher; -pub mod primitives; -pub mod proof; -pub mod serialization; - -// pub use crypto_exports::*; -// -// pub type Engine = bn256::Bn256; -// pub type Fr = bn256::Fr; -// pub type Fs = ::Fs; -// pub type PlonkCS = PlonkCsWidth4WithNextStepParams; - -// pub type PrivateKey = PrivateKeyImport; -// pub type PublicKey = PublicKeyImport; - -// /// Decodes a private key from a field element. 
-// pub fn priv_key_from_fs(fs: Fs) -> PrivateKey { -// PrivateKeyImport(fs) -// } - -// /// Converts private key into a corresponding public key. -// pub fn public_key_from_private(pk: &PrivateKey) -> PublicKey { -// PublicKey::from_private( -// pk, -// FixedGenerators::SpendingKeyGenerator, -// ¶ms::JUBJUB_PARAMS, -// ) -// } diff --git a/core/lib/crypto/src/primitives.rs b/core/lib/crypto/src/primitives.rs deleted file mode 100644 index 62f7a98b58e9..000000000000 --- a/core/lib/crypto/src/primitives.rs +++ /dev/null @@ -1,132 +0,0 @@ -// // Built-in deps -// // External deps -// use crate::franklin_crypto::bellman::pairing::{ -// bn256::Bn256, -// ff::{PrimeField, PrimeFieldRepr, ScalarEngine}, -// CurveAffine, Engine, -// }; -// use zksync_basic_types::U256; -// // Workspace deps -// -// pub struct EthereumSerializer; -// -// impl EthereumSerializer { -// pub fn serialize_g1(point: &::G1Affine) -> (U256, U256) { -// if point.is_zero() { -// return (U256::zero(), U256::zero()); -// } -// let uncompressed = point.into_uncompressed(); -// -// let uncompressed_slice = uncompressed.as_ref(); -// -// // bellman serializes points as big endian and in the form x, y -// // ethereum expects the same order in memory -// let x = U256::from_big_endian(&uncompressed_slice[0..32]); -// let y = U256::from_big_endian(&uncompressed_slice[32..64]); -// -// (x, y) -// } -// -// pub fn serialize_g2(point: &::G2Affine) -> ((U256, U256), (U256, U256)) { -// let uncompressed = point.into_uncompressed(); -// -// let uncompressed_slice = uncompressed.as_ref(); -// -// // bellman serializes points as big endian and in the form x1*u, x0, y1*u, y0 -// // ethereum expects the same order in memory -// let x_1 = U256::from_big_endian(&uncompressed_slice[0..32]); -// let x_0 = U256::from_big_endian(&uncompressed_slice[32..64]); -// let y_1 = U256::from_big_endian(&uncompressed_slice[64..96]); -// let y_0 = U256::from_big_endian(&uncompressed_slice[96..128]); -// -// ((x_1, x_0), (y_1, y_0)) -// } -// -// pub fn serialize_fe(field_element: &::Fr) -> U256 { -// let mut be_bytes = [0u8; 32]; -// field_element -// .into_repr() -// .write_be(&mut be_bytes[..]) -// .expect("get new root BE bytes"); -// U256::from_big_endian(&be_bytes[..]) -// } -// } -// -// pub struct BitConvert; -// -// impl BitConvert { -// /// Converts a set of bits to a set of bytes in direct order. -// #[allow(clippy::wrong_self_convention)] -// pub fn into_bytes(bits: Vec) -> Vec { -// assert_eq!(bits.len() % 8, 0); -// let mut message_bytes: Vec = vec![]; -// -// let byte_chunks = bits.chunks(8); -// for byte_chunk in byte_chunks { -// let mut byte = 0u8; -// for (i, bit) in byte_chunk.iter().enumerate() { -// if *bit { -// byte |= 1 << i; -// } -// } -// message_bytes.push(byte); -// } -// -// message_bytes -// } -// -// /// Converts a set of bits to a set of bytes in reverse order for each byte. -// #[allow(clippy::wrong_self_convention)] -// pub fn into_bytes_ordered(bits: Vec) -> Vec { -// assert_eq!(bits.len() % 8, 0); -// let mut message_bytes: Vec = vec![]; -// -// let byte_chunks = bits.chunks(8); -// for byte_chunk in byte_chunks { -// let mut byte = 0u8; -// for (i, bit) in byte_chunk.iter().rev().enumerate() { -// if *bit { -// byte |= 1 << i; -// } -// } -// message_bytes.push(byte); -// } -// -// message_bytes -// } -// -// /// Converts a set of Big Endian bytes to a set of bits. 
-//     pub fn from_be_bytes(bytes: &[u8]) -> Vec<bool> {
-//         let mut bits = vec![];
-//         for byte in bytes {
-//             let mut temp = *byte;
-//             for _ in 0..8 {
-//                 bits.push(temp & 0x80 == 0x80);
-//                 temp <<= 1;
-//             }
-//         }
-//         bits
-//     }
-// }
-//
-// #[cfg(test)]
-// mod test {
-//     use super::*;
-//
-//     #[test]
-//     fn test_bits_conversions() {
-//         let mut bits = vec![];
-//
-//         bits.extend(vec![true, false, false, true, true, false, true, false]);
-//         bits.extend(vec![false, false, true, true, false, true, true, false]);
-//         bits.extend(vec![false, false, false, false, false, false, false, true]);
-//
-//         let bytes = BitConvert::into_bytes(bits.clone());
-//         assert_eq!(bytes, vec![89, 108, 128]);
-//
-//         let bytes = BitConvert::into_bytes_ordered(bits.clone());
-//         assert_eq!(bytes, vec![154, 54, 1]);
-//
-//         assert_eq!(BitConvert::from_be_bytes(&[154, 54, 1]), bits);
-//     }
-// }
diff --git a/core/lib/crypto/src/proof.rs b/core/lib/crypto/src/proof.rs
deleted file mode 100644
index 3af6228410af..000000000000
--- a/core/lib/crypto/src/proof.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-use serde::{Deserialize, Serialize};
-use zksync_basic_types::{ethabi::Token, U256};
-
-/// Encoded representation of the aggregated block proof.
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
-pub struct EncodedAggregatedProof {
-    pub aggregated_input: U256,
-    pub proof: Vec<U256>,
-    pub subproof_limbs: Vec<U256>,
-    pub individual_vk_inputs: Vec<U256>,
-    pub individual_vk_idxs: Vec<U256>,
-}
-
-impl EncodedAggregatedProof {
-    pub fn get_eth_tx_args(&self) -> Token {
-        let subproof_limbs = Token::Array(
-            self.subproof_limbs
-                .iter()
-                .map(|v| Token::Uint(*v))
-                .collect(),
-        );
-        let proof = Token::Array(
-            self.proof
-                .iter()
-                .map(|p| Token::Uint(U256::from(p)))
-                .collect(),
-        );
-
-        Token::Tuple(vec![subproof_limbs, proof])
-    }
-}
-
-impl Default for EncodedAggregatedProof {
-    fn default() -> Self {
-        Self {
-            aggregated_input: U256::default(),
-            proof: vec![U256::default(); 34],
-            subproof_limbs: vec![U256::default(); 16],
-            individual_vk_inputs: vec![U256::default(); 1],
-            individual_vk_idxs: vec![U256::default(); 1],
-        }
-    }
-}
-
-/// Encoded representation of the block proof.
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
-pub struct EncodedSingleProof {
-    pub inputs: Vec<U256>,
-    pub proof: Vec<U256>,
-}
-
-impl Default for EncodedSingleProof {
-    fn default() -> Self {
-        Self {
-            inputs: vec![U256::default(); 1],
-            proof: vec![U256::default(); 33],
-        }
-    }
-}
diff --git a/core/lib/crypto/src/serialization.rs b/core/lib/crypto/src/serialization.rs
deleted file mode 100644
index 41ab89fefcad..000000000000
--- a/core/lib/crypto/src/serialization.rs
+++ /dev/null
@@ -1,462 +0,0 @@
-// //! Common serialization utilities.
-// //!
-// //! This module provides building blocks for serializing and deserializing
-// //! common `zksync` types.
-//
-// use crate::{
-//     bellman::plonk::{
-//         better_better_cs::{cs::Circuit as NewCircuit, proof::Proof as NewProof},
-//         better_cs::{cs::PlonkCsWidth4WithNextStepParams, keys::Proof as OldProof},
-//     },
-//     convert::FeConvert,
-//     primitives::EthereumSerializer,
-//     proof::EncodedSingleProof,
-//     recursive_aggregation_circuit::circuit::RecursiveAggregationCircuitBn256,
-//     Engine, Fr,
-// };
-// use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer};
-// use zksync_basic_types::U256;
-//
-// /// Blanket structure implementing serializing/deserializing methods for `Fr`.
-// /// -// /// This structure is required, since `Fr` does not originate in the current -// /// crate and we can't implement `serde` traits for it. -// /// -// /// ## Example: -// /// -// /// ``` -// /// use zksync_crypto::serialization::FrSerde; -// /// use zksync_crypto::Fr; -// /// use serde::{Serialize, Deserialize}; -// /// -// /// #[derive(Clone, Debug, Serialize, Deserialize)] -// /// pub struct SomeStructure { -// /// #[serde(with = "FrSerde")] -// /// pub some_data: Fr, -// /// } -// /// ``` -// pub struct FrSerde; -// -// impl FrSerde { -// pub fn serialize(value: &Fr, serializer: S) -> Result -// where -// S: Serializer, -// { -// // First, serialize `Fr` to hexadecimal string. -// let hex_value = value.to_hex(); -// -// // Then, serialize it using `Serialize` trait implementation for `String`. -// String::serialize(&hex_value, serializer) -// } -// -// pub fn deserialize<'de, D>(deserializer: D) -> Result -// where -// D: Deserializer<'de>, -// { -// // First, deserialize a string value. It is expected to be a -// // hexadecimal representation of `Fr`. -// let deserialized_string = String::deserialize(deserializer)?; -// -// // Then, parse hexadecimal string to obtain `Fr`. -// Fr::from_hex(&deserialized_string).map_err(de::Error::custom) -// } -// } -// -// /// Blanket structure implementing serializing/deserializing methods for `Option`. -// /// -// /// ## Example: -// /// -// /// ``` -// /// use zksync_crypto::serialization::OptionalFrSerde; -// /// use zksync_crypto::Fr; -// /// use serde::{Serialize, Deserialize}; -// /// -// /// #[derive(Clone, Debug, Serialize, Deserialize)] -// /// pub struct SomeStructure { -// /// #[serde(with = "OptionalFrSerde")] -// /// pub maybe_some_data: Option, -// /// } -// /// ``` -// pub struct OptionalFrSerde; -// -// impl OptionalFrSerde { -// pub fn serialize(value: &Option, serializer: S) -> Result -// where -// S: Serializer, -// { -// let optional_hex_value = value.map(|fr| fr.to_hex()); -// -// Option::serialize(&optional_hex_value, serializer) -// } -// -// pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -// where -// D: Deserializer<'de>, -// { -// let optional_deserialized_string: Option = Option::deserialize(deserializer)?; -// -// // Apply `fe_from_hex` to the contents of `Option`, then transpose result to have -// // `Result, ..>` and adapt error to the expected format. -// optional_deserialized_string -// .map(|v| Fr::from_hex(&v)) -// .transpose() -// .map_err(de::Error::custom) -// } -// } -// -// /// Blanket structure implementing serializing/deserializing methods for `Vec>`. 
-// /// -// /// ## Example: -// /// -// /// ``` -// /// use zksync_crypto::serialization::VecOptionalFrSerde; -// /// use zksync_crypto::Fr; -// /// use serde::{Serialize, Deserialize}; -// /// -// /// #[derive(Clone, Debug, Serialize, Deserialize)] -// /// pub struct SomeStructure { -// /// #[serde(with = "VecOptionalFrSerde")] -// /// pub maybe_some_data: Vec>, -// /// } -// /// ``` -// pub struct VecOptionalFrSerde; -// -// impl VecOptionalFrSerde { -// pub fn serialize(operations: &[Option], ser: S) -> Result -// where -// S: Serializer, -// { -// let mut res = Vec::with_capacity(operations.len()); -// for value in operations.iter() { -// let v = value.map(|fr| fr.to_hex()); -// res.push(v); -// } -// Vec::serialize(&res, ser) -// } -// -// pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> -// where -// D: Deserializer<'de>, -// { -// let str_vec: Vec> = Vec::deserialize(deserializer)?; -// let mut res = Vec::with_capacity(str_vec.len()); -// for s in str_vec.into_iter() { -// if let Some(a) = s { -// let v = Fr::from_hex(&a).map_err(de::Error::custom)?; -// res.push(Some(v)); -// } else { -// res.push(None); -// } -// } -// Ok(res) -// } -// } -// -// /// Blanket structure implementing serializing/deserializing methods for `Vec`. -// /// -// /// ## Example: -// /// -// /// ``` -// /// use zksync_crypto::serialization::VecFrSerde; -// /// use zksync_crypto::Fr; -// /// use serde::{Serialize, Deserialize}; -// /// -// /// #[derive(Clone, Debug, Serialize, Deserialize)] -// /// pub struct SomeStructure { -// /// #[serde(with = "VecFrSerde")] -// /// pub vec_fr: Vec, -// /// } -// /// ``` -// pub struct VecFrSerde; -// -// impl VecFrSerde { -// pub fn serialize(operations: &[Fr], ser: S) -> Result -// where -// S: Serializer, -// { -// let mut res = Vec::with_capacity(operations.len()); -// for fr in operations.iter() { -// res.push(fr.to_hex()); -// } -// Vec::serialize(&res, ser) -// } -// -// pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -// where -// D: Deserializer<'de>, -// { -// let str_vec: Vec = Vec::deserialize(deserializer)?; -// let mut res = Vec::with_capacity(str_vec.len()); -// for s in str_vec.into_iter() { -// let v = Fr::from_hex(&s).map_err(de::Error::custom)?; -// res.push(v); -// } -// Ok(res) -// } -// } -// -// pub struct SingleProofSerde; -// -// impl SingleProofSerde { -// pub fn serialize( -// value: &OldProof, -// serializer: S, -// ) -> Result -// where -// S: Serializer, -// { -// // First, serialize `SingleProof` to base64 string. -// let mut bytes = Vec::new(); -// value.write(&mut bytes).map_err(ser::Error::custom)?; -// let base64_value = base64::encode(&bytes); -// -// // Then, serialize it using `Serialize` trait implementation for `String`. -// String::serialize(&base64_value, serializer) -// } -// -// pub fn deserialize<'de, D>( -// deserializer: D, -// ) -> Result, D::Error> -// where -// D: Deserializer<'de>, -// { -// // First, deserialize a string value. It is expected to be a -// // base64 representation of `SingleProof`. -// let deserialized_string = String::deserialize(deserializer)?; -// let bytes = base64::decode(&deserialized_string).map_err(de::Error::custom)?; -// -// // Then, parse hexadecimal string to obtain `SingleProof`. 
-// OldProof::read(&*bytes).map_err(de::Error::custom) -// } -// } -// -// pub struct AggregatedProofSerde; -// -// impl AggregatedProofSerde { -// pub fn serialize( -// value: &NewProof>, -// serializer: S, -// ) -> Result -// where -// S: Serializer, -// { -// // First, serialize `AggregatedProof` to base64 string. -// let mut bytes = Vec::new(); -// value.write(&mut bytes).map_err(ser::Error::custom)?; -// let base64_value = base64::encode(&bytes); -// -// // Then, serialize it using `Serialize` trait implementation for `String`. -// String::serialize(&base64_value, serializer) -// } -// -// pub fn deserialize<'de, D>( -// deserializer: D, -// ) -> Result>, D::Error> -// where -// D: Deserializer<'de>, -// { -// // First, deserialize a string value. It is expected to be a -// // base64 representation of `AggregatedProof`. -// let deserialized_string = String::deserialize(deserializer)?; -// let bytes = base64::decode(&deserialized_string).map_err(de::Error::custom)?; -// -// // Then, parse hexadecimal string to obtain `SingleProof`. -// NewProof::read(&*bytes).map_err(de::Error::custom) -// } -// } -// -// pub fn serialize_new_proof>( -// proof: &NewProof, -// ) -> (Vec, Vec) { -// let mut inputs = vec![]; -// for input in proof.inputs.iter() { -// inputs.push(EthereumSerializer::serialize_fe(input)); -// } -// let mut serialized_proof = vec![]; -// -// for c in proof.state_polys_commitments.iter() { -// let (x, y) = EthereumSerializer::serialize_g1(c); -// serialized_proof.push(x); -// serialized_proof.push(y); -// } -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.copy_permutation_grand_product_commitment); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// for c in proof.quotient_poly_parts_commitments.iter() { -// let (x, y) = EthereumSerializer::serialize_g1(c); -// serialized_proof.push(x); -// serialized_proof.push(y); -// } -// -// for c in proof.state_polys_openings_at_z.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// for (_, _, c) in proof.state_polys_openings_at_dilations.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// assert_eq!(proof.gate_setup_openings_at_z.len(), 0); -// -// for (_, c) in proof.gate_selectors_openings_at_z.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// for c in proof.copy_permutation_polys_openings_at_z.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.copy_permutation_grand_product_opening_at_z_omega, -// )); -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.quotient_poly_opening_at_z, -// )); -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.linearization_poly_opening_at_z, -// )); -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.opening_proof_at_z); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.opening_proof_at_z_omega); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// (inputs, serialized_proof) -// } -// -// pub fn serialize_single_proof( -// proof: &OldProof, -// ) -> EncodedSingleProof { -// let mut inputs = vec![]; -// for input in proof.input_values.iter() { -// let ser = EthereumSerializer::serialize_fe(input); -// inputs.push(ser); -// } -// let mut serialized_proof = vec![]; -// -// for c in proof.wire_commitments.iter() { -// let (x, y) = 
EthereumSerializer::serialize_g1(c); -// serialized_proof.push(x); -// serialized_proof.push(y); -// } -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.grand_product_commitment); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// for c in proof.quotient_poly_commitments.iter() { -// let (x, y) = EthereumSerializer::serialize_g1(c); -// serialized_proof.push(x); -// serialized_proof.push(y); -// } -// -// for c in proof.wire_values_at_z.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// for c in proof.wire_values_at_z_omega.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.grand_product_at_z_omega, -// )); -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.quotient_polynomial_at_z, -// )); -// serialized_proof.push(EthereumSerializer::serialize_fe( -// &proof.linearization_polynomial_at_z, -// )); -// -// for c in proof.permutation_polynomials_at_z.iter() { -// serialized_proof.push(EthereumSerializer::serialize_fe(c)); -// } -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.opening_at_z_proof); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// let (x, y) = EthereumSerializer::serialize_g1(&proof.opening_at_z_omega_proof); -// serialized_proof.push(x); -// serialized_proof.push(y); -// -// EncodedSingleProof { -// inputs, -// proof: serialized_proof, -// } -// } -// -// #[cfg(test)] -// mod tests { -// use super::*; -// use serde::{Deserialize, Serialize}; -// use serde_json::json; -// -// #[test] -// fn test_fr_serialize() { -// #[derive(Debug, Default, Serialize, Deserialize)] -// struct Reference { -// #[serde(with = "FrSerde")] -// value: Fr, -// } -// -// let value = Reference::default(); -// let serialized_fr = serde_json::to_string(&value).expect("Serialization failed"); -// let expected = json!({ -// "value": "0000000000000000000000000000000000000000000000000000000000000000" -// }); -// -// assert_eq!(serialized_fr, expected.to_string()); -// } -// -// #[test] -// fn test_optional_fr_serialize() { -// #[derive(Debug, Default, Serialize, Deserialize)] -// struct Reference { -// #[serde(with = "OptionalFrSerde")] -// value: Option, -// } -// -// // Check serialization of `None`. -// let value = Reference { value: None }; -// let serialized_fr = serde_json::to_string(&value).expect("Serialization failed"); -// let expected = json!({ "value": null }); -// -// assert_eq!(serialized_fr, expected.to_string()); -// -// // Check serialization of `Some`. 
-// let value = Reference { -// value: Some(Fr::default()), -// }; -// let serialized_fr = serde_json::to_string(&value).expect("Serialization failed"); -// let expected = json!({ -// "value": "0000000000000000000000000000000000000000000000000000000000000000" -// }); -// -// assert_eq!(serialized_fr, expected.to_string()); -// } -// -// #[test] -// fn test_vec_optional_fr_serialize() { -// #[derive(Debug, Default, Serialize, Deserialize)] -// struct Reference { -// #[serde(with = "VecOptionalFrSerde")] -// value: Vec>, -// } -// -// let value = Reference { -// value: vec![None, Some(Fr::default())], -// }; -// let serialized_fr = serde_json::to_string(&value).expect("Serialization failed"); -// let expected = json!({ -// "value": [null, "0000000000000000000000000000000000000000000000000000000000000000"] -// }); -// -// assert_eq!(serialized_fr, expected.to_string()); -// } -// } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 2aa15d70fa99..decf249876b0 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -22,7 +22,7 @@ thiserror = "1.0" anyhow = "1.0" metrics = "0.20" tokio = { version = "1", features = ["time"] } -sqlx = { version = "0.5", default-features = false, features = [ +sqlx = { version = "0.5.13", default-features = false, features = [ "runtime-tokio-native-tls", "macros", "postgres", @@ -33,13 +33,15 @@ sqlx = { version = "0.5", default-features = false, features = [ "migrate", "ipnetwork" ] } +serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -bigdecimal = "0.2.0" +bigdecimal = "0.2.2" bincode = "1" num = "0.3.1" hex = "0.4" once_cell = "1.7" strum = { version = "0.24", features = ["derive"] } + [dev-dependencies] +assert_matches = "1.5.0" db_test_macro = { path = "../db_test_macro", version = "0.1.0" } -#criterion = "0.3.0" diff --git a/core/lib/dal/migrations/20230427083744_protocol_versions_table.down.sql b/core/lib/dal/migrations/20230427083744_protocol_versions_table.down.sql new file mode 100644 index 000000000000..57c0c51b722c --- /dev/null +++ b/core/lib/dal/migrations/20230427083744_protocol_versions_table.down.sql @@ -0,0 +1,4 @@ +ALTER TABLE transactions DROP COLUMN IF EXISTS upgrade_id; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS protocol_version; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS protocol_version; +DROP TABLE IF EXISTS protocol_versions; diff --git a/core/lib/dal/migrations/20230427083744_protocol_versions_table.up.sql b/core/lib/dal/migrations/20230427083744_protocol_versions_table.up.sql new file mode 100644 index 000000000000..6b43cba301f7 --- /dev/null +++ b/core/lib/dal/migrations/20230427083744_protocol_versions_table.up.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS protocol_versions ( + id INT PRIMARY KEY, + timestamp BIGINT NOT NULL, + recursion_scheduler_level_vk_hash BYTEA NOT NULL, + recursion_node_level_vk_hash BYTEA NOT NULL, + recursion_leaf_level_vk_hash BYTEA NOT NULL, + recursion_circuits_set_vks_hash BYTEA NOT NULL, + bootloader_code_hash BYTEA NOT NULL, + default_account_code_hash BYTEA NOT NULL, + verifier_address BYTEA NOT NULL, + upgrade_tx_hash BYTEA REFERENCES transactions (hash), + created_at TIMESTAMP NOT NULL +); + +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE transactions ADD COLUMN IF NOT EXISTS upgrade_id INT; diff --git 
a/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.down.sql b/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.down.sql new file mode 100644 index 000000000000..38e7ed7a73ec --- /dev/null +++ b/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE witness_inputs DROP COLUMN IF EXISTS protocol_version; +ALTER TABLE leaf_aggregation_witness_jobs DROP COLUMN IF EXISTS protocol_version; +ALTER TABLE node_aggregation_witness_jobs DROP COLUMN IF EXISTS protocol_version; +ALTER TABLE scheduler_witness_jobs DROP COLUMN IF EXISTS protocol_version; +ALTER TABLE prover_jobs DROP COLUMN IF EXISTS protocol_version; + diff --git a/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.up.sql b/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.up.sql new file mode 100644 index 000000000000..786774505d12 --- /dev/null +++ b/core/lib/dal/migrations/20230530091756_add_system_version_in_witness_and_prover_related_tables.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE witness_inputs ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE leaf_aggregation_witness_jobs ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE node_aggregation_witness_jobs ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE scheduler_witness_jobs ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); +ALTER TABLE prover_jobs ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES protocol_versions (id); diff --git a/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.down.sql b/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.down.sql new file mode 100644 index 000000000000..c23881289157 --- /dev/null +++ b/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.down.sql @@ -0,0 +1,21 @@ +DROP INDEX IF EXISTS ix_prover_jobs_circuits_0_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_1_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_2_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_3_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_4_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_5_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_6_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_7_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_8_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_9_1; + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_0 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{Scheduler,"L1 messages merklizer"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_1 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Node aggregation","Decommitts sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Leaf aggregation","Code decommitter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_3 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 
'queued'::text) AND (circuit_type = ANY ('{"Log demuxer",Keccak}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_4 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{SHA256,ECRecover}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_5 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"RAM permutation","Storage sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_6 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Storage application","Initial writes pubdata rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_7 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Repeated writes pubdata rehasher","Events sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_8 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"L1 messages sorter","L1 messages rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_9 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Main VM"}'::text[]))); diff --git a/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.up.sql b/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.up.sql new file mode 100644 index 000000000000..ac40b842b863 --- /dev/null +++ b/core/lib/dal/migrations/20230619113310_add_protocol_version_index_in_prover_jobs.up.sql @@ -0,0 +1,21 @@ +DROP INDEX IF EXISTS ix_prover_jobs_circuits_0; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_3; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_4; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_5; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_6; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_7; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_8; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_9; + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_0_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{Scheduler,"L1 messages merklizer"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_1_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Node aggregation","Decommitts sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_2_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Leaf aggregation","Code decommitter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_3_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Log demuxer",Keccak}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_4_1 ON public.prover_jobs USING btree (protocol_version, 
aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{SHA256,ECRecover}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_5_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"RAM permutation","Storage sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_6_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Storage application","Initial writes pubdata rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_7_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Repeated writes pubdata rehasher","Events sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_8_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"L1 messages sorter","L1 messages rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_9_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Main VM"}'::text[]))); diff --git a/core/lib/dal/migrations/20230706080351_add-enumeration-indices.down.sql b/core/lib/dal/migrations/20230706080351_add-enumeration-indices.down.sql new file mode 100644 index 000000000000..4b60799a52ef --- /dev/null +++ b/core/lib/dal/migrations/20230706080351_add-enumeration-indices.down.sql @@ -0,0 +1 @@ +ALTER TABLE initial_writes DROP COLUMN IF EXISTS index; diff --git a/core/lib/dal/migrations/20230706080351_add-enumeration-indices.up.sql b/core/lib/dal/migrations/20230706080351_add-enumeration-indices.up.sql new file mode 100644 index 000000000000..d41bf4e35d7c --- /dev/null +++ b/core/lib/dal/migrations/20230706080351_add-enumeration-indices.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE initial_writes ADD COLUMN IF NOT EXISTS index BIGINT; +CREATE UNIQUE INDEX IF NOT EXISTS initial_writes_index_index ON initial_writes (index); diff --git a/core/lib/dal/migrations/20230717182755_pending_tx_index.down.sql b/core/lib/dal/migrations/20230717182755_pending_tx_index.down.sql new file mode 100644 index 000000000000..2277c1929ba8 --- /dev/null +++ b/core/lib/dal/migrations/20230717182755_pending_tx_index.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS pending_l1_batch_txs; +CREATE INDEX IF NOT EXISTS transactions_l1_batch_number_idx ON transactions USING btree (l1_batch_number) diff --git a/core/lib/dal/migrations/20230717182755_pending_tx_index.up.sql b/core/lib/dal/migrations/20230717182755_pending_tx_index.up.sql new file mode 100644 index 000000000000..3db0f2fa69b3 --- /dev/null +++ b/core/lib/dal/migrations/20230717182755_pending_tx_index.up.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS pending_l1_batch_txs ON public.transactions USING btree (miniblock_number, index_in_block) WHERE ((miniblock_number IS NOT NULL) AND (l1_batch_number IS NULL)); +DROP INDEX IF EXISTS transactions_l1_batch_number_idx; diff --git a/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.down.sql b/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.down.sql new file mode 100644 index 000000000000..b784882e0a49 --- 
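-- Illustrative sketch (not part of this diff): the shape of the dequeue query that the
-- ix_prover_jobs_circuits_*_1 partial indexes above are built to serve. Table, column,
-- and circuit names come from the migrations in this PR; the literal circuit list and
-- protocol version are hypothetical example values. With protocol_version pinned, the
-- ORDER BY matches the index key, so the oldest eligible job is found without a sort.
UPDATE prover_jobs
SET status = 'in_progress', attempts = attempts + 1,
    updated_at = now(), processing_started_at = now()
WHERE id = (
    SELECT id
    FROM prover_jobs
    WHERE status = 'queued'                                   -- matches index predicate
      AND circuit_type = ANY('{SHA256,ECRecover}'::text[])    -- matches index predicate
      AND protocol_version = 1                                -- leading index column
    ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC
    LIMIT 1
    FOR UPDATE SKIP LOCKED                                    -- lets concurrent provers claim distinct jobs
)
RETURNING prover_jobs.*;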
/dev/null +++ b/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS proof_generation_details; + +DROP INDEX IF EXISTS idx_l1_batches_status_prover_taken_at; diff --git a/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.up.sql b/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.up.sql new file mode 100644 index 000000000000..72056a0d6152 --- /dev/null +++ b/core/lib/dal/migrations/20230810090211_add_proof_generation_details_table.up.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS proof_generation_details +( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + status TEXT NOT NULL, + proof_gen_data_blob_url TEXT NOT NULL, + proof_blob_url TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + prover_taken_at TIMESTAMP +); + + +CREATE INDEX IF NOT EXISTS idx_proof_generation_details_status_prover_taken_at + ON proof_generation_details (prover_taken_at) + WHERE status = 'picked_by_prover'; diff --git a/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.down.sql b/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.down.sql new file mode 100644 index 000000000000..cad2bad7c91a --- /dev/null +++ b/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.down.sql @@ -0,0 +1 @@ +DROP TABLE IF exists gpu_prover_queue_fri; diff --git a/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.up.sql b/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.up.sql new file mode 100644 index 000000000000..bd5d5b27c2b8 --- /dev/null +++ b/core/lib/dal/migrations/20230816123036_create_table_gpu_prover_queue_fri.up.sql @@ -0,0 +1,14 @@ +-- Add up migration script here +CREATE TABLE IF NOT EXISTS gpu_prover_queue_fri +( + id BIGSERIAL PRIMARY KEY, + instance_host INET NOT NULL, + instance_port INT NOT NULL + CONSTRAINT valid_port CHECK (instance_port >= 0 AND instance_port <= 65535), + instance_status TEXT NOT NULL, + specialized_prover_group_id SMALLINT NOT NULL, + zone TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP +); diff --git a/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.down.sql b/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.down.sql new file mode 100644 index 000000000000..34ade94cf9ed --- /dev/null +++ b/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS gpu_prover_queue_fri_host_port_zone_idx; diff --git a/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.up.sql b/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.up.sql new file mode 100644 index 000000000000..6eb4febe8340 --- /dev/null +++ b/core/lib/dal/migrations/20230817101508_add_index_for_gpu_prover_queue_fri.up.sql @@ -0,0 +1 @@ +CREATE UNIQUE INDEX IF NOT EXISTS gpu_prover_queue_fri_host_port_zone_idx ON gpu_prover_queue_fri (instance_host, instance_port, zone); diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 1bd3f3ee7607..2bdee59ce8f4 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -1,22 +1,18 @@ { "db": "PostgreSQL", - "01189407fab9be050ae75249f75b9503343500af700f00721e295871fa969172": { + "01a21fe42c5c0ec0f848739235b8175b62b0ffe503b823c128dd620fec047784": { "describe": { - "columns": [ - 
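-- Illustrative sketch (not part of this diff): the unique
-- gpu_prover_queue_fri_host_port_zone_idx created above makes (instance_host,
-- instance_port, zone) a natural conflict target for registering a prover instance
-- idempotently. The ON CONFLICT form is an assumption about intended usage, not a
-- query from this PR, and it presumes zone is non-NULL (NULLs never conflict in a
-- unique index). The address, port, and zone values are placeholders.
INSERT INTO gpu_prover_queue_fri
    (instance_host, instance_port, instance_status,
     specialized_prover_group_id, zone, created_at, updated_at)
VALUES ('10.0.0.1'::inet, 3316, 'available', 0, 'zone-a', now(), now())
ON CONFLICT (instance_host, instance_port, zone)
DO UPDATE SET instance_status = 'available', updated_at = now();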
{ - "name": "l2_address", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { - "Left": [] + "Left": [ + "Text", + "Int4", + "Text" + ] } }, - "query": "SELECT l2_address FROM tokens WHERE well_known = true" + "query": "UPDATE gpu_prover_queue_fri SET instance_status = 'available', updated_at = now() WHERE instance_host = $1::text::inet AND instance_port = $2 AND instance_status = 'full' AND zone = $3\n " }, "01ebdc5b524e85033fb06d9166475f365643f744492e59ff12f10b419dd6d485": { "describe": { @@ -38,56 +34,6 @@ }, "query": "SELECT bytecode_hash FROM factory_deps WHERE miniblock_number > $1" }, - "021e878567c19a5ec20c79949da5286985f7b17d7b272e24f9a5c194050ec783": { - "describe": { - "columns": [ - { - "name": "l1_address", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l2_address", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "symbol", - "ordinal": 2, - "type_info": "Varchar" - }, - { - "name": "name", - "ordinal": 3, - "type_info": "Varchar" - }, - { - "name": "decimals", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "usd_price", - "ordinal": 5, - "type_info": "Numeric" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT l1_address, l2_address, symbol, name, decimals, usd_price\n FROM tokens\n WHERE l2_address = $1\n " - }, "03a34f0fd82bed22f14c5b36554bb958d407e9724fa5ea5123edc3c6607e545c": { "describe": { "columns": [ @@ -267,311 +213,172 @@ }, "query": "\n UPDATE witness_inputs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs_fri\n WHERE l1_batch_number <= $1\n AND status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs_fri.*\n " }, - "0b934f7671826b45d5a6f95f30ae13f073a16bc54b1b933b52681901c676d623": { + "0c212f47b9a0e719f947a419be8284837b1b01aa23994ba6401b420790b802b8": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + } + }, + "query": "\n INSERT INTO node_aggregation_witness_jobs\n (l1_batch_number, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, 'waiting_for_artifacts', now(), now())\n " + }, + "0c729d441aceba247e36c08a89661c35b476d4d7c73882147699009affe78472": { "describe": { "columns": [ { - "name": "hash", + "name": "l1_batch_number!", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "circuit_id", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int2" }, { - "name": "full_fee", + "name": "aggregation_round", "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, - "type_info": "Numeric" - }, - { - "name": "initiator_address", - "ordinal": 4, - "type_info": "Bytea" - }, + "type_info": "Int2" + } + ], + "nullable": [ + null, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_id, aggregation_round\n FROM prover_jobs_fri\n WHERE status IN('queued', 'in_progress', 'failed')\n GROUP BY circuit_id, aggregation_round\n " + }, + "0d1bed183c38304ff1a6c8c78dca03964e2e188a6d01f98eaf0c6b24f19b8b6f": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "UPDATE 
transactions SET in_mempool = FALSE FROM UNNEST ($1::bytea[]) AS s(address) WHERE transactions.in_mempool = TRUE AND transactions.initiator_address = s.address" + }, + "0d99b4015b29905862991e4f1a44a1021d48f50e99cb1701e7496ce6c3e15dc6": { + "describe": { + "columns": [ { - "name": "nonce", - "ordinal": 5, + "name": "number", + "ordinal": 0, "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" + }, + "0e001ef507253b4fd3a87e379c8f2e63fa41250b1a396d81697de2b7ea71215e": { + "describe": { + "columns": [ { - "name": "priority_op_id", - "ordinal": 10, + "name": "count!", + "ordinal": 0, "type_info": "Int8" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches WHERE number = $1 AND hash = $2 AND merkle_root_hash = $3 AND parent_hash = $4 AND l2_l1_merkle_root = $5" + }, + "0ee31e6e2ec60f427d8dec719ec0ba03ef75bc610e878ae32b0bf61c4c2c1366": { + "describe": { + "columns": [ { - "name": "l1_batch_number", - "ordinal": 11, + "name": "id", + "ordinal": 0, "type_info": "Int8" }, { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" + "name": "instance_host", + "ordinal": 1, + "type_info": "Inet" }, { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" + "name": "instance_port", + "ordinal": 2, + "type_info": "Int4" }, { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" + "name": "instance_status", + "ordinal": 3, + "type_info": "Text" }, { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" + "name": "specialized_prover_group_id", + "ordinal": 4, + "type_info": "Int2" }, { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" + "name": "zone", + "ordinal": 5, + "type_info": "Text" }, { "name": "created_at", - "ordinal": 18, + "ordinal": 6, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 19, + "ordinal": 7, "type_info": "Timestamp" }, { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": 
"Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" + "name": "processing_started_at", + "ordinal": 8, + "type_info": "Timestamp" } ], "nullable": [ false, false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, false, false, - true, false, true, false, false, - false, - true, - true, - true, - true, - true, - false, - true, true ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT * FROM transactions\n WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL\n ORDER BY miniblock_number, index_in_block\n " - }, - "0c729d441aceba247e36c08a89661c35b476d4d7c73882147699009affe78472": { - "describe": { - "columns": [ - { - "name": "l1_batch_number!", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "circuit_id", - "ordinal": 1, - "type_info": "Int2" - }, - { - "name": "aggregation_round", - "ordinal": 2, - "type_info": "Int2" - } - ], - "nullable": [ - null, - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_id, aggregation_round\n FROM prover_jobs_fri\n WHERE status IN('queued', 'in_progress', 'failed')\n GROUP BY circuit_id, aggregation_round\n " - }, - "0d1bed183c38304ff1a6c8c78dca03964e2e188a6d01f98eaf0c6b24f19b8b6f": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "UPDATE transactions SET in_mempool = FALSE FROM UNNEST ($1::bytea[]) AS s(address) WHERE transactions.in_mempool = TRUE AND transactions.initiator_address = s.address" - }, - "0d99b4015b29905862991e4f1a44a1021d48f50e99cb1701e7496ce6c3e15dc6": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" - }, - "0e001ef507253b4fd3a87e379c8f2e63fa41250b1a396d81697de2b7ea71215e": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], "parameters": { "Left": [ - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Bytea" + "Interval", + "Int2", + "Text" ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches WHERE number = $1 AND hash = $2 AND merkle_root_hash = $3 AND parent_hash = $4 AND l2_l1_merkle_root = $5" + "query": "UPDATE gpu_prover_queue_fri SET instance_status = 'reserved', updated_at = now(), processing_started_at = now() WHERE id in ( SELECT id FROM gpu_prover_queue_fri WHERE specialized_prover_group_id=$2 AND zone=$3 AND ( instance_status = 'available' OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval) ) ORDER BY updated_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED ) RETURNING gpu_prover_queue_fri.*\n " }, "0f5897b5e0109535caa3d49f899c65e5080511d49305558b59b185c34227aa18": { "describe": { @@ -638,131 +445,259 @@ }, "query": "\n SELECT COUNT(*) as \"count!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY status\n " }, - "13f9c910b12ede287fe5ee753c9a3bf87a06216a320a58170608b9c81dc14b14": { - "describe": { - "columns": [ - { - "name": "tx_hash", - "ordinal": 0, - "type_info": "Text" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT tx_hash FROM eth_txs_history\n WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL" - }, - 
"142c812f70d8c0cef986bef9b3c058e148f2cfb1c2c933ff321cf498b9c6e3b2": { + "100ede607d40d8d07000fcdc40705c806e8229323e0e6dfb7507691838963ccf": { "describe": { "columns": [ { - "name": "number", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number!", + "name": "basic_circuits", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "last_batch_miniblock?", + "name": "basic_circuits_inputs", "ordinal": 2, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "number_of_basic_circuits", "ordinal": 3, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "root_hash?", + "name": "status", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "commit_tx_hash?", + "name": "processing_started_at", "ordinal": 5, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "committed_at?", + "name": "time_taken", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Time" }, { - "name": "prove_tx_hash?", + "name": "error", "ordinal": 7, "type_info": "Text" }, { - "name": "proven_at?", + "name": "created_at", "ordinal": 8, "type_info": "Timestamp" }, { - "name": "execute_tx_hash?", + "name": "updated_at", "ordinal": 9, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "executed_at?", + "name": "attempts", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "l1_gas_price", + "name": "basic_circuits_blob_url", "ordinal": 11, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "l2_fair_gas_price", + "name": "basic_circuits_inputs_blob_url", "ordinal": 12, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "bootloader_code_hash", + "name": "is_blob_cleaned", "ordinal": 13, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "default_aa_code_hash", + "name": "protocol_version", "ordinal": 14, - "type_info": "Bytea" - }, - { - "name": "fee_account_address?", - "ordinal": 15, - "type_info": "Bytea" + "type_info": "Int4" } ], "nullable": [ false, - null, - null, false, false, false, - true, false, true, - false, true, + true, + false, false, false, true, true, - false + false, + true ], "parameters": { "Left": [ - "Int8" + "Interval", + "Int4", + "Int8", + "Int4Array" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " + }, + "13e5f6a2a73eaa979229611ffdbed86d6e5e1bad0c645d39b56fdc47f5c17971": { + "describe": { + "columns": [ + { + "name": "hashed_key", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT DISTINCT hashed_key FROM storage_logs WHERE miniblock_number BETWEEN $1 and $2" + }, + "13f9c910b12ede287fe5ee753c9a3bf87a06216a320a58170608b9c81dc14b14": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Text" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT tx_hash FROM eth_txs_history\n 
WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL" + }, + "142c812f70d8c0cef986bef9b3c058e148f2cfb1c2c933ff321cf498b9c6e3b2": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "last_batch_miniblock?", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 13, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "fee_account_address?", + "ordinal": 15, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + null, + false, + false, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Int8" ] } }, @@ -786,6 +721,29 @@ }, "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, + "14fb05ec0acec5a4f24752c60768d72bf19d9953468dc691e3f3b8519e6d3ada": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Int4" + ] + } + }, + "query": "INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, protocol_version, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, now(), now())" + }, "151aa7cab859c275f74f981ed146415e1e5242ebe259552d5b9fac333c0d9ce8": { "describe": { "columns": [], @@ -877,6 +835,65 @@ }, "query": "SELECT sent_at_block FROM eth_txs_history WHERE eth_tx_id = $1 AND sent_at_block IS NOT NULL ORDER BY created_at ASC LIMIT 1" }, + "19c8d9e449034ce7fd501541e5e71e2d5957bf2329e52166f4981955a847e175": { + "describe": { + "columns": [ + { + "name": "value!", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "l1_address!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "l2_address!", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "symbol!", + "ordinal": 3, + "type_info": "Varchar" + }, + { + "name": "name!", + "ordinal": 4, + "type_info": "Varchar" + }, + { + "name": "decimals!", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "usd_price?", + "ordinal": 6, + "type_info": "Numeric" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT storage.value as \"value!\",\n 
tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1) AND storage.value != $4\n " + }, "1a91acea72e56513a2a9e667bd5a2c171baa5fec01c51dcb7c7cf33f736c854d": { "describe": { "columns": [ @@ -1189,113 +1206,93 @@ }, "query": "UPDATE transactions SET in_mempool = FALSE WHERE in_mempool = TRUE" }, - "1f3e41f4ac5b1f6e735f1c422c0098ed534d9e8fe84e98b3234e893e8a2c5085": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Text" - ] - } - }, - "query": "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs\n ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id\n WHERE eth_txs_history.tx_hash = $1" - }, - "1faf6552c221c75b7232b55210c0c37be76a57ec9dc94584b6ccb562e8b182f2": { + "1f33c948d95dfb549b9de814e74c4effff504c3316df79816d619a20a2c04be3": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "l1_tx_count", "ordinal": 1, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "circuit_type", + "name": "l2_tx_count", "ordinal": 2, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "prover_input", + "name": "timestamp", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "status", + "name": "is_finished", "ordinal": 4, - "type_info": "Text" + "type_info": "Bool" }, { - "name": "error", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "processing_started_at", + "name": "l2_to_l1_logs", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "created_at", + "name": "l2_to_l1_messages", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "updated_at", + "name": "bloom", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "time_taken", + "name": "priority_ops_onchain_data", "ordinal": 9, - "type_info": "Time" + "type_info": "ByteaArray" }, { - "name": "aggregation_round", + "name": "used_contract_hashes", "ordinal": 10, - "type_info": "Int4" + "type_info": "Jsonb" }, { - "name": "result", + "name": "base_fee_per_gas", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "sequence_number", + "name": "l1_gas_price", "ordinal": 12, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "attempts", + "name": "l2_fair_gas_price", "ordinal": 13, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "circuit_input_blob_url", + "name": "bootloader_code_hash", "ordinal": 14, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "proccesed_by", + "name": "default_aa_code_hash", "ordinal": 15, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "is_blob_cleaned", + "name": "protocol_version", "ordinal": 16, - "type_info": "Bool" + "type_info": "Int4" } ], "nullable": [ @@ -1304,28 +1301,48 @@ false, false, false, - true, - true, false, false, false, false, - true, + false, + false, + false, false, false, true, true, + true + ], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, 
l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM l1_batches WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1" + }, + "1f3e41f4ac5b1f6e735f1c422c0098ed534d9e8fe84e98b3234e893e8a2c5085": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ false ], "parameters": { "Left": [ - "Int8" + "Text" ] } }, - "query": "SELECT * from prover_jobs where id=$1" + "query": "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs\n ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id\n WHERE eth_txs_history.tx_hash = $1" }, - "206eaafbd834d16f37c47a06c8bbb8da8b23ed1eab9c5c5958e31832ced6f9f0": { + "1faf6552c221c75b7232b55210c0c37be76a57ec9dc94584b6ccb562e8b182f2": { "describe": { "columns": [ { @@ -1412,6 +1429,11 @@ "name": "is_blob_cleaned", "ordinal": 16, "type_info": "Bool" + }, + { + "name": "protocol_version", + "ordinal": 17, + "type_info": "Int4" } ], "nullable": [ @@ -1431,47 +1453,54 @@ false, true, true, - false + false, + true ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " - }, - "21504ecf55757d6b487f8c21fa72821109c70736185f616be7e180d1b31ca9a1": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Int8", "Int8" ] } }, - "query": "\n UPDATE transactions\n SET effective_gas_price = 0\n WHERE miniblock_number BETWEEN $1 AND $2\n AND is_priority = TRUE\n " + "query": "SELECT * from prover_jobs where id=$1" }, - "227daa1e8d647c207869d7c306d9d13a38c6baf07281cf72cd93d20da2e3cf3c": { + "20b22fd457417e9a72f5941887448f9a11b97b449db4759da0b9d368ce93996b": { "describe": { "columns": [ { - "name": "exists", + "name": "recursion_scheduler_level_vk_hash", "ordinal": 0, - "type_info": "Bool" + "type_info": "Bytea" + }, + { + "name": "recursion_node_level_vk_hash", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "recursion_leaf_level_vk_hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "recursion_circuits_set_vks_hash", + "ordinal": 3, + "type_info": "Bytea" } ], "nullable": [ - null + false, + false, + false, + false ], "parameters": { "Left": [ - "Bytea", - "Bytea" + "Int4" ] } }, - "query": "\n SELECT true as \"exists\"\n FROM (\n SELECT * FROM storage_logs\n WHERE hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) sl\n WHERE sl.value != $2\n " + "query": "SELECT recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash\n FROM protocol_versions\n WHERE id = $1\n " }, "22b57675a726d9cfeb82a60ba50c36cab1548d197ea56a7658d3f005df07c60b": { "describe": { @@ -1491,223 +1520,178 @@ }, "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true AND miniblock_number IS NOT NULL" }, - "230ad5f76b258a756e91732857db772b1f241066278fefc742122f4d1830f56e": { + "2397c1a050d358b596c9881c379bf823e267c03172f72c42da84cc0c04cc9d93": { "describe": { "columns": [ { - "name": "number", + "name": "miniblock_number!", "ordinal": 0, "type_info": "Int8" }, { - 
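-- Illustrative sketch (not part of this diff): how the new protocol_version column on
-- prover_jobs composes with the protocol_versions lookups in this file. A plausible
-- flow, assumed here for illustration: a prover first resolves which protocol versions
-- match the verification keys it was built with, then claims only jobs tagged with one
-- of those versions (the `protocol_version = ANY(...)` filter seen in the witness-job
-- queries). The hex literals and version array are placeholders, not real values.
SELECT id
FROM protocol_versions
WHERE recursion_scheduler_level_vk_hash = '\x0000'::bytea
  AND recursion_node_level_vk_hash = '\x0000'::bytea
  AND recursion_leaf_level_vk_hash = '\x0000'::bytea
  AND recursion_circuits_set_vks_hash = '\x0000'::bytea;

SELECT id
FROM prover_jobs
WHERE status = 'queued'
  AND protocol_version = ANY('{1,2}'::int[])
ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC
LIMIT 1;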
"name": "timestamp", + "name": "hash", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "is_finished", + "name": "index_in_block!", "ordinal": 2, - "type_info": "Bool" + "type_info": "Int4" }, { - "name": "l1_tx_count", + "name": "l1_batch_tx_index!", "ordinal": 3, "type_info": "Int4" - }, + } + ], + "nullable": [ + true, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " + }, + "2424f0ab2b156e953841107cfc0ccd76519d13c62fdcd5fd6b39e3503d6ec82c": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE l1_batch_number = $2\n " + }, + "249d8c0334a8a1a4ff993f72f5245dc55c60773732bfe7596dc5f05f34c15131": { + "describe": { + "columns": [ { - "name": "l2_tx_count", - "ordinal": 4, + "name": "id", + "ordinal": 0, "type_info": "Int4" - }, + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ('\\x00', 0, $1, '', 0, now(), now())\n RETURNING id" + }, + "252c1398bf08802e9dc038f7c9d95cc9d56cbf760d7de5a48f014478850daede": { + "describe": { + "columns": [ { - "name": "fee_account_address", - "ordinal": 5, - "type_info": "Bytea" + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "bloom", - "ordinal": 6, - "type_info": "Bytea" + "name": "scheduler_witness_blob_url", + "ordinal": 1, + "type_info": "Text" }, { - "name": "priority_ops_onchain_data", - "ordinal": 7, - "type_info": "ByteaArray" - }, + "name": "final_node_aggregations_blob_url", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND updated_at < NOW() - INTERVAL '30 days'\n AND scheduler_witness_blob_url is NOT NULL\n AND final_node_aggregations_blob_url is NOT NULL\n LIMIT $1;\n " + }, + "269f3ac58705d65f775a6c84a62b9c0726beef51eb633937fa2a75b80c6d7fbc": { + "describe": { + "columns": [ { "name": "hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, + "ordinal": 0, "type_info": "Bytea" }, { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" + "name": "number", + "ordinal": 1, + "type_info": "Int8" }, { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - 
"ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, + "name": "timestamp", + "ordinal": 2, "type_info": "Int8" - }, + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT hash, number, timestamp FROM miniblocks WHERE number > $1 ORDER BY number ASC" + }, + "2928cd054e9d6898559f964906a2ee0d3750fbe6fbd99209a48fc7b197fa2a22": { + "describe": { + "columns": [ { - "name": "predicted_prove_gas_cost", - "ordinal": 22, + "name": "id", + "ordinal": 0, "type_info": "Int8" }, { - "name": "predicted_execute_gas_cost", - "ordinal": 23, + "name": "l1_batch_number", + "ordinal": 1, "type_info": "Int8" }, { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" }, { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" }, { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, + "name": "sequence_number", + "ordinal": 4, "type_info": "Int4" }, { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" + "name": "depth", + "ordinal": 5, + "type_info": "Int4" }, { - "name": "skip_proof", - "ordinal": 40, + "name": "is_node_final_proof", + "ordinal": 6, "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ @@ -1717,173 +1701,95 @@ false, false, false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, - false, false ], "parameters": { - "Left": [] + "Left": [ + "Int2Array", + "Int2Array" + ] } }, - "query": "SELECT * FROM l1_batches ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND (circuit_id, aggregation_round) IN (\n SELECT * FROM UNNEST($1::smallint[], $2::smallint[])\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n 
LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, - "2397c1a050d358b596c9881c379bf823e267c03172f72c42da84cc0c04cc9d93": { + "297d6517ec5f050e8d8fe4878e4ff330b4b10af4d60de86e8a25e2cd70e0363b": { "describe": { "columns": [ { - "name": "miniblock_number!", + "name": "verification_info", "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "index_in_block!", - "ordinal": 2, - "type_info": "Int4" - }, - { - "name": "l1_batch_tx_index!", - "ordinal": 3, - "type_info": "Int4" + "type_info": "Jsonb" } ], "nullable": [ - true, - false, - true, true ], "parameters": { "Left": [ - "Int8" + "Bytea" ] } }, - "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " + "query": "SELECT verification_info FROM contracts_verification_info WHERE address = $1" }, - "2424f0ab2b156e953841107cfc0ccd76519d13c62fdcd5fd6b39e3503d6ec82c": { + "2985ea2bf34a94573103654c00a49d2a946afe5d552ac1c2a2d055eb9d6f2cf1": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Text", + "Time", "Int8" ] } }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE l1_batch_number = $2\n " + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " }, - "249d8c0334a8a1a4ff993f72f5245dc55c60773732bfe7596dc5f05f34c15131": { + "2adfdba6fa2b6b967ba03ae6f930e7f3ea851f678d30df699ced27b2dbb01c2a": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ false ], "parameters": { - "Left": [ - "Text" - ] + "Left": [] } }, - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ('\\x00', 0, $1, '', 0, now(), now())\n RETURNING id" + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) WHERE execute_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, - "251d3e3615046ec5f061cfba65dc5ad891ee7fa315abe39aedbd291e36140610": { + "2b22e7d15adf069c8e68954059b83f71a71350f3325b4280840c4be7e54a319f": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "l1_address", "ordinal": 0, "type_info": "Bytea" }, { - "name": "topic2!", + "name": "l2_address", "ordinal": 1, "type_info": "Bytea" }, { - "name": "topic3!", + "name": "name", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Varchar" }, { - "name": "value!", + "name": "symbol", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Varchar" }, { - "name": "l1_address!", + "name": "decimals", "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "l2_address!", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "symbol!", - "ordinal": 6, - "type_info": "Varchar" - }, - { - "name": "name!", - "ordinal": 7, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 8, "type_info": "Int4" - }, - { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" } ], "nullable": [ @@ -1891,24 +1797,15 @@ 
false, false, false, - false, - false, - false, - false, - false, - true + false ], "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n " + "query": "SELECT l1_address, l2_address, name, symbol, decimals FROM tokens\n WHERE well_known = true\n ORDER BY symbol" }, - "252c1398bf08802e9dc038f7c9d95cc9d56cbf760d7de5a48f014478850daede": { + "2b76ca7059810f691a2d7d053e7e62e06de13e7ddb7747e39335bb10c45534e9": { "describe": { "columns": [ { @@ -1917,227 +1814,279 @@ "type_info": "Int8" }, { - "name": "scheduler_witness_blob_url", + "name": "circuit_id", "ordinal": 1, - "type_info": "Text" - }, - { - "name": "final_node_aggregations_blob_url", - "ordinal": 2, - "type_info": "Text" + "type_info": "Int2" } ], "nullable": [ false, - true, - true + false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND updated_at < NOW() - INTERVAL '30 days'\n AND scheduler_witness_blob_url is NOT NULL\n AND final_node_aggregations_blob_url is NOT NULL\n LIMIT $1;\n " + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id\n FROM prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON\n prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number, circuit_id;\n " }, - "269f3ac58705d65f775a6c84a62b9c0726beef51eb633937fa2a75b80c6d7fbc": { + "2c136284610f728ddba3e255d7dc573b10e4baf9151de194b7d8e0dc40c40602": { "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "number", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 2, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Bytea", + "Jsonb" ] } }, - "query": "SELECT hash, number, timestamp FROM miniblocks WHERE number > $1 ORDER BY number ASC" + "query": "INSERT INTO transaction_traces (tx_hash, trace, created_at, updated_at) VALUES ($1, $2, now(), now())" }, - "2928cd054e9d6898559f964906a2ee0d3750fbe6fbd99209a48fc7b197fa2a22": { + "2c4178a125ddc46a36f7548c840e481e85738502c56566d1eef84feef2161b2e": { "describe": { "columns": [ { - "name": "id", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_batch_number", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "circuit_id", + "name": 
"full_fee", "ordinal": 2, - "type_info": "Int2" + "type_info": "Numeric" }, { - "name": "aggregation_round", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int2" + "type_info": "Numeric" }, { - "name": "sequence_number", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "depth", + "name": "nonce", "ordinal": 5, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "is_node_final_proof", + "name": "signature", "ordinal": 6, - "type_info": "Bool" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [ - "Int2Array", - "Int2Array" - ] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND (circuit_id, aggregation_round) IN (\n SELECT * FROM UNNEST($1::smallint[], $2::smallint[])\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, - "2985ea2bf34a94573103654c00a49d2a946afe5d552ac1c2a2d055eb9d6f2cf1": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Time", - "Int8" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " - }, - "2adfdba6fa2b6b967ba03ae6f930e7f3ea851f678d30df699ced27b2dbb01c2a": { - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "number", - "ordinal": 0, + "name": "input", + "ordinal": 7, + "type_info": "Bytea" + }, + { + "name": "data", + "ordinal": 8, + "type_info": "Jsonb" + }, + { + "name": "received_at", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "priority_op_id", + "ordinal": 10, "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) WHERE execute_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" - }, - "2b22e7d15adf069c8e68954059b83f71a71350f3325b4280840c4be7e54a319f": { - "describe": { - "columns": [ + }, { - "name": "l1_address", - "ordinal": 0, - "type_info": "Bytea" + "name": "l1_batch_number", + "ordinal": 11, + "type_info": "Int8" }, { - "name": "l2_address", - "ordinal": 1, - "type_info": "Bytea" + "name": "index_in_block", + "ordinal": 12, + "type_info": "Int4" }, { - "name": "name", - "ordinal": 2, + "name": "error", + "ordinal": 13, "type_info": "Varchar" }, { - "name": "symbol", - "ordinal": 3, - "type_info": "Varchar" + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" }, { - "name": "decimals", - "ordinal": 4, + "name": "gas_per_storage_limit", + "ordinal": 15, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" + }, + { + "name": "tx_format", + "ordinal": 17, "type_info": "Int4" - } - ], - "nullable": [ - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_address, l2_address, name, symbol, decimals FROM tokens\n WHERE well_known = true\n ORDER BY symbol" - }, - 
"2b76ca7059810f691a2d7d053e7e62e06de13e7ddb7747e39335bb10c45534e9": { - "describe": { - "columns": [ + }, { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" + "name": "created_at", + "ordinal": 18, + "type_info": "Timestamp" }, { - "name": "circuit_id", - "ordinal": 1, - "type_info": "Int2" - } - ], - "nullable": [ + "name": "updated_at", + "ordinal": 19, + "type_info": "Timestamp" + }, + { + "name": "execution_info", + "ordinal": 20, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "in_mempool", + "ordinal": 22, + "type_info": "Bool" + }, + { + "name": "l1_block_number", + "ordinal": 23, + "type_info": "Int4" + }, + { + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, + "type_info": "Bytea" + }, + { + "name": "paymaster_input", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "upgrade_id", + "ordinal": 35, + "type_info": "Int4" + } + ], + "nullable": [ false, - false + false, + true, + true, + false, + true, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + false, + true, + false, + false, + false, + true, + true, + true, + true, + true, + false, + true, + true, + true ], "parameters": { - "Left": [] + "Left": [ + "Int8", + "Numeric", + "Numeric", + "Int4" + ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id\n FROM prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON\n prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number, circuit_id;\n " + "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n AND tx_format != $4\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery1\n ORDER BY hash\n ) as subquery2\n WHERE transactions.hash = subquery2.hash\n RETURNING transactions.*" }, - "2c136284610f728ddba3e255d7dc573b10e4baf9151de194b7d8e0dc40c40602": { + "2dbadf3ff3134bc35bc98cf7201097256aed32b75d3809d7d24c95f70672e21c": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Jsonb" + "ByteaArray", + "Int8Array" ] } }, - "query": 
"INSERT INTO transaction_traces (tx_hash, trace, created_at, updated_at) VALUES ($1, $2, now(), now())" + "query": "UPDATE initial_writes SET index = data_table.index FROM ( SELECT UNNEST($1::bytea[]) as hashed_key, UNNEST($2::bigint[]) as index ) as data_table WHERE initial_writes.hashed_key = data_table.hashed_key" }, "2e543dc0013150040bb86e278bbe86765ce1ebad72a32bb931fe02a9c516a11c": { "describe": { @@ -2193,227 +2142,279 @@ }, "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2" }, - "335826f54feadf6aa30a4e7668ad3f17a2afc6bd67d4f863e3ad61fefd1bd8d2": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(number) as \"number\" FROM miniblocks" - }, - "3418353764615faa995ff518579ff2f28b79f60d0421cb4d209f62a0abbf06cf": { + "300e5d4fa6d2481a10cb6d857f66a81b6c3760906c6c2ab02f126d52efc0d4d1": { "describe": { "columns": [ { - "name": "number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "hash", + "name": "full_fee", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "nonce", "ordinal": 5, - "type_info": "Numeric" + "type_info": "Int8" }, { - "name": "l1_gas_price", + "name": "signature", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l2_fair_gas_price", + "name": "input", "ordinal": 7, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "bootloader_code_hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "default_aa_code_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash FROM miniblocks ORDER BY number DESC LIMIT 1" - }, - "36c483775b604324eacd7e5aac591b927cc32abb89fe1b0c5cf4b0383e9bd443": { - "describe": { - "columns": [ + "type_info": "Timestamp" + }, + { + "name": "priority_op_id", + "ordinal": 10, + "type_info": "Int8" + }, { "name": "l1_batch_number", - "ordinal": 0, + "ordinal": 11, "type_info": "Int8" }, { - "name": "leaf_layer_subqueues_blob_url", - "ordinal": 1, - "type_info": "Text" + "name": "index_in_block", + "ordinal": 12, + "type_info": "Int4" }, { - "name": "aggregation_outputs_blob_url", - "ordinal": 2, - "type_info": "Text" - } - ], - "nullable": [ - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " - }, - 
"38a3bdae346fdd362452af152c6886c93696dd2db561f6622f8eaf6fabb1e5be": { - "describe": { - "columns": [ + "name": "error", + "ordinal": 13, + "type_info": "Varchar" + }, { - "name": "id", - "ordinal": 0, + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" + }, + { + "name": "gas_per_storage_limit", + "ordinal": 15, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" + }, + { + "name": "tx_format", + "ordinal": 17, "type_info": "Int4" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int4", - "Text", - "Timestamp" - ] - } - }, - "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at)\n VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3)\n RETURNING id" - }, - "393345441797999e9f11b8b5ddce0b64356e1e167056d7f76ef6dfffd3534607": { - "describe": { - "columns": [ + }, { - "name": "name!", - "ordinal": 0, - "type_info": "Varchar" + "name": "created_at", + "ordinal": 18, + "type_info": "Timestamp" }, { - "name": "symbol!", - "ordinal": 1, - "type_info": "Varchar" + "name": "updated_at", + "ordinal": 19, + "type_info": "Timestamp" }, { - "name": "decimals!", - "ordinal": 2, + "name": "execution_info", + "ordinal": 20, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "in_mempool", + "ordinal": 22, + "type_info": "Bool" + }, + { + "name": "l1_block_number", + "ordinal": 23, + "type_info": "Int4" + }, + { + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, + "type_info": "Bytea" + }, + { + "name": "paymaster_input", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "upgrade_id", + "ordinal": 35, "type_info": "Int4" } ], "nullable": [ - null, - null, - null + false, + false, + true, + true, + false, + true, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + false, + true, + false, + false, + false, + true, + true, + true, + true, + true, + false, + true, + true, + true ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT\n COALESCE(token_list_name, name) as \"name!\",\n COALESCE(token_list_symbol, symbol) as \"symbol!\",\n COALESCE(token_list_decimals, decimals) as \"decimals!\"\n FROM tokens WHERE l2_address = $1\n " + "query": "SELECT * FROM transactions WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL ORDER BY miniblock_number, index_in_block" }, - "394bbd64939d47fda4e1545e2752b208901e872b7234a5c3af456bdf429a6074": { + "334197fef9eeca55790d366ae67bbe95d77181bdfd2ad3208a32bd50585aef2d": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "hashed_key", "ordinal": 0, "type_info": "Bytea" - }, - { - "name": 
"call_trace", - "ordinal": 1, - "type_info": "Bytea" } ], "nullable": [ - false, false ], "parameters": { "Left": [ - "Bytea" + "ByteaArray" ] } }, - "query": "\n SELECT * FROM call_traces\n WHERE tx_hash = $1\n " + "query": "SELECT hashed_key FROM initial_writes WHERE hashed_key = ANY($1)" }, - "3ac1fe562e9664bbf8c02ba3090cf97a37663e228eff48fec326f74b2313daa9": { + "335826f54feadf6aa30a4e7668ad3f17a2afc6bd67d4f863e3ad61fefd1bd8d2": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], "parameters": { - "Left": [ - "ByteaArray" - ] + "Left": [] } }, - "query": "DELETE FROM call_traces\n WHERE tx_hash = ANY($1)" + "query": "SELECT MAX(number) as \"number\" FROM miniblocks" }, - "3bc54eb6ad9c5b7810954f2dfd7c49ff0d4f2bc5c020b04448db6b5883439a2d": { + "357347157ed8ff19d223c54533c3a85bd7e64a37514d657f8d49bd6eb5be1806": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, - "type_info": "Int8" + "type_info": "Int4" }, { "name": "timestamp", @@ -2421,265 +2422,300 @@ "type_info": "Int8" }, { - "name": "is_finished", + "name": "recursion_scheduler_level_vk_hash", "ordinal": 2, - "type_info": "Bool" + "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "recursion_node_level_vk_hash", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "recursion_leaf_level_vk_hash", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "recursion_circuits_set_vks_hash", "ordinal": 5, "type_info": "Bytea" }, { - "name": "bloom", + "name": "bootloader_code_hash", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "default_account_code_hash", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "verifier_address", "ordinal": 8, "type_info": "Bytea" }, { - "name": "parent_hash", + "name": "upgrade_tx_hash", "ordinal": 9, "type_info": "Bytea" }, { - "name": "commitment", + "name": "created_at", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Timestamp" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT * FROM protocol_versions ORDER BY id DESC LIMIT 1" + }, + "36c483775b604324eacd7e5aac591b927cc32abb89fe1b0c5cf4b0383e9bd443": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" + "name": "leaf_layer_subqueues_blob_url", + "ordinal": 1, + "type_info": "Text" }, { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, + "name": "aggregation_outputs_blob_url", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + }, + "38a3bdae346fdd362452af152c6886c93696dd2db561f6622f8eaf6fabb1e5be": { + "describe": { + 
"columns": [ { - "name": "eth_commit_tx_id", - "ordinal": 14, + "name": "id", + "ordinal": 0, "type_info": "Int4" - }, + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Timestamp" + ] + } + }, + "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at)\n VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3)\n RETURNING id" + }, + "393345441797999e9f11b8b5ddce0b64356e1e167056d7f76ef6dfffd3534607": { + "describe": { + "columns": [ { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" + "name": "name!", + "ordinal": 0, + "type_info": "Varchar" }, { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" + "name": "symbol!", + "ordinal": 1, + "type_info": "Varchar" }, { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, + "name": "decimals!", + "ordinal": 2, + "type_info": "Int4" + } + ], + "nullable": [ + null, + null, + null + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "\n SELECT\n COALESCE(token_list_name, name) as \"name!\",\n COALESCE(token_list_symbol, symbol) as \"symbol!\",\n COALESCE(token_list_decimals, decimals) as \"decimals!\"\n FROM tokens WHERE l2_address = $1\n " + }, + "394bbd64939d47fda4e1545e2752b208901e872b7234a5c3af456bdf429a6074": { + "describe": { + "columns": [ { - "name": "merkle_root_hash", - "ordinal": 18, + "name": "tx_hash", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, + "name": "call_trace", + "ordinal": 1, "type_info": "Bytea" - }, + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "\n SELECT * FROM call_traces\n WHERE tx_hash = $1\n " + }, + "3ac1fe562e9664bbf8c02ba3090cf97a37663e228eff48fec326f74b2313daa9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM call_traces\n WHERE tx_hash = ANY($1)" + }, + "3af5a385c6636afb16e0fa5eda5373d64a76cef695dfa0b3b156e236224d32c8": { + "describe": { + "columns": [ { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "l2_l1_compressed_messages", - "ordinal": 28, + "name": "scheduler_witness", + "ordinal": 1, "type_info": "Bytea" }, { - "name": "l2_l1_merkle_root", - "ordinal": 29, + "name": "final_node_aggregations", + "ordinal": 2, "type_info": "Bytea" }, { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" + "name": "status", + "ordinal": 3, + "type_info": "Text" }, { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" + "name": "processing_started_at", + "ordinal": 4, + "type_info": "Timestamp" }, { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": 
"Bool" + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" }, { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" + "name": "error", + "ordinal": 6, + "type_info": "Text" }, { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" }, { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" + "name": "updated_at", + "ordinal": 8, + "type_info": "Timestamp" }, { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" + "name": "attempts", + "ordinal": 9, + "type_info": "Int4" }, { - "name": "aux_data_hash", - "ordinal": 37, + "name": "aggregation_result_coords", + "ordinal": 10, "type_info": "Bytea" }, { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" + "name": "scheduler_witness_blob_url", + "ordinal": 11, + "type_info": "Text" }, { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" + "name": "final_node_aggregations_blob_url", + "ordinal": 12, + "type_info": "Text" }, { - "name": "skip_proof", - "ordinal": 40, + "name": "is_blob_cleaned", + "ordinal": 13, "type_info": "Bool" }, { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "name": "protocol_version", + "ordinal": 14, + "type_info": "Int4" } ], "nullable": [ false, false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, true, + false, true, true, true, false, false, - true, - false, false, + true, + true, + true, false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, - false, - false + true ], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Int8" + "Interval", + "Int4", + "Int8", + "Int4Array" ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number != 0 AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 AND commitment IS NOT NULL ORDER BY number LIMIT $3" + "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " }, "3c582aeed32235ef175707de412a9f9129fad6ea5e87ebb85f68e20664b0da46": { "describe": { @@ -2695,7 +2731,71 @@ }, "query": "\n UPDATE transactions\n SET \n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = now()\n FROM\n (SELECT\n UNNEST($1::int[]) AS l1_batch_tx_index,\n UNNEST($2::bytea[]) AS hash\n ) AS data_table\n WHERE transactions.hash=data_table.hash \n " }, - "3ccd4d053bb664a40c3887ef4f87fe2d4aa8be36e6c84c5f1d358ce712072082": { + "3d41f05e1d5c5a74e0605e66fe08e09f14b8bf0269e5dcde518aa08db92a3ea0": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM events WHERE miniblock_number > $1" + }, + "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { 
+ "Left": [ + "Bytea", + "Numeric", + "Timestamp" + ] + } + }, + "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" + }, + "3f6332706376ef4cadda96498872429b6ed28eca5402b03b1aa3b77b8262bccd": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "DELETE FROM compiler_versions WHERE compiler = $1" + }, + "3f671298a05f3f69a8ffb2e36d5ae79c544145fc1c289dd9e0c060dca3ec6e21": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray" + ] + } + }, + "query": "UPDATE storage SET value = u.value FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) WHERE u.key = hashed_key" + }, + "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1 AND status != 'queued'\n " + }, + "419344dd86dfa0fbc73bdedd8408971ecb1b97382cf3eb4c57ed6d9ffb87ce58": { "describe": { "columns": [ { @@ -2778,140 +2878,100 @@ "ordinal": 15, "type_info": "Int4" }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, { "name": "merkle_root_hash", - "ordinal": 18, + "ordinal": 16, "type_info": "Bytea" }, { "name": "l2_to_l1_logs", - "ordinal": 19, + "ordinal": 17, "type_info": "ByteaArray" }, { "name": "l2_to_l1_messages", - "ordinal": 20, + "ordinal": 18, "type_info": "ByteaArray" }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, { "name": "used_contract_hashes", - "ordinal": 25, + "ordinal": 19, "type_info": "Jsonb" }, { "name": "compressed_initial_writes", - "ordinal": 26, + "ordinal": 20, "type_info": "Bytea" }, { "name": "compressed_repeated_writes", - "ordinal": 27, + "ordinal": 21, "type_info": "Bytea" }, { "name": "l2_l1_compressed_messages", - "ordinal": 28, + "ordinal": 22, "type_info": "Bytea" }, { "name": "l2_l1_merkle_root", - "ordinal": 29, + "ordinal": 23, "type_info": "Bytea" }, { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" + "name": "l1_gas_price", + "ordinal": 24, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 25, + "type_info": "Int8" }, { "name": "rollup_last_leaf_index", - "ordinal": 31, + "ordinal": 26, "type_info": "Int8" }, { "name": "zkporter_is_available", - "ordinal": 32, + "ordinal": 27, "type_info": "Bool" }, { "name": "bootloader_code_hash", - "ordinal": 33, + "ordinal": 28, "type_info": "Bytea" }, { "name": "default_aa_code_hash", - "ordinal": 34, + "ordinal": 29, "type_info": "Bytea" }, { "name": "base_fee_per_gas", - "ordinal": 35, + "ordinal": 30, "type_info": "Numeric" }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, { "name": "aux_data_hash", - "ordinal": 37, + "ordinal": 31, "type_info": "Bytea" }, { "name": "pass_through_data_hash", - "ordinal": 38, + "ordinal": 32, "type_info": "Bytea" }, { "name": 
"meta_parameters_hash", - "ordinal": 39, + "ordinal": 33, "type_info": "Bytea" }, { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "name": "protocol_version", + "ordinal": 34, + "type_info": "Int4" } ], "nullable": [ @@ -2931,167 +2991,202 @@ true, true, true, - false, - false, true, false, false, false, - false, - false, - false, - false, - true, true, true, true, true, + false, + false, true, true, true, true, false, - false, true, true, true, - false, - false, - false + true ], "parameters": { "Left": [ - "Int8", - "Int8", "Int8" ] } }, - "query": "SELECT * FROM l1_batches WHERE number BETWEEN $1 AND $2 ORDER BY number LIMIT $3" + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" }, - "3d41f05e1d5c5a74e0605e66fe08e09f14b8bf0269e5dcde518aa08db92a3ea0": { + "433d5da4d72150cf2c1e1007ee3ff51edfa51924f4b662b8cf382f06e60fd228": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8" + "Int4", + "Int8", + "Text", + "Text" ] } }, - "query": "DELETE FROM events WHERE miniblock_number > $1" + "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " }, - "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { + "448d283cab6ae334de9676f69416974656d11563b58e0188d53ca9e0995dd287": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Numeric", - "Timestamp" + "Int8Array" ] } }, - "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" + "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queued'\n WHERE l1_batch_number = ANY($1)\n " }, - "3f6332706376ef4cadda96498872429b6ed28eca5402b03b1aa3b77b8262bccd": { + "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ - "Text" + "Interval" ] } }, - "query": "DELETE FROM compiler_versions WHERE compiler = $1" + "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" }, - "3f671298a05f3f69a8ffb2e36d5ae79c544145fc1c289dd9e0c060dca3ec6e21": { + "4ac212a08324b9d4c3febc585109f19105b4d20aa3e290352e3c63d7ec58c5b2": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "l2_address", + "ordinal": 0, + 
"type_info": "Bytea" + } + ], + "nullable": [ + false + ], "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray" - ] + "Left": [] } }, - "query": "UPDATE storage SET value = u.value FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) WHERE u.key = hashed_key" + "query": "SELECT l2_address FROM tokens" }, - "3f86b7cb793dd8849af45ff3de4eabb80082a1cf8b213be607e6e13bb3d6710d": { + "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "hashed_key", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "basic_circuits", + "name": "value!", "ordinal": 1, "type_info": "Bytea" - }, - { - "name": "basic_circuits_inputs", - "ordinal": 2, + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" + }, + "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" + }, + "4b8597a47c0724155ad9592dc32134523bcbca11c9d82763d1bebbe17479c7b4": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "recursion_scheduler_level_vk_hash", + "ordinal": 2, "type_info": "Bytea" }, { - "name": "number_of_basic_circuits", + "name": "recursion_node_level_vk_hash", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "status", + "name": "recursion_leaf_level_vk_hash", "ordinal": 4, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "processing_started_at", + "name": "recursion_circuits_set_vks_hash", "ordinal": 5, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "time_taken", + "name": "bootloader_code_hash", "ordinal": 6, - "type_info": "Time" + "type_info": "Bytea" }, { - "name": "error", + "name": "default_account_code_hash", "ordinal": 7, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "verifier_address", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "upgrade_tx_hash", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "attempts", + "name": "created_at", "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "basic_circuits_blob_url", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "basic_circuits_inputs_blob_url", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 13, - "type_info": "Bool" + "type_info": "Timestamp" } ], "nullable": [ @@ -3100,256 +3195,178 @@ false, false, false, - true, - true, - true, false, false, false, - true, + false, true, false ], "parameters": { "Left": [ - "Interval", - "Int4", - "Int8" + "Int4" ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n 
ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " + "query": "SELECT * FROM protocol_versions\n WHERE id = $1\n " }, - "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { + "4bab972cbbd8b53237a840ba9307079705bd4b5270428d2b41f05ee3d2aa42af": { + "describe": { + "columns": [ + { + "name": "l1_batch_number!", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "circuit_type", + "ordinal": 1, + "type_info": "Text" + } + ], + "nullable": [ + null, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_type\n FROM prover_jobs\n WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR status = 'failed')\n GROUP BY circuit_type\n " + }, + "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Text" + "Int8" ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1 AND status != 'queued'\n " + "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" }, - "41913b02b13a0dad87268c5e0d673d9f04d5207ab6a48b63004e6c3ed07b93bc": { + "4c83881635e957872a435737392bfed829de58780887c9a0fa7921ea648296fb": { "describe": { "columns": [ { "name": "number", "ordinal": 0, "type_info": "Int8" - }, + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT 1" + }, + "4d2e106c809a48ace74952df2b883a5e747aaa1bc6bee28e986dccee7fa130b6": { + "describe": { + "columns": [ + { + "name": "nonce", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT nonce FROM eth_txs ORDER BY id DESC LIMIT 1" + }, + "4d36aff2bdeb0b659b8c4cd031f7c3fc204d92bb500a4efe8b6beb9255a232f6": { + "describe": { + "columns": [ { "name": "timestamp", - "ordinal": 1, + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT timestamp FROM l1_batches WHERE eth_execute_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" + }, + "4e2b733fea9ca7cef542602fcd80acf1a9d2e0f1e22566f1076c4837e3ac7e61": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, "type_info": "Int8" }, { - "name": "is_finished", + "name": "instance_host", + "ordinal": 1, + "type_info": "Inet" + }, + { + "name": "instance_port", "ordinal": 2, - "type_info": "Bool" + "type_info": "Int4" }, { - "name": "l1_tx_count", + "name": "instance_status", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "l2_tx_count", + "name": "created_at", "ordinal": 4, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "fee_account_address", + "name": "updated_at", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "bloom", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "priority_ops_onchain_data", + "name": "queue_free_slots", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Int4" }, { - "name": "hash", + "name": "queue_capacity", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "parent_hash", + 
"name": "specialized_prover_group_id", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Int2" }, { - "name": "commitment", + "name": "region", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "compressed_write_logs", + "name": "zone", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "compressed_contracts", + "name": "num_gpu", "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "type_info": "Int2" } ], "nullable": [ @@ -3359,1681 +3376,1239 @@ false, false, false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, true, true, true, true, - true, - true, - true, - false, false, - true, - true, - true, false, + true + ], + "parameters": { + "Left": [ + "Interval", + "Int2", + "Text", + "Text" + ] + } + }, + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE 
specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " + }, + "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" + }, + "51cb712685991ffd600dce59f5ed8b5a1bfce8feed46ebd02471c43802e6e65a": { + "describe": { + "columns": [ + { + "name": "bootloader_code_hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "default_account_code_hash", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ false, false ], "parameters": { - "Left": [] + "Left": [ + "Int4" + ] } }, - "query": "SELECT * FROM l1_batches WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL ORDER BY number DESC LIMIT 1" + "query": "SELECT bootloader_code_hash, default_account_code_hash FROM protocol_versions\n WHERE id = $1\n " }, - "433d5da4d72150cf2c1e1007ee3ff51edfa51924f4b662b8cf382f06e60fd228": { + "51d788b5e8d808db143b6c057485f0a0b314a0c33e3eb2dff99ca0b32d12f8e4": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4", "Int8", + "Int2", "Text", - "Text" + "Int2", + "Int4", + "Int4", + "Bool" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " + "query": "\n INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number)\n DO UPDATE SET updated_at=now()\n " }, - "448d283cab6ae334de9676f69416974656d11563b58e0188d53ca9e0995dd287": { + "52eeb8c529efb796fdefb30a381fcf6c931512f30e55e24c155f6c649e662909": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queuing'\n WHERE l1_batch_number IN\n (SELECT l1_batch_number FROM scheduler_dependency_tracker_fri\n WHERE status != 'queued'\n AND circuit_1_final_prover_job_id IS NOT NULL\n AND circuit_2_final_prover_job_id IS NOT NULL\n AND circuit_3_final_prover_job_id IS NOT NULL\n AND circuit_4_final_prover_job_id IS NOT NULL\n AND circuit_5_final_prover_job_id IS NOT NULL\n AND circuit_6_final_prover_job_id IS NOT NULL\n AND circuit_7_final_prover_job_id IS NOT NULL\n AND circuit_8_final_prover_job_id IS NOT NULL\n AND circuit_9_final_prover_job_id IS NOT NULL\n AND circuit_10_final_prover_job_id IS NOT NULL\n AND circuit_11_final_prover_job_id IS NOT NULL\n AND circuit_12_final_prover_job_id IS NOT NULL\n AND circuit_13_final_prover_job_id IS NOT NULL\n )\n RETURNING l1_batch_number;\n " + }, + "53726a35b24a838df04c1f7201da322aab287830c96fc2c712a67d360bbc2bd0": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Int8", + "Text" ] } }, - "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET 
status='queued'\n WHERE l1_batch_number = ANY($1)\n " + "query": "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" }, - "474c72dc36171ee1983e0eb4272cdbc180e3773093280556e8e5229b68bc793d": { + "5490012051be6faaaa11fad0f196eb53160a9c5c045fe9d66afcef7f33403fe2": { "describe": { "columns": [ { - "name": "hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "is_priority", + "name": "timestamp", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "full_fee", + "name": "recursion_scheduler_level_vk_hash", "ordinal": 2, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "layer_2_tip_fee", + "name": "recursion_node_level_vk_hash", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "initiator_address", + "name": "recursion_leaf_level_vk_hash", "ordinal": 4, "type_info": "Bytea" }, { - "name": "nonce", + "name": "recursion_circuits_set_vks_hash", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "signature", + "name": "bootloader_code_hash", "ordinal": 6, "type_info": "Bytea" }, { - "name": "input", + "name": "default_account_code_hash", "ordinal": 7, "type_info": "Bytea" }, { - "name": "data", + "name": "verifier_address", "ordinal": 8, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "received_at", + "name": "upgrade_tx_hash", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "priority_op_id", + "name": "created_at", "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" - }, - { - 
"name": "block_hash?", - "ordinal": 35, - "type_info": "Bytea" - }, - { - "name": "miniblock_timestamp?", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "eth_commit_tx_hash?", - "ordinal": 37, - "type_info": "Text" - }, - { - "name": "eth_prove_tx_hash?", - "ordinal": 38, - "type_info": "Text" - }, - { - "name": "eth_execute_tx_hash?", - "ordinal": 39, - "type_info": "Text" } ], "nullable": [ false, false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, false, false, - true, false, - true, false, false, false, - true, - true, - true, - true, - true, false, true, - true, - false, - false, - false, - false, false ], "parameters": { "Left": [ - "Bytea" + "Int4" ] } }, - "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n miniblocks.timestamp as \"miniblock_timestamp?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + "query": "SELECT * FROM protocol_versions\n WHERE id < $1\n ORDER BY id DESC\n LIMIT 1\n " }, - "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { + "5563da0d52ca7310ae7bc957caa5d8b3dcbd9386bb2a0be68dcd21ebb044cdbd": { "describe": { "columns": [ { - "name": "hash", + "name": "bytecode_hash", "ordinal": 0, "type_info": "Bytea" + }, + { + "name": "bytecode", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ + false, false ], "parameters": { "Left": [ - "Interval" + "Int8" ] } }, - "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" + "query": "SELECT bytecode_hash, bytecode FROM factory_deps INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number WHERE miniblocks.l1_batch_number = $1" }, - "4ac212a08324b9d4c3febc585109f19105b4d20aa3e290352e3c63d7ec58c5b2": { + "55debba852ef32f3b5ba6ffcb745f7b59d6888a21cb8792f8f9027e3b164a245": { "describe": { "columns": [ { - "name": "l2_address", + "name": "region", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Text" + }, + { + "name": "zone", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "total_gpus", + "ordinal": 2, + "type_info": "Int8" } ], "nullable": [ - false + false, + false, + null ], "parameters": { "Left": [] } }, - "query": "SELECT l2_address FROM tokens" + "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " }, - "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { + "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + }, + 
"57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { "describe": { "columns": [ { - "name": "hashed_key", + "name": "count!", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "value!", + "name": "circuit_type!", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Text" + }, + { + "name": "status!", + "ordinal": 2, + "type_info": "Text" } ], "nullable": [ + null, false, false ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " + }, + "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "ByteaArray" + "Int4", + "Int4" ] } }, - "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" + "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" }, - "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { + "58489a4e8730646ce20efee849742444740c72f59fad2495647742417ed0ab5a": { "describe": { "columns": [ { - "name": "count!", + "name": "base_fee_per_gas", "ordinal": 0, - "type_info": "Int8" + "type_info": "Numeric" } ], "nullable": [ - null + false ], "parameters": { "Left": [ + "Int8", "Int8" ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" + "query": "SELECT base_fee_per_gas FROM miniblocks WHERE number <= $1 ORDER BY number DESC LIMIT $2" }, - "4bab972cbbd8b53237a840ba9307079705bd4b5270428d2b41f05ee3d2aa42af": { + "5922fdf40632a6ffecfe824a3ba29bcf7b379aff5253db2739cc7be6145524e8": { "describe": { "columns": [ { - "name": "l1_batch_number!", + "name": "bootloader_code_hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "circuit_type", + "name": "default_account_code_hash", "ordinal": 1, - "type_info": "Text" + "type_info": "Bytea" + }, + { + "name": "id", + "ordinal": 2, + "type_info": "Int4" } ], "nullable": [ - null, + false, + false, false ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_type\n FROM prover_jobs\n WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR status = 'failed')\n GROUP BY circuit_type\n " + "query": "SELECT bootloader_code_hash, default_account_code_hash, id FROM protocol_versions\n WHERE timestamp <= $1\n ORDER BY id DESC\n LIMIT 1\n " }, - "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { + "59a318fc330369353f2570bfef09909d11e22a1c76ba5277839a6866d8e796b6": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "hashed_key", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "index", + "ordinal": 1, + "type_info": "Int8" + } + ], + "nullable": [ + false, + true + ], "parameters": { "Left": [ "Int8" ] } }, - "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" + "query": "SELECT hashed_key, index FROM initial_writes WHERE l1_batch_number = $1 ORDER BY index" }, - "4c83881635e957872a435737392bfed829de58780887c9a0fa7921ea648296fb": { + "5a27a65fa105897b60a99c1e0015e4b8c93c45e0c448e77b03565db5c36695ed": { "describe": { "columns": [ { - "name": "number", + "name": "max", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { "Left": [] } }, - "query": "SELECT number FROM 
l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT 1" + "query": "SELECT MAX(l1_batch_number) FROM witness_inputs WHERE merkel_tree_paths_blob_url IS NOT NULL" + }, + "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "attempts", + "ordinal": 1, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " + }, + "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { + "describe": { + "columns": [ + { + "name": "value", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT value FROM storage WHERE hashed_key = $1" }, - "4ca0356959e4cc50e09b6fe08e9d45cbd929601935506acbbade4a42c2eaea89": { + "5b2935b5b7e8c2907f5e221a6b1e6f4b8737b9fc618c5d021a3e1d58a3aed116": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Bytea", - "Text" + "Text", + "Int8" ] } }, - "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), now())\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n " }, - "4d2e106c809a48ace74952df2b883a5e747aaa1bc6bee28e986dccee7fa130b6": { + "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { "describe": { "columns": [ { - "name": "nonce", + "name": "l1_batch_number!", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "aggregation_round", + "ordinal": 1, + "type_info": "Int4" } ], "nullable": [ + null, false ], "parameters": { "Left": [] } }, - "query": "SELECT nonce FROM eth_txs ORDER BY id DESC LIMIT 1" + "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " }, - "4d36aff2bdeb0b659b8c4cd031f7c3fc204d92bb500a4efe8b6beb9255a232f6": { + "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { "describe": { "columns": [ { - "name": "timestamp", + "name": "number", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { "Left": [] } }, - "query": "SELECT timestamp FROM l1_batches WHERE eth_execute_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" + "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" + }, + "5df806b33f84893d4ddfacf3b289b0e173e85ad9204cbb7ad314e68a94cdc41e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int2", + "Int4", + "Int4" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET aggregations_url = $1, number_of_dependent_jobs = $5, updated_at = now()\n WHERE l1_batch_number = $2\n AND circuit_id = $3\n AND depth = $4\n " }, - "4d7b5a423b29ce07bd12f168d1ee707e6e413d9a4f0daafb4beed102d22d1745": { + "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { "describe": { "columns": [ { - "name": "address", + "name": "op_id", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 1, - "type_info": "Bytea" + "type_info": 
"Int8" } ], "nullable": [ - false, - false + null ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT address, key FROM protective_reads\n WHERE l1_batch_number = $1\n " + "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" }, - "4e2b733fea9ca7cef542602fcd80acf1a9d2e0f1e22566f1076c4837e3ac7e61": { + "5eb9f25dacfb02e70a9fcf0a41937d4c63bd786efb2fd0d1180f449a3ae0bbc0": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "instance_host", + "name": "leaf_layer_subqueues", "ordinal": 1, - "type_info": "Inet" + "type_info": "Bytea" }, { - "name": "instance_port", + "name": "aggregation_outputs", "ordinal": 2, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "instance_status", + "name": "number_of_leaf_circuits", "ordinal": 3, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "created_at", + "name": "status", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "updated_at", + "name": "processing_started_at", "ordinal": 5, "type_info": "Timestamp" }, { - "name": "processing_started_at", + "name": "time_taken", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Time" }, { - "name": "queue_free_slots", + "name": "error", "ordinal": 7, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "queue_capacity", + "name": "created_at", "ordinal": 8, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "specialized_prover_group_id", + "name": "updated_at", "ordinal": 9, - "type_info": "Int2" + "type_info": "Timestamp" }, { - "name": "region", + "name": "attempts", "ordinal": 10, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "zone", + "name": "leaf_layer_subqueues_blob_url", "ordinal": 11, "type_info": "Text" }, { - "name": "num_gpu", + "name": "aggregation_outputs_blob_url", "ordinal": 12, - "type_info": "Int2" + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 13, + "type_info": "Bool" + }, + { + "name": "protocol_version", + "ordinal": 14, + "type_info": "Int4" } ], "nullable": [ false, + true, + true, + true, false, + true, + true, + true, false, false, false, - false, - true, - true, true, true, false, - false, true ], "parameters": { "Left": [ "Interval", - "Int2", - "Text", - "Text" + "Int4", + "Int8", + "Int4Array" ] } }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " + "query": "\n UPDATE node_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM node_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs.*\n " }, - "4eefec8f46f9b8bae265230dab09ab66fde5f24b023c87726dbd856e782de986": { + 
"5f037f6ae8489d5224772d4f9e3e6cfc2075560957fa491d97a95c0e79ff4830": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "block_batch?", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "max_batch?", + "ordinal": 1, + "type_info": "Int8" + } + ], + "nullable": [ + null, + null + ], "parameters": { "Left": [ - "Int8", "Int8" ] } }, - "query": "\n UPDATE transactions\n SET effective_gas_price = max_fee_per_gas\n WHERE miniblock_number BETWEEN $1 AND $2\n AND is_priority = TRUE\n " + "query": "SELECT (SELECT l1_batch_number FROM miniblocks WHERE number = $1) as \"block_batch?\", (SELECT MAX(number) + 1 FROM l1_batches) as \"max_batch?\"" }, - "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { + "5f4b1091b74424ffd20c0aede98287418afa2bb37dbc941200c1d6190c96bec5": { "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int4Array", - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "Int4Array", - "Int4Array", - "VarcharArray", - "NumericArray", - "JsonbArray", - "ByteaArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "Int8" - ] - } - }, - "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " - }, - "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" - }, - "516e309a97010cd1eb8398b2b7ff809786703c075e4c3dff1133c41cdcfdd3f3": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": 
"Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "fee_account_address", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "bloom", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "priority_ops_onchain_data", - "ordinal": 7, - "type_info": "ByteaArray" - }, - { - "name": "hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - 
false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT * FROM l1_batches WHERE number = $1" - }, - "51d788b5e8d808db143b6c057485f0a0b314a0c33e3eb2dff99ca0b32d12f8e4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text", - "Int2", - "Int4", - "Int4", - "Bool" - ] - } - }, - "query": "\n INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number)\n DO UPDATE SET updated_at=now()\n " - }, - "52eeb8c529efb796fdefb30a381fcf6c931512f30e55e24c155f6c649e662909": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queuing'\n WHERE l1_batch_number IN\n (SELECT l1_batch_number FROM scheduler_dependency_tracker_fri\n WHERE status != 'queued'\n AND circuit_1_final_prover_job_id IS NOT NULL\n AND circuit_2_final_prover_job_id IS NOT NULL\n AND circuit_3_final_prover_job_id IS NOT NULL\n AND circuit_4_final_prover_job_id IS NOT NULL\n AND circuit_5_final_prover_job_id IS NOT NULL\n AND circuit_6_final_prover_job_id IS NOT NULL\n AND circuit_7_final_prover_job_id IS NOT NULL\n AND circuit_8_final_prover_job_id IS NOT NULL\n AND circuit_9_final_prover_job_id IS NOT NULL\n AND circuit_10_final_prover_job_id IS NOT NULL\n AND circuit_11_final_prover_job_id IS NOT NULL\n AND circuit_12_final_prover_job_id IS NOT NULL\n AND circuit_13_final_prover_job_id IS NOT NULL\n )\n RETURNING l1_batch_number;\n " - }, - "53726a35b24a838df04c1f7201da322aab287830c96fc2c712a67d360bbc2bd0": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - } - }, - "query": "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" - }, - "5543380548ce40063d43c1d54e368c7d385800d7ade9e720306808cc4c376978": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "fee_account_address", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "bloom", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "priority_ops_onchain_data", - "ordinal": 7, - "type_info": "ByteaArray" - }, - { - "name": "hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { 
- "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, + "columns": [ { - "name": "l2_fair_gas_price", - "ordinal": 42, + "name": "timestamp", + "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, - false, false ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT timestamp FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" + }, + "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int4" ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" + "query": "DELETE FROM eth_txs_history\n WHERE id = $1" }, - "5563da0d52ca7310ae7bc957caa5d8b3dcbd9386bb2a0be68dcd21ebb044cdbd": { + "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": { "describe": { "columns": [ { - "name": "bytecode_hash", + "name": 
"version", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode", - "ordinal": 1, - "type_info": "Bytea" + "type_info": "Text" } ], "nullable": [ - false, false ], "parameters": { "Left": [ - "Int8" + "Text" ] } }, - "query": "SELECT bytecode_hash, bytecode FROM factory_deps INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number WHERE miniblocks.l1_batch_number = $1" + "query": "SELECT version FROM compiler_versions WHERE compiler = $1 ORDER by version" }, - "55debba852ef32f3b5ba6ffcb745f7b59d6888a21cb8792f8f9027e3b164a245": { + "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { "describe": { "columns": [ { - "name": "region", + "name": "status", "ordinal": 0, "type_info": "Text" }, { - "name": "zone", + "name": "error", "ordinal": 1, "type_info": "Text" }, { - "name": "total_gpus", - "ordinal": 2, + "name": "compilation_errors", + "ordinal": 2, + "type_info": "Jsonb" + } + ], + "nullable": [ + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " + }, + "665112c83ed7f126f94d1c47408de3495ee6431970e334d94ae75f853496eb48": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " + }, + "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Bytea", + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " + }, + "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "describe": { + "columns": [ + { + "name": "l2_to_l1_logs", + "ordinal": 0, + "type_info": "ByteaArray" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" + }, + "67efc7ea5bd3821d8325759ed8357190f6122dd2ae503a57faf15d8b749a4361": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - false, - false, - null + false ], "parameters": { "Left": [] } }, - "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " }, - "560f088f500d3c369453453b2e5903253eee00a49690c309ab7f3a0131a0a467": { + "697835cdd5be1b99a0f332c4c8f3245e317b0282b46e55f15e728a7642382b25": { "describe": { "columns": [ { - "name": "hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "full_fee", + "name": "circuit_id", "ordinal": 2, - "type_info": 
"Numeric" + "type_info": "Int2" }, { - "name": "layer_2_tip_fee", + "name": "aggregation_round", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Int2" }, { - "name": "initiator_address", + "name": "sequence_number", "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, "type_info": "Int4" }, { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, + "name": "depth", + "ordinal": 5, "type_info": "Int4" }, { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, + "name": "is_node_final_proof", + "ordinal": 6, "type_info": "Bool" - }, + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Time", + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1, proof_blob_url=$2\n WHERE id = $3\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + }, + "6a282084b02cddd8646e984a729b689bdb758e07096fc8cf60f68c6ec5bd6a9c": { + "describe": { + "columns": [ { - "name": "l1_block_number", - "ordinal": 23, + "name": "max?", + "ordinal": 0, "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(id) as \"max?\" FROM protocol_versions" + }, + "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { + "describe": { + "columns": [ { - "name": "paymaster_input", - "ordinal": 26, + "name": "bytecode", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" + "name": "data?", + "ordinal": 1, + "type_info": "Jsonb" }, { - "name": 
"l1_tx_refund_recipient", - "ordinal": 34, + "name": "contract_address?", + "ordinal": 2, "type_info": "Bytea" } ], "nullable": [ false, false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - false, - true, - false, - false, - false, - true, - true, - true, - true, - true, - false, - true, true ], "parameters": { "Left": [ - "Int8", - "Numeric", - "Numeric" + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " + }, + "6ea2cd1c5df69ba8ab1ef635b64870587325219abbef188007747851e313b084": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "UPDATE initial_writes SET index = NULL" + }, + "6ffd22b0590341c38ce3957dccdb5a4edf47fb558bc64e4df08897a0c72dbf23": { + "describe": { + "columns": [ + { + "name": "protocol_version", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" ] } }, - "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n FOR UPDATE\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" + "query": "\n SELECT protocol_version\n FROM witness_inputs\n WHERE l1_batch_number = $1\n " }, - "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { + "715aba794d60ce2faf937eacd9498b203dbb8e620d6d8850b9071cd72902ffbf": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4", - "Int8", + "ByteaArray", + "ByteaArray", "Int8" ] } }, - "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "INSERT INTO factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) SELECT u.bytecode_hash, u.bytecode, $3, now(), now() FROM UNNEST($1::bytea[], $2::bytea[]) AS u(bytecode_hash, bytecode) ON CONFLICT (bytecode_hash) DO NOTHING" }, - "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { + "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { "describe": { "columns": [ { - "name": "count!", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "circuit_type!", + "name": "basic_circuits_blob_url", "ordinal": 1, "type_info": "Text" }, { - "name": "status!", + "name": "basic_circuits_inputs_blob_url", "ordinal": 2, "type_info": "Text" } ], "nullable": [ - null, false, - false + true, + true ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " + "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND 
is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { + "73f0e672ff1a5e144b3034beb18271f1164e95029998d6750c6a8953f7344db5": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "Int8", + "Int4", "Int4", + "Int8", + "Bool", + "Bytea", + "ByteaArray", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Numeric", + "Int8", + "Int8", + "Bytea", + "Bytea", "Int4" ] } }, - "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" + "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, now(), now())" }, - "59b10abd699d19cbdf285334162ee40f294c5fad8f99fc00a4cdb3b233a494d6": { + "741b13b0a4769a30186c650a4a1b24855806a27ccd8d5a50594741842dde44ec": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "min?", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2!", + "name": "max?", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" + } + ], + "nullable": [ + null, + null + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\" FROM miniblocks WHERE l1_batch_number = $1" + }, + "74637e3c2a52bc7c00521c38f476497792e29046fbb4bb6caa4715f017f5d828": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "topic3!", + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "value!", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "l1_address!", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "l2_address!", + "name": "fee_account_address", "ordinal": 5, "type_info": "Bytea" }, { - "name": "symbol!", + "name": "bloom", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Bytea" }, { - "name": "name!", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Varchar" + "type_info": "ByteaArray" }, { - "name": "decimals!", + "name": "hash", "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "parent_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, "type_info": "Int4" }, { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" + "name": 
"merkle_root_hash", + "ordinal": 16, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 17, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 18, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 19, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 20, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 22, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 23, + "type_info": "Bytea" + }, + { + "name": "l1_gas_price", + "ordinal": 24, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 25, + "type_info": "Int8" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 26, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 27, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 30, + "type_info": "Numeric" + }, + { + "name": "aux_data_hash", + "ordinal": 31, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 32, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 34, + "type_info": "Int4" } ], "nullable": [ @@ -5045,160 +4620,149 @@ false, false, false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + false, + false, + true, + true, + true, + true, false, + true, + true, + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Int8" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" }, - "5a27a65fa105897b60a99c1e0015e4b8c93c45e0c448e77b03565db5c36695ed": { + "751c8e5ed1fc211dbb4c7419a316c5f4e49a7f0b4f3a5c74c2abd8daebc457dd": { "describe": { "columns": [ { - "name": "max", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - null + true ], "parameters": { - "Left": [] + 
"Left": [ + "Int8" + ] } }, - "query": "SELECT MAX(l1_batch_number) FROM witness_inputs WHERE merkel_tree_paths_blob_url IS NOT NULL" + "query": "SELECT l1_batch_number FROM miniblocks WHERE number = $1" }, - "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { + "769c021b51b9aaafdf27b4019834729047702b17b0684f7271eecd6ffdf96e7c": { "describe": { "columns": [ { "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "attempts", - "ordinal": 1, - "type_info": "Int4" } ], "nullable": [ - false, false ], "parameters": { - "Left": [ - "Text", - "Int8" - ] + "Left": [] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " + "query": "\n UPDATE scheduler_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number\n WHERE swj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 2\n GROUP BY prover_jobs.l1_batch_number\n HAVING COUNT(*) = 1)\n RETURNING l1_batch_number;\n " }, - "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { + "7717652bb4933f87cbeb7baa2e70e8e0b439663c6b15493bd2e406bed2486b42": { "describe": { "columns": [ { - "name": "value", + "name": "max", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT value FROM storage WHERE hashed_key = $1" - }, - "5b2935b5b7e8c2907f5e221a6b1e6f4b8737b9fc618c5d021a3e1d58a3aed116": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int8" + "Numeric" ] } }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n " + "query": "SELECT max(l1_batches.number) FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1" }, - "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { + "778dd9ef4d302f38f068aceabf3872c0325fbdb5cfc7c18feb5db3768d98564f": { "describe": { "columns": [ { - "name": "l1_batch_number!", + "name": "index", "ordinal": 0, "type_info": "Int8" }, { - "name": "aggregation_round", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - null, + true, false ], "parameters": { "Left": [] } }, - "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " + "query": "SELECT index, l1_batch_number FROM initial_writes WHERE index IS NOT NULL ORDER BY index DESC LIMIT 1" }, - "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { + "780b30e56a3ecfb3daa5310168ac6cd9e94bd5f1d871e1eaf36fbfd463a5e7e0": { "describe": { "columns": [ { - "name": "number", + "name": "address_and_key?", "ordinal": 0, - "type_info": "Int8" + "type_info": "ByteaArray" } ], "nullable": [ null ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" - }, - 
"5df806b33f84893d4ddfacf3b289b0e173e85ad9204cbb7ad314e68a94cdc41e": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Text", - "Int8", - "Int2", - "Int4", - "Int4" + "ByteaArray" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET aggregations_url = $1, number_of_dependent_jobs = $5, updated_at = now()\n WHERE l1_batch_number = $2\n AND circuit_id = $3\n AND depth = $4\n " + "query": "SELECT (SELECT ARRAY[address,key] FROM storage_logs WHERE hashed_key = u.hashed_key ORDER BY miniblock_number, operation_number LIMIT 1) as \"address_and_key?\" FROM UNNEST($1::bytea[]) AS u(hashed_key)" }, - "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { + "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { "describe": { "columns": [ { - "name": "op_id", + "name": "count!", "ordinal": 0, "type_info": "Int8" } @@ -5210,25 +4774,25 @@ "Left": [] } }, - "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " }, - "5f037f6ae8489d5224772d4f9e3e6cfc2075560957fa491d97a95c0e79ff4830": { + "79420f7676acb3f17aeb538271cdb4067a342fd554adcf7bd0550b6682b4c82b": { "describe": { "columns": [ { - "name": "block_batch?", + "name": "tx_hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "max_batch?", + "name": "call_trace", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" } ], "nullable": [ - null, - null + false, + false ], "parameters": { "Left": [ @@ -5236,93 +4800,159 @@ ] } }, - "query": "SELECT (SELECT l1_batch_number FROM miniblocks WHERE number = $1) as \"block_batch?\", (SELECT MAX(number) + 1 FROM l1_batches) as \"max_batch?\"" + "query": "SELECT * FROM call_traces WHERE tx_hash IN (SELECT hash FROM transactions WHERE miniblock_number = $1)" }, - "5f4b1091b74424ffd20c0aede98287418afa2bb37dbc941200c1d6190c96bec5": { + "7a5aba2130fec60318266c8059d3757cd78eb6099d50486b4996fb4090c99622": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea", + "Text", + "Text", + "Int4", + "Int4" + ] + } + }, + "query": "\n INSERT INTO leaf_aggregation_witness_jobs\n (l1_batch_number, basic_circuits, basic_circuits_inputs, basic_circuits_blob_url, basic_circuits_inputs_blob_url, number_of_basic_circuits, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'waiting_for_proofs', now(), now())\n " + }, + "7acba1f016450b084a5fd97199a757a471f8b8a880a800c29737f1bceae3ff46": { "describe": { "columns": [ { - "name": "timestamp", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "merkel_tree_paths_blob_url", + "ordinal": 1, + "type_info": "Text" } ], "nullable": [ - false + false, + true ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "SELECT timestamp FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" + "query": "SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs WHERE status = 'successful' AND is_blob_cleaned = FALSE AND merkel_tree_paths_blob_url is NOT NULL AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1" + }, + "7bbb3ba8c9860818d04bad46dee94f59d054619c961fd3d59d26fcb364598d5d": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text", + "Int4" + ] + } + }, + "query": "\n INSERT INTO 
leaf_aggregation_witness_jobs_fri\n (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id)\n DO UPDATE SET updated_at=now()\n " }, - "5f5974e7033eea82896a435c7776a6740f4a2df77175744a9670d3fee2f24b32": { + "7c3e55a10c8cf90e60001bca401113fd5335ec6c4b1ffdb6d6ff063d244d23e2": { "describe": { "columns": [ { - "name": "address", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic1", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2", + "name": "circuit_type", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "topic3", + "name": "prover_input", "ordinal": 3, "type_info": "Bytea" }, { - "name": "topic4", + "name": "status", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value", + "name": "error", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "block_hash", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_batch_number?", + "name": "created_at", "ordinal": 7, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "miniblock_number", + "name": "updated_at", "ordinal": 8, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "tx_hash", + "name": "time_taken", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Time" }, { - "name": "tx_index_in_block", + "name": "aggregation_round", "ordinal": 10, "type_info": "Int4" }, { - "name": "event_index_in_block", + "name": "result", "ordinal": 11, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "event_index_in_tx", + "name": "sequence_number", "ordinal": 12, "type_info": "Int4" + }, + { + "name": "attempts", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 14, + "type_info": "Text" + }, + { + "name": "proccesed_by", + "ordinal": 15, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 16, + "type_info": "Bool" + }, + { + "name": "protocol_version", + "ordinal": 17, + "type_info": "Int4" } ], "nullable": [ @@ -5331,142 +4961,81 @@ false, false, false, + true, + true, false, - null, - null, false, false, false, + true, false, - false + false, + true, + true, + false, + true ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " - }, - "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4" + "TextArray", + "Int4Array" ] } }, - "query": "DELETE FROM eth_txs_history\n WHERE id = $1" + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($1)\n AND status = 'queued'\n AND protocol_version = ANY($2)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, - 
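The `prover_jobs` entry directly above ends in the job-claiming pattern used throughout this file: `UPDATE ... WHERE id = (SELECT ... FOR UPDATE SKIP LOCKED) RETURNING ...`. The subquery locks a single `'queued'` row, and `SKIP LOCKED` makes concurrent workers skip rows another worker has already locked, so each job is handed to exactly one prover without the pollers blocking each other. A simplified sketch under the same assumptions as above (the real query also filters by circuit type and protocol version and returns the full row):

```rust
use sqlx::PgPool;

// Hypothetical, pared-down claim function demonstrating the pattern.
async fn claim_next_prover_job(pool: &PgPool) -> sqlx::Result<Option<i64>> {
    let row = sqlx::query!(
        r#"
        UPDATE prover_jobs
        SET status = 'in_progress', attempts = attempts + 1,
            updated_at = now(), processing_started_at = now()
        WHERE id = (
            SELECT id FROM prover_jobs
            WHERE status = 'queued'
            ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC
            LIMIT 1
            FOR UPDATE
            SKIP LOCKED
        )
        RETURNING id
        "#
    )
    .fetch_optional(pool) // None when no queued job is available
    .await?;
    Ok(row.map(|r| r.id))
}
```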
"6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": { + "7ca78be8b18638857111cdbc6117ed2c204e3eb22682d5e4553ac4f47efab6e2": { "describe": { "columns": [ { - "name": "version", + "name": "hash", "ordinal": 0, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ false ], - "parameters": { - "Left": [ - "Text" - ] - } - }, - "query": "SELECT version FROM compiler_versions WHERE compiler = $1 ORDER by version" - }, - "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { - "describe": { - "columns": [ - { - "name": "status", - "ordinal": 0, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "compilation_errors", - "ordinal": 2, - "type_info": "Jsonb" - } - ], - "nullable": [ - false, - true, - true - ], "parameters": { "Left": [ "Int8" ] } }, - "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " + "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1\n RETURNING hash\n " }, - "657e576ab02338ce40ae905acdbc1d372f4c1b4c50f8690a23e04824716b8674": { + "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Int8", "Bytea", - "Int4", - "Int4", "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea" - ] - } - }, - "query": "INSERT INTO miniblocks (number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())" - }, - "665112c83ed7f126f94d1c47408de3495ee6431970e334d94ae75f853496eb48": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int8" + "Timestamp" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " + "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" }, - "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Time", "Bytea", - "Text", - "Int8" + "Jsonb" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " + "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " }, - "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { + "7d4210089c5abb84befec962fc769b396ff7ad7da212d079bd4460f9ea4d60dc": { "describe": { "columns": [ { - "name": "total_transactions!", + "name": "l1_batch_number?", "ordinal": 0, "type_info": "Int8" } @@ -5475,320 +5044,312 @@ null ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\" FROM (\n SELECT MIN(l1_batch_number) as \"l1_batch_number\"\n FROM prover_jobs\n WHERE status = 'successful' OR aggregation_round < 3\n GROUP BY l1_batch_number\n HAVING 
MAX(aggregation_round) < 3\n ) as inn\n " }, - "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "7df997e5a203e8df350b1346863fddf26d32123159213c02e8794c39240e48dc": { "describe": { - "columns": [ - { - "name": "l2_to_l1_logs", - "ordinal": 0, - "type_info": "ByteaArray" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" - }, - "67efc7ea5bd3821d8325759ed8357190f6122dd2ae503a57faf15d8b749a4361": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " + "query": "UPDATE miniblocks SET l1_batch_number = $1 WHERE l1_batch_number IS NULL" }, - "697835cdd5be1b99a0f332c4c8f3245e317b0282b46e55f15e728a7642382b25": { + "8045a697a6a1070857b6fdc656f60ee6bab4b3a875ab98099beee227c199f818": { "describe": { "columns": [ { - "name": "id", + "name": "miniblock_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "log_index_in_miniblock", "ordinal": 1, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "circuit_id", + "name": "log_index_in_tx", "ordinal": 2, - "type_info": "Int2" + "type_info": "Int4" }, { - "name": "aggregation_round", + "name": "tx_hash", "ordinal": 3, - "type_info": "Int2" + "type_info": "Bytea" }, { - "name": "sequence_number", + "name": "block_hash", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "depth", + "name": "l1_batch_number?", "ordinal": 5, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "is_node_final_proof", + "name": "shard_id", "ordinal": 6, + "type_info": "Int4" + }, + { + "name": "is_service", + "ordinal": 7, "type_info": "Bool" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [ - "Time", - "Text", - "Int8" - ] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1, proof_blob_url=$2\n WHERE id = $3\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, - "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { - "describe": { - "columns": [ + }, { - "name": "bytecode", - "ordinal": 0, + "name": "tx_index_in_miniblock", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "tx_index_in_l1_batch", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "sender", + "ordinal": 10, "type_info": "Bytea" }, { - "name": "data?", - "ordinal": 1, - "type_info": "Jsonb" + "name": "key", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": "contract_address?", - "ordinal": 2, + "name": "value", + "ordinal": 12, "type_info": "Bytea" } ], "nullable": [ false, false, - true - ], - "parameters": { 
- "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " - }, - "715aba794d60ce2faf937eacd9498b203dbb8e620d6d8850b9071cd72902ffbf": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "Int8" - ] - } - }, - "query": "INSERT INTO factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) SELECT u.bytecode_hash, u.bytecode, $3, now(), now() FROM UNNEST($1::bytea[], $2::bytea[]) AS u(bytecode_hash, bytecode) ON CONFLICT (bytecode_hash) DO NOTHING" - }, - "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { - "describe": { - "columns": [], - "nullable": [], + false, + false, + null, + null, + false, + false, + false, + false, + false, + false, + false + ], "parameters": { "Left": [ - "ByteaArray", - "Int8" + "Bytea" ] } }, - "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " + "query": "SELECT miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\", shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value FROM l2_to_l1_logs WHERE tx_hash = $1 ORDER BY log_index_in_tx ASC" }, - "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { + "84b6ac6bc44503de193e0e4e1201ffd200eddf690722659dad6ddea0604427dc": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "id", "ordinal": 0, "type_info": "Int8" }, { - "name": "basic_circuits_blob_url", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "basic_circuits_inputs_blob_url", + "name": "circuit_id", "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "depth", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 5, + "type_info": "Int2" + }, + { + "name": "aggregations_url", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 8, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 9, "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "number_of_dependent_jobs", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ false, + false, + false, + false, + false, + false, + true, + true, true, + true, + false, + false, true ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 
days'\n LIMIT $1;\n " + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM node_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, depth ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs_fri.*\n " }, - "741b13b0a4769a30186c650a4a1b24855806a27ccd8d5a50594741842dde44ec": { + "852b8d72a8dcbf620e528e983b836b2b05596eb0b7c5d7d1791080bef6a6b821": { "describe": { "columns": [ { - "name": "min?", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "max?", + "name": "l1_tx_count", "ordinal": 1, - "type_info": "Int8" - } - ], - "nullable": [ - null, - null - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\" FROM miniblocks WHERE l1_batch_number = $1" - }, - "751c8e5ed1fc211dbb4c7419a316c5f4e49a7f0b4f3a5c74c2abd8daebc457dd": { - "describe": { - "columns": [ + "type_info": "Int4" + }, { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT l1_batch_number FROM miniblocks WHERE number = $1" - }, - "769c021b51b9aaafdf27b4019834729047702b17b0684f7271eecd6ffdf96e7c": { - "describe": { - "columns": [ + "name": "l2_tx_count", + "ordinal": 2, + "type_info": "Int4" + }, { - "name": "l1_batch_number", - "ordinal": 0, + "name": "timestamp", + "ordinal": 3, "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number\n WHERE swj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 2\n GROUP BY prover_jobs.l1_batch_number\n HAVING COUNT(*) = 1)\n RETURNING l1_batch_number;\n " - }, - "7717652bb4933f87cbeb7baa2e70e8e0b439663c6b15493bd2e406bed2486b42": { - "describe": { - "columns": [ + }, { - "name": "max", - "ordinal": 0, + "name": "is_finished", + "ordinal": 4, + "type_info": "Bool" + }, + { + "name": "fee_account_address", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 6, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "bloom", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 9, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 10, + "type_info": "Jsonb" + }, + { + "name": "base_fee_per_gas", + "ordinal": 11, + "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 12, "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Float8" - ] - } - }, - "query": "SELECT max(l1_batches.number) FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1" - }, - "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { - 
"describe": { - "columns": [ + }, { - "name": "count!", - "ordinal": 0, + "name": "l2_fair_gas_price", + "ordinal": 13, "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " - }, - "79420f7676acb3f17aeb538271cdb4067a342fd554adcf7bd0550b6682b4c82b": { - "describe": { - "columns": [ + }, { - "name": "tx_hash", - "ordinal": 0, + "name": "bootloader_code_hash", + "ordinal": 14, "type_info": "Bytea" }, { - "name": "call_trace", - "ordinal": 1, + "name": "default_aa_code_hash", + "ordinal": 15, "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 16, + "type_info": "Int4" } ], "nullable": [ false, - false + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true ], "parameters": { "Left": [ @@ -5796,20 +5357,20 @@ ] } }, - "query": "SELECT * FROM call_traces WHERE tx_hash IN (SELECT hash FROM transactions WHERE miniblock_number = $1)" + "query": "SELECT number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM l1_batches WHERE number = $1" }, - "7acba1f016450b084a5fd97199a757a471f8b8a880a800c29737f1bceae3ff46": { + "85c52cb09c73499507144e3a684c3230c2c71eb4f8ddef43e67fbd33de2747c8": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "timestamp", "ordinal": 0, "type_info": "Int8" }, { - "name": "merkel_tree_paths_blob_url", + "name": "hash", "ordinal": 1, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ @@ -5822,362 +5383,271 @@ ] } }, - "query": "SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs WHERE status = 'successful' AND is_blob_cleaned = FALSE AND merkel_tree_paths_blob_url is NOT NULL AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1" + "query": "SELECT timestamp, hash FROM l1_batches WHERE number = $1" }, - "7bbb3ba8c9860818d04bad46dee94f59d054619c961fd3d59d26fcb364598d5d": { + "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { "describe": { "columns": [], "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text", - "Int4" - ] - } - }, - "query": "\n INSERT INTO leaf_aggregation_witness_jobs_fri\n (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id)\n DO UPDATE SET updated_at=now()\n " - }, - "7ca78be8b18638857111cdbc6117ed2c204e3eb22682d5e4553ac4f47efab6e2": { - "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], "parameters": { "Left": [ "Int8" ] } }, - "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1\n RETURNING hash\n " - }, - "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Timestamp" - ] - } - }, - "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" - }, - 
"7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Jsonb" - ] - } - }, - "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " + "query": "DELETE FROM l1_batches WHERE number > $1" }, - "7d4210089c5abb84befec962fc769b396ff7ad7da212d079bd4460f9ea4d60dc": { + "88c49ebeb45f7208d223de59ec08a332beac765644e4f29ed855808b8f9cef91": { "describe": { "columns": [ { - "name": "l1_batch_number?", + "name": "id", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 1, + "type_info": "Text" } ], "nullable": [ - null + false, + true ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\" FROM (\n SELECT MIN(l1_batch_number) as \"l1_batch_number\"\n FROM prover_jobs\n WHERE status = 'successful' OR aggregation_round < 3\n GROUP BY l1_batch_number\n HAVING MAX(aggregation_round) < 3\n ) as inn\n " - }, - "7df997e5a203e8df350b1346863fddf26d32123159213c02e8794c39240e48dc": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "UPDATE miniblocks SET l1_batch_number = $1 WHERE l1_batch_number IS NULL" + "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { + "8996a1794585dfe0f9c16a11e113831a63d5d944bc8061d7caa25ea33f12b19d": { "describe": { "columns": [ { - "name": "number", + "name": "is_priority", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "timestamp", + "name": "initiator_address", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "is_finished", + "name": "gas_limit", "ordinal": 2, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "gas_per_pubdata_limit", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "received_at", "ordinal": 4, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "fee_account_address", + "name": "miniblock_number", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "error", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Varchar" }, { - "name": "priority_ops_onchain_data", + "name": "effective_gas_price", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Numeric" }, { - "name": "hash", + "name": "refunded_gas", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "parent_hash", + "name": "eth_commit_tx_hash?", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "commitment", + "name": "eth_prove_tx_hash?", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "compressed_write_logs", + "name": "eth_execute_tx_hash?", "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - 
"type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, + "type_info": "Text" + } + ], + "nullable": [ + false, + false, + true, + true, + false, + true, + true, + true, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "\n SELECT transactions.is_priority,\n transactions.initiator_address,\n transactions.gas_limit,\n transactions.gas_per_pubdata_limit,\n transactions.received_at,\n transactions.miniblock_number,\n transactions.error,\n transactions.effective_gas_price,\n transactions.refunded_gas,\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + }, + "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + }, + "8a05b6c052ace9b5a383b301f3f441536d90a96bbb791f4711304b22e02193df": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Int8" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " + }, + "8cd540b6063f4a0c1bf4ccb3d111a0ecc341ca8b46b83544c515aa4d809ab9f1": { + "describe": { + "columns": [ { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" + "name": "number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" }, { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" + "name": "timestamp", + "ordinal": 2, + "type_info": "Int8" }, { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" }, { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" }, { - "name": "l2_l1_merkle_root", - "ordinal": 29, + "name": "root_hash?", + "ordinal": 5, "type_info": "Bytea" }, { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" + "name": 
"commit_tx_hash?", + "ordinal": 6, + "type_info": "Text" }, { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" + "name": "committed_at?", + "ordinal": 7, + "type_info": "Timestamp" }, { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" + "name": "prove_tx_hash?", + "ordinal": 8, + "type_info": "Text" }, { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" + "name": "proven_at?", + "ordinal": 9, + "type_info": "Timestamp" }, { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" + "name": "execute_tx_hash?", + "ordinal": 10, + "type_info": "Text" }, { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" + "name": "executed_at?", + "ordinal": 11, + "type_info": "Timestamp" }, { - "name": "gas_per_pubdata_limit", - "ordinal": 36, + "name": "l1_gas_price", + "ordinal": 12, "type_info": "Int8" }, { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" + "name": "l2_fair_gas_price", + "ordinal": 13, + "type_info": "Int8" }, { - "name": "pass_through_data_hash", - "ordinal": 38, + "name": "bootloader_code_hash", + "ordinal": 14, "type_info": "Bytea" }, { - "name": "meta_parameters_hash", - "ordinal": 39, + "name": "default_aa_code_hash", + "ordinal": 15, "type_info": "Bytea" }, { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" + "name": "protocol_version", + "ordinal": 16, + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "name": "fee_account_address?", + "ordinal": 17, + "type_info": "Bytea" } ], "nullable": [ false, + null, false, false, false, false, false, - false, - false, - true, - true, - true, - true, - true, - true, - true, true, false, - false, true, false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, true, false, false, true, true, true, - false, - false, false ], "parameters": { @@ -6186,101 +5656,125 @@ ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " }, - 
"8045a697a6a1070857b6fdc656f60ee6bab4b3a875ab98099beee227c199f818": { + "8d3c9575e3cea3956ba84edc982fcf6e0f7667350e6c2cd6801db8400eabaf9b": { "describe": { "columns": [ { - "name": "miniblock_number", + "name": "hashed_key", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" + }, + "8d48fb84bd08f6103fe28d13331f4e3422b61adab6037e8760b0ca7b1a48907e": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "log_index_in_miniblock", + "name": "scheduler_partial_input_blob_url", "ordinal": 1, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "log_index_in_tx", + "name": "status", "ordinal": 2, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "tx_hash", + "name": "processing_started_at", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "block_hash", + "name": "time_taken", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Time" }, { - "name": "l1_batch_number?", + "name": "error", "ordinal": 5, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "shard_id", + "name": "created_at", "ordinal": 6, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "is_service", + "name": "updated_at", "ordinal": 7, - "type_info": "Bool" + "type_info": "Timestamp" }, { - "name": "tx_index_in_miniblock", + "name": "attempts", "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "tx_index_in_l1_batch", - "ordinal": 9, - "type_info": "Int4" - }, - { - "name": "sender", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "value", - "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int2" } ], "nullable": [ false, false, false, - false, - null, - null, - false, - false, + true, + true, + true, false, false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs_fri.*\n " + }, + "8dcbaaa6186da52ca8b440b6428826288dc668af5a6fc99ef3078c8bcb38c419": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 1, + "type_info": "Int2" + }, + { + "name": "depth", + "ordinal": 2, + "type_info": "Int4" + } + ], + "nullable": [ false, false, false ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "SELECT miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\", shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value FROM l2_to_l1_logs WHERE tx_hash = $1 ORDER BY log_index_in_tx ASC" + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id, depth) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth\n FROM prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND 
prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs\n HAVING COUNT(*) = nawj.number_of_dependent_jobs)\n RETURNING l1_batch_number, circuit_id, depth;\n " }, - "84b6ac6bc44503de193e0e4e1201ffd200eddf690722659dad6ddea0604427dc": { + "8de48960815f48f5d66e82b770a2e0caee42261643ec535a8f21cba1b5d4f50d": { "describe": { "columns": [ { @@ -6299,52 +5793,52 @@ "type_info": "Int2" }, { - "name": "depth", + "name": "closed_form_inputs_blob_url", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "status", + "name": "attempts", "ordinal": 4, - "type_info": "Text" + "type_info": "Int2" }, { - "name": "attempts", + "name": "status", "ordinal": 5, - "type_info": "Int2" + "type_info": "Text" }, { - "name": "aggregations_url", + "name": "error", "ordinal": 6, "type_info": "Text" }, { - "name": "processing_started_at", + "name": "created_at", "ordinal": 7, "type_info": "Timestamp" }, { - "name": "time_taken", + "name": "updated_at", "ordinal": 8, - "type_info": "Time" + "type_info": "Timestamp" }, { - "name": "error", + "name": "processing_started_at", "ordinal": 9, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "created_at", + "name": "time_taken", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Time" }, { - "name": "updated_at", + "name": "is_blob_cleaned", "ordinal": 11, - "type_info": "Timestamp" + "type_info": "Bool" }, { - "name": "number_of_dependent_jobs", + "name": "number_of_basic_circuits", "ordinal": 12, "type_info": "Int4" } @@ -6353,166 +5847,223 @@ false, false, false, - false, + true, false, false, true, + false, + false, true, true, true, - false, - false, true ], "parameters": { "Left": [] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM node_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, depth ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs_fri.*\n " + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM leaf_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs_fri.*\n " }, - "85c52cb09c73499507144e3a684c3230c2c71eb4f8ddef43e67fbd33de2747c8": { + "8fa1a390d7b11b60b3352fafc0a8a7fa15bc761b1bb902f5105fd66b2e3087f2": { "describe": { - "columns": [ - { - "name": "timestamp", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - true - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "SELECT timestamp, hash FROM l1_batches WHERE number = $1" + "query": "\n INSERT INTO scheduler_dependency_tracker_fri\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " }, - "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { 
+ "8fda20e48c41a9c1e58c8c607222a65e1409f63eba91ac99b2736ca5ebbb5ec6": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8" + "Bytea", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Jsonb", + "Int4", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" ] } }, - "query": "DELETE FROM l1_batches WHERE number > $1" + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " }, - "88c49ebeb45f7208d223de59ec08a332beac765644e4f29ed855808b8f9cef91": { + "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { "describe": { "columns": [ { - "name": "id", + "name": "l1_address", "ordinal": 0, - "type_info": "Int8" - }, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_address FROM tokens WHERE well_known = false" + }, + "9008367aad7877f269b765c4d0772d0f60689fcde6987c620fe5749a259a8db7": { + "describe": { + "columns": [ { - "name": "circuit_input_blob_url", - "ordinal": 1, - "type_info": "Text" + "name": "id", + "ordinal": 0, + "type_info": "Int4" } ], "nullable": [ - false, - true + false ], "parameters": { "Left": [ - "Int8" + "Int4", + "Int8", + "Int8", + "Text", + "Bytea" ] } }, - "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" }, - "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { + "908f10640f805957e3f77ed685a7170345d835166e1857c12d76c15b09dffff5": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4", "Int8", + "Int2", + "Int4", + "Text", + "Int4" + ] + } + }, + "query": "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id, depth)\n DO UPDATE SET updated_at=now()" + }, + "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { + "describe": { + "columns": [ + { + "name": "value", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", "Int8" ] } }, - "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " }, - "8a05b6c052ace9b5a383b301f3f441536d90a96bbb791f4711304b22e02193df": { + "957ceda740ffb36740acf1e3fbacf76a2ea7422dd9d76a38d745113359e4b7a6": { "describe": { - "columns": [], - 
"nullable": [], + "columns": [ + { + "name": "protocol_version", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], "parameters": { "Left": [ - "Time", "Int8" ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " + "query": "SELECT protocol_version FROM l1_batches WHERE number = $1" }, - "8a35349a1aa79ac111e442df2cf3f31ecbebe3de7763554b5beb2210ebaa4dc6": { + "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "timestamp", + "name": "nonce", "ordinal": 1, "type_info": "Int8" }, { - "name": "hash", + "name": "raw_tx", "ordinal": 2, "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "contract_address", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "l2_tx_count", + "name": "tx_type", "ordinal": 4, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "base_fee_per_gas", + "name": "gas_used", "ordinal": 5, - "type_info": "Numeric" + "type_info": "Int8" }, { - "name": "l1_gas_price", + "name": "created_at", "ordinal": 6, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "l2_fair_gas_price", + "name": "updated_at", "ordinal": 7, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "bootloader_code_hash", + "name": "has_failed", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "default_aa_code_hash", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Int4" + }, + { + "name": "confirmed_eth_tx_history_id", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "predicted_gas_cost", + "ordinal": 11, + "type_info": "Int8" } ], "nullable": [ @@ -6521,253 +6072,433 @@ false, false, false, + true, false, false, false, true, - true + true, + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Text", + "Text", + "Int8" + ] + } + }, + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" + }, + "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " + }, + "96b1cd2bb6861064b633d597a4a09d279dbc7bcd7a810a7270da3d7941af0fff": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null ], "parameters": { "Left": [ + "Bytea", + "Bytea" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM (SELECT * FROM storage_logs WHERE storage_logs.hashed_key = $1 ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC LIMIT 1) sl WHERE sl.value != $2" + }, + "96f6d06a49646f93ba1918080ef1efba868d506c6b51ede981e610f1b57bf88b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM storage WHERE hashed_key = ANY($1)" + }, + "987fcbbd716648c7c368462643f13d8001d5c6d197add90613ae21d21fdef79b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", "Int8" ] } }, - "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, 
bootloader_code_hash, default_aa_code_hash FROM miniblocks WHERE number = $1" + "query": "UPDATE prover_jobs_fri SET status = $1, updated_at = now() WHERE id = $2" }, - "8b881a834dc813ac5bd4dcd2f973d34ae92cafa929ce933982704d4afe13f972": { + "9970bb69f5ca9ab9f103e1547eb40c1d4f5dd3a540ff6f1b9724821350c9501a": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number!", + "name": "l1_batch_number", "ordinal": 1, "type_info": "Int8" }, { - "name": "timestamp", + "name": "circuit_type", "ordinal": 2, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "l1_tx_count", + "name": "prover_input", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "status", "ordinal": 4, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "root_hash?", + "name": "error", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "commit_tx_hash?", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "committed_at?", + "name": "created_at", "ordinal": 7, "type_info": "Timestamp" }, { - "name": "prove_tx_hash?", + "name": "updated_at", "ordinal": 8, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "proven_at?", + "name": "time_taken", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Time" }, { - "name": "execute_tx_hash?", + "name": "aggregation_round", "ordinal": 10, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "executed_at?", + "name": "result", "ordinal": 11, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "l1_gas_price", + "name": "sequence_number", "ordinal": 12, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", + "name": "attempts", "ordinal": 13, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "bootloader_code_hash", + "name": "circuit_input_blob_url", "ordinal": 14, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "default_aa_code_hash", + "name": "proccesed_by", "ordinal": 15, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "fee_account_address?", + "name": "is_blob_cleaned", "ordinal": 16, - "type_info": "Bytea" + "type_info": "Bool" + }, + { + "name": "protocol_version", + "ordinal": 17, + "type_info": "Int4" } ], "nullable": [ - false, - null, false, false, false, false, false, true, - false, true, false, + false, + false, + false, true, false, false, true, true, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history 
as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " - }, - "8d3c9575e3cea3956ba84edc982fcf6e0f7667350e6c2cd6801db8400eabaf9b": { - "describe": { - "columns": [ - { - "name": "hashed_key", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false + false, + true ], "parameters": { "Left": [ - "Int8" + "Int4Array" ] } }, - "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, - "8d48fb84bd08f6103fe28d13331f4e3422b61adab6037e8760b0ca7b1a48907e": { + "997229f1dd293ccc0379ffb3df49d69a4ea145b5d263eea53b7abc635dd53cc6": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "scheduler_partial_input_blob_url", + "name": "timestamp", "ordinal": 1, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "status", + "name": "hash", "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "processing_started_at", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "time_taken", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Time" + "type_info": "Int4" }, { - "name": "error", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Text" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "l1_gas_price", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "updated_at", + "name": "l2_fair_gas_price", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "attempts", + "name": "bootloader_code_hash", "ordinal": 8, - "type_info": "Int2" + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 10, + "type_info": "Int4" } ], "nullable": [ false, false, false, - true, - true, - true, false, false, - false + false, + false, + false, + true, + true, + true ], "parameters": { "Left": [] } }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs_fri.*\n " + "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM miniblocks ORDER BY number DESC LIMIT 1" }, - "8dcbaaa6186da52ca8b440b6428826288dc668af5a6fc99ef3078c8bcb38c419": { + "99d331d233d357302ab0cc7e3269ef9e414f0c3111785212660f471e3b4f6a04": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int4Array", + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + 
"NumericArray", + "NumericArray", + "Int4Array", + "Int4Array", + "VarcharArray", + "NumericArray", + "JsonbArray", + "ByteaArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "Int8" + ] + } + }, + "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT data_table_temp.* FROM (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table_temp\n JOIN transactions ON transactions.initiator_address = data_table_temp.initiator_address\n AND transactions.nonce = data_table_temp.nonce\n ORDER BY transactions.hash\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address\n AND transactions.nonce=data_table.nonce\n " + }, + "9aaf98668f384f634860c4acf793ff47be08975e5d09061cc26fd53dea249c55": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Text", + "Int4" + ] + } + }, + "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, 'waiting_for_artifacts', now(), now())\n " + }, + "9b4d87f7d7cabe0d61f10d26bb856cce3dc7f36f521efbb6992d98937e5a91ba": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "id", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Text", + "Text", + "Text", + "Text", + "Bool", + "Text", + "Bytea", + "Bool" + ] + } + }, + "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'queued', now(), now())\n RETURNING id\n " + }, + "9b70e9039cdc1a8c8baf9220a9d42a9b1b209ce73f74cccb9e313bcacdc3daf3": { + 
"describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4", + "Bytea", + "Int4", + "Text", + "Int4" + ] + } + }, + "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " + }, + "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { + "describe": { + "columns": [ + { + "name": "successful_limit!", "ordinal": 0, "type_info": "Int8" }, { - "name": "circuit_id", + "name": "queued_limit!", "ordinal": 1, - "type_info": "Int2" + "type_info": "Int8" }, { - "name": "depth", + "name": "max_block!", "ordinal": 2, - "type_info": "Int4" + "type_info": "Int8" } ], - "nullable": [ - false, - false, - false - ], + "nullable": [ + null, + null, + null + ], "parameters": { "Left": [] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id, depth) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth\n FROM prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs\n HAVING COUNT(*) = nawj.number_of_dependent_jobs)\n RETURNING l1_batch_number, circuit_id, depth;\n " + "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " }, - "8de48960815f48f5d66e82b770a2e0caee42261643ec535a8f21cba1b5d4f50d": { + "9c77342759fc71b12f05c2395ac36aabadab1fa64ff585d6349b8053300cf76c": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bool", + "Bytea", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, updated_at = now() WHERE number = $14 AND hash IS NULL" + }, + "9feee3fd267dc4e58185aeae7cab798c03eefa69470e4b98716615cecf6c012a": { "describe": { "columns": [ { @@ -6776,240 +6507,158 @@ "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "contract_address", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "circuit_id", + "name": "source_code", "ordinal": 2, - "type_info": "Int2" + "type_info": "Text" }, { - "name": "closed_form_inputs_blob_url", + "name": "contract_name", "ordinal": 3, "type_info": "Text" }, { - "name": 
"attempts", + "name": "zk_compiler_version", "ordinal": 4, - "type_info": "Int2" + "type_info": "Text" }, { - "name": "status", + "name": "compiler_version", "ordinal": 5, "type_info": "Text" }, { - "name": "error", + "name": "optimization_used", "ordinal": 6, - "type_info": "Text" + "type_info": "Bool" }, { - "name": "created_at", + "name": "optimizer_mode", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "updated_at", + "name": "constructor_arguments", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "processing_started_at", + "name": "is_system", "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 10, - "type_info": "Time" - }, - { - "name": "is_blob_cleaned", - "ordinal": 11, "type_info": "Bool" - }, - { - "name": "number_of_basic_circuits", - "ordinal": 12, - "type_info": "Int4" } ], "nullable": [ false, false, false, - true, false, false, - true, false, false, true, - true, - true, - true + false, + false ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM leaf_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs_fri.*\n " - }, - "8fa1a390d7b11b60b3352fafc0a8a7fa15bc761b1bb902f5105fd66b2e3087f2": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Int8" + "Interval" ] } }, - "query": "\n INSERT INTO scheduler_dependency_tracker_fri\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " + "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n " }, - "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { + "a19b7137403c5cdf1be5f5122ce4d297ed661fa8bdb3bc91f8a81fe9da47469e": { "describe": { "columns": [ { - "name": "l1_address", + "name": "upgrade_tx_hash", "ordinal": 0, "type_info": "Bytea" } ], "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_address FROM tokens WHERE well_known = false" - }, - "9008367aad7877f269b765c4d0772d0f60689fcde6987c620fe5749a259a8db7": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ - false + true ], "parameters": { "Left": [ - "Int4", - "Int8", - "Int8", - "Text", - "Bytea" - ] - } - }, - "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" - }, - "908f10640f805957e3f77ed685a7170345d835166e1857c12d76c15b09dffff5": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Int4", - 
"Text", "Int4" ] } }, - "query": "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id, depth)\n DO UPDATE SET updated_at=now()" + "query": "\n SELECT upgrade_tx_hash FROM protocol_versions\n WHERE id = $1\n " }, - "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { + "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { "columns": [ { - "name": "value", + "name": "address", "ordinal": 0, "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " - }, - "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int4" }, { - "name": "nonce", + "name": "topic1", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "raw_tx", + "name": "topic2", "ordinal": 2, "type_info": "Bytea" }, { - "name": "contract_address", + "name": "topic3", "ordinal": 3, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "tx_type", + "name": "topic4", "ordinal": 4, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "gas_used", + "name": "value", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "block_hash", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "l1_batch_number?", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "has_failed", + "name": "miniblock_number", "ordinal": 8, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "sent_at_block", + "name": "tx_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "confirmed_eth_tx_history_id", + "name": "tx_index_in_block", "ordinal": 10, "type_info": "Int4" }, { - "name": "predicted_gas_cost", + "name": "event_index_in_block", "ordinal": 11, - "type_info": "Int8" + "type_info": "Int4" + }, + { + "name": "event_index_in_tx", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ @@ -7018,155 +6667,100 @@ false, false, false, - true, + false, + null, + null, + false, false, false, false, - true, - true, false ], "parameters": { "Left": [ - "Bytea", - "Int8", - "Text", - "Text", - "Int8" - ] - } - }, - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" - }, - "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " - }, - "96b1cd2bb6861064b633d597a4a09d279dbc7bcd7a810a7270da3d7941af0fff": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea", "Bytea" ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM (SELECT * FROM storage_logs WHERE storage_logs.hashed_key = $1 ORDER BY 
storage_logs.miniblock_number DESC, storage_logs.operation_number DESC LIMIT 1) sl WHERE sl.value != $2" + "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "96f6d06a49646f93ba1918080ef1efba868d506c6b51ede981e610f1b57bf88b": { + "a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray" + "Int4", + "Int8", + "Int8" ] } }, - "query": "DELETE FROM storage WHERE hashed_key = ANY($1)" + "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, - "9b4d87f7d7cabe0d61f10d26bb856cce3dc7f36f521efbb6992d98937e5a91ba": { + "a42626c162a0600b9c7d22dd0d7997fa70cc95296ecc185ff9ae2e03593b07bf": { "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Bytea", - "Text", - "Text", - "Text", - "Text", - "Bool", - "Text", - "Bytea", - "Bool" + "Int8" ] } }, - "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'queued', now(), now())\n RETURNING id\n " + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status='queued'\n WHERE l1_batch_number = $1\n AND status != 'successful'\n AND status != 'in_progress'\n " }, - "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { + "a4a14eb42b9acca3f93c67e5760ba700c333b5e9a38c132a3060a94c988e7f13": { "describe": { "columns": [ { - "name": "successful_limit!", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "queued_limit!", + "name": "received_at", "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "max_block!", - "ordinal": 2, - "type_info": "Int8" + "type_info": "Timestamp" } ], "nullable": [ - null, - null, - null + false, + false ], "parameters": { - "Left": [] + "Left": [ + "Timestamp", + "Int8" + ] } }, - "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " + "query": "SELECT transactions.hash, transactions.received_at FROM transactions LEFT JOIN miniblocks ON miniblocks.number = miniblock_number WHERE received_at > $1 ORDER BY received_at ASC LIMIT $2" }, - "9c77342759fc71b12f05c2395ac36aabadab1fa64ff585d6349b8053300cf76c": { + "a5f23ec9759a7d8bc02125a67d6139bb885cc80225519346d4c7ecfe45c59704": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ "Bytea", "Bytea", "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bool", - "Bytea", - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Int8" + "Bytea" ] } }, - 
"query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, updated_at = now() WHERE number = $14 AND hash IS NULL" + "query": "\n SELECT id\n FROM protocol_versions\n WHERE recursion_circuits_set_vks_hash = $1\n AND recursion_leaf_level_vk_hash = $2\n AND recursion_node_level_vk_hash = $3\n AND recursion_scheduler_level_vk_hash = $4\n " }, - "9fccfc087388898a7da57c88c3e14eb6623f90682abf43e293def3580ea1a8dd": { + "a8878258bac2876686f1218213457edd70652e8145743b6b44a846220829bbe2": { "describe": { "columns": [ { @@ -7249,140 +6843,100 @@ "ordinal": 15, "type_info": "Int4" }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, { "name": "merkle_root_hash", - "ordinal": 18, + "ordinal": 16, "type_info": "Bytea" }, { "name": "l2_to_l1_logs", - "ordinal": 19, + "ordinal": 17, "type_info": "ByteaArray" }, { "name": "l2_to_l1_messages", - "ordinal": 20, + "ordinal": 18, "type_info": "ByteaArray" }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, { "name": "used_contract_hashes", - "ordinal": 25, + "ordinal": 19, "type_info": "Jsonb" }, { "name": "compressed_initial_writes", - "ordinal": 26, + "ordinal": 20, "type_info": "Bytea" }, { "name": "compressed_repeated_writes", - "ordinal": 27, + "ordinal": 21, "type_info": "Bytea" }, { "name": "l2_l1_compressed_messages", - "ordinal": 28, + "ordinal": 22, "type_info": "Bytea" }, { "name": "l2_l1_merkle_root", - "ordinal": 29, + "ordinal": 23, "type_info": "Bytea" }, { "name": "l1_gas_price", - "ordinal": 30, + "ordinal": 24, "type_info": "Int8" }, { "name": "l2_fair_gas_price", - "ordinal": 31, + "ordinal": 25, "type_info": "Int8" }, { "name": "rollup_last_leaf_index", - "ordinal": 32, + "ordinal": 26, "type_info": "Int8" }, { "name": "zkporter_is_available", - "ordinal": 33, + "ordinal": 27, "type_info": "Bool" }, { "name": "bootloader_code_hash", - "ordinal": 34, + "ordinal": 28, "type_info": "Bytea" }, { "name": "default_aa_code_hash", - "ordinal": 35, + "ordinal": 29, "type_info": "Bytea" }, { "name": "base_fee_per_gas", - "ordinal": 36, + "ordinal": 30, "type_info": "Numeric" }, { "name": "aux_data_hash", - "ordinal": 37, + "ordinal": 31, "type_info": "Bytea" }, { "name": "pass_through_data_hash", - "ordinal": 38, + "ordinal": 32, "type_info": "Bytea" }, { "name": "meta_parameters_hash", - "ordinal": 39, + "ordinal": 33, "type_info": "Bytea" }, { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 41, + "name": "protocol_version", + "ordinal": 34, "type_info": "Int4" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ @@ -7402,16 +6956,10 @@ true, true, true, - false, - false, true, false, false, false, - false, - false, - false, - false, true, true, true, @@ -7426,160 +6974,151 @@ true, true, true, - false, - true, - false + true 
], "parameters": { "Left": [ - "Int8", + "Bytea", + "Bytea", + "Int4", "Int8" ] } }, - "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit FROM (SELECT l1_batches.*, row_number() OVER (ORDER BY number ASC) AS row_number FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND l1_batches.skip_proof = TRUE AND l1_batches.number > $1 ORDER BY number LIMIT $2) inn WHERE number - row_number = $1" + "query": "SELECT number, l1_batches.timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version WHERE eth_commit_tx_id IS NULL AND number != 0 AND protocol_versions.bootloader_code_hash = $1 AND protocol_versions.default_account_code_hash = $2 AND commitment IS NOT NULL AND (protocol_versions.id = $3 OR protocol_versions.upgrade_tx_hash IS NULL) ORDER BY number LIMIT $4" }, - "9feee3fd267dc4e58185aeae7cab798c03eefa69470e4b98716615cecf6c012a": { + "a9b1a31def214f8b1441dc3ab720bd270f3991c9f1c7528256276e176d532163": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "contract_address", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "source_code", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "contract_name", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "zk_compiler_version", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "compiler_version", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "optimization_used", - "ordinal": 6, - "type_info": "Bool" - }, + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1" + }, + "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { + "describe": { + "columns": [ { - "name": "optimizer_mode", - "ordinal": 7, - "type_info": "Text" - }, + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Bytea", + "Numeric", + "Interval", + "Interval" + ] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\" FROM 
tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " + }, + "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { + "describe": { + "columns": [ { - "name": "constructor_arguments", - "ordinal": 8, + "name": "proof", + "ordinal": 0, "type_info": "Bytea" }, - { - "name": "is_system", - "ordinal": 9, - "type_info": "Bool" + { + "name": "aggregation_result_coords", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - false, true, - false, - false + true ], "parameters": { "Left": [ - "Interval" + "Int8", + "Int8" ] } }, - "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n " + "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " }, - "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { + "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { "describe": { "columns": [ { - "name": "address", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic1", + "name": "nonce", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2", + "name": "raw_tx", "ordinal": 2, "type_info": "Bytea" }, { - "name": "topic3", + "name": "contract_address", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "topic4", + "name": "tx_type", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value", + "name": "gas_used", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "block_hash", + "name": "created_at", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_batch_number?", + "name": "updated_at", "ordinal": 7, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "miniblock_number", + "name": "has_failed", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "tx_hash", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "tx_index_in_block", + "name": "confirmed_eth_tx_history_id", "ordinal": 10, "type_info": "Int4" }, { - "name": "event_index_in_block", + "name": "predicted_gas_cost", "ordinal": 11, - "type_info": "Int4" - }, - { - "name": "event_index_in_tx", - "ordinal": 12, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ @@ -7588,101 +7127,129 @@ false, false, false, - false, - null, - null, - false, + true, false, false, false, + true, + true, false ], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as 
\"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " }, - "a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { + "aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "number!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], "parameters": { - "Left": [ - "Int4", - "Int8", - "Int8" - ] + "Left": [] } }, - "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "SELECT COALESCE(MAX(number), 0) AS \"number!\" FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL" }, - "a42626c162a0600b9c7d22dd0d7997fa70cc95296ecc185ff9ae2e03593b07bf": { + "aacaeff95b9a2988167dde78200d7139ba99edfa30dbcd8a7a57f72efc676477": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" + }, + "ac35fb205c83d82d78983f4c9b47f56d3c91fbb2c95046555c7d60a9a2ebb446": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "ByteaArray", + "Int8Array", "Int8" ] } }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status='queued'\n WHERE l1_batch_number = $1\n AND status != 'successful'\n AND status != 'in_progress'\n " + "query": "INSERT INTO initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) SELECT u.hashed_key, u.index, $3, now(), now() FROM UNNEST($1::bytea[], $2::bigint[]) AS u(hashed_key, index)" }, - "a482c481a9ffaad4735775282cf6e8d68f284884e7c6f043e9737a0d236f2e97": { + "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic2!", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic3!", + "name": "tx_hash", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value!", + "name": "created_at", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_address!", + "name": "updated_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_address!", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "symbol!", + "name": "priority_fee_per_gas", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "name!", + "name": "confirmed_at", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Timestamp" }, { - "name": "decimals!", + "name": "signed_raw_tx", "ordinal": 8, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "usd_price?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Numeric" + "type_info": "Int4" + }, + { + "name": "sent_at", + "ordinal": 10, + "type_info": "Timestamp" } ], "nullable": [ @@ -7693,99 +7260,184 @@ false, false, false, - false, - false, + true, + true, + true, + true + ], + 
"parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" + }, + "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " + }, + "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { + "describe": { + "columns": [ + { + "name": "market_volume", + "ordinal": 0, + "type_info": "Numeric" + }, + { + "name": "market_volume_updated_at", + "ordinal": 1, + "type_info": "Timestamp" + } + ], + "nullable": [ + true, + true + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" + }, + "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" + }, + "aea4e8d1b018836973d252df943a2c1988dd5f3ffc629064b87d25af8cdb8638": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 1, + "type_info": "Int4" + } + ], + "nullable": [ + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Bytea" + ] + } + }, + "query": "SELECT l1_batch_number, l1_batch_tx_index FROM transactions WHERE hash = $1" + }, + "af22ad34bde12b8d25eb85da9939d12b7bed6407d732b868eeaf2916568c8646": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Int8" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " }, - "a4a14eb42b9acca3f93c67e5760ba700c333b5e9a38c132a3060a94c988e7f13": { + "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { "describe": { "columns": [ { - "name": "hash", + "name": "l1_block_number", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "received_at", - "ordinal": 1, - "type_info": "Timestamp" + "type_info": "Int4" } ], "nullable": [ - false, - false + true ], "parameters": { - "Left": [ - "Timestamp", - "Int8" - ] + "Left": [] } }, - "query": "SELECT transactions.hash, transactions.received_at FROM transactions LEFT JOIN miniblocks ON miniblocks.number = miniblock_number WHERE received_at > $1 ORDER BY received_at ASC LIMIT $2" + "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" }, - "a7d575d90f9bf19427ddbe342d296effb7c38bc90f213aa1cc94523930dd8f15": { + "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { "describe": { "columns": [ { - "name": "tx_hash", + "name": 
"id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "l1_sender!", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic2!", + "name": "tx_hash", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value!", + "name": "created_at", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_address!", + "name": "updated_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_address!", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "symbol!", + "name": "priority_fee_per_gas", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "name!", + "name": "confirmed_at", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Timestamp" }, { - "name": "decimals!", + "name": "signed_raw_tx", "ordinal": 8, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "usd_price?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Numeric" + "type_info": "Int4" + }, + { + "name": "sent_at", + "ordinal": 10, + "type_info": "Timestamp" } ], "nullable": [ @@ -7796,25 +7448,37 @@ false, false, false, - false, - false, + true, + true, + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Int4" ] } }, - "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" }, - "a9b1a31def214f8b1441dc3ab720bd270f3991c9f1c7528256276e176d532163": { + "b14997f84d11d7eea89168383195c5579eed1c57bb2b416a749e2863ae6594a5": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " + }, + "b18718ad56a6b543df407d4cc5094ff4b1f26a407b07b97c3eee2b2fbf787c76": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "timestamp", "ordinal": 0, "type_info": "Int8" } @@ -7824,63 +7488,115 @@ ], "parameters": { "Left": [ - "Bytea" + "Int8", + "Int8" ] } }, - "query": "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1" + "query": "SELECT timestamp FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number" }, - "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { + "b479b7d3334f8d4566c294a44e2adb282fbc66a87be5c248c65211c2a8a07db0": { "describe": { "columns": [ { - "name": "count!", + "name": "number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ - null + false, + false ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT number, hash FROM miniblocks WHERE number > $1 ORDER BY number ASC LIMIT $2" + }, + 
"b4c576db7c762103dc6700ded458e996d2e9ef670d7b58b181dbfab02fa426ce": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ + "Bytea", "Bytea", "Numeric", - "Interval", - "Interval" + "Numeric", + "Numeric", + "Jsonb", + "Int8", + "Numeric", + "Numeric", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" ] } }, - "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " }, - "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { + "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { "describe": { "columns": [ { - "name": "proof", + "name": "l1_address", "ordinal": 0, "type_info": "Bytea" }, { - "name": "aggregation_result_coords", + "name": "l2_address", "ordinal": 1, "type_info": "Bytea" } ], "nullable": [ - true, - true + false, + false ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" + }, + "b58a33c9c056b58e597a888fbaacb309520dff728ea65f8b7f756ca185f4ae57": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ + "Int4", "Int8", - "Int8" + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea" ] } }, - "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " + "query": "INSERT INTO protocol_versions\n (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash,\n recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash,\n default_account_code_hash, verifier_address, upgrade_tx_hash, created_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now())\n " }, - "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { + "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { "describe": { "columns": [ { @@ -7959,106 +7675,293 @@ false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " + "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" }, - "aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { + "b79f02c8663c6b99d0aa46b430de32103afa0333e8293cf8661cfc1c3f9fc12e": { "describe": { "columns": [ { - "name": "number!", + "name": "id", "ordinal": 0, "type_info": "Int8" + }, + { + 
"name": "contract_address", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "source_code", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "contract_name", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "zk_compiler_version", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "compiler_version", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "optimization_used", + "ordinal": 6, + "type_info": "Bool" + }, + { + "name": "optimizer_mode", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "constructor_arguments", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "is_system", + "ordinal": 9, + "type_info": "Bool" } ], "nullable": [ - null + false, + false, + false, + false, + false, + false, + false, + true, + false, + false ], "parameters": { "Left": [] } }, - "query": "SELECT COALESCE(MAX(number), 0) AS \"number!\" FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL" + "query": "SELECT id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n FROM contract_verification_requests\n WHERE status = 'successful'\n ORDER BY id" }, - "aacaeff95b9a2988167dde78200d7139ba99edfa30dbcd8a7a57f72efc676477": { + "b7ab3aeee71e87c7469428ec411b410d81282ff6fed63fe5cda0e81a330d2ac5": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" } ], "nullable": [ + false, + false, false ], "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int2" + ] } }, - "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " }, - "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { + "b7c3d8606c77f78897763bc8c77b7bc85ce1daf8d079402eb20dfc0a3f164834": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "eth_tx_id", + "name": "timestamp", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "is_finished", "ordinal": 2, - "type_info": "Text" + "type_info": "Bool" }, { - "name": "created_at", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "updated_at", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "base_fee_per_gas", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "priority_fee_per_gas", + "name": "bloom", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "confirmed_at", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "signed_raw_tx", + "name": "hash", "ordinal": 8, "type_info": "Bytea" }, { - "name": "sent_at_block", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - 
"name": "sent_at", + "name": "commitment", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "merkle_root_hash", + "ordinal": 16, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 17, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 18, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 19, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 20, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 22, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 23, + "type_info": "Bytea" + }, + { + "name": "l1_gas_price", + "ordinal": 24, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 25, + "type_info": "Int8" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 26, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 27, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 30, + "type_info": "Numeric" + }, + { + "name": "aux_data_hash", + "ordinal": 31, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 32, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 34, + "type_info": "Int4" } ], "nullable": [ @@ -8069,71 +7972,44 @@ false, false, false, + false, true, true, true, - true - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" - }, - "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " - }, - "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { - "describe": { - "columns": [ - { - "name": "market_volume", - "ordinal": 0, - "type_info": "Numeric" - }, - { - "name": "market_volume_updated_at", - "ordinal": 1, - "type_info": "Timestamp" - } - ], - "nullable": [ true, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" - }, - "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { - "describe": { - "columns": [], - "nullable": [], + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + false, + false, + true, + true, + true, + true, + false, + true, + true, + true, + true + ], "parameters": { "Left": [ "Int8" ] } }, - "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" + "query": 
"SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches WHERE number = $1" }, - "aea4e8d1b018836973d252df943a2c1988dd5f3ffc629064b87d25af8cdb8638": { + "b7d3b30bff2ed9aabcdaed89ebfd1f0303b70c6d5483ff9183475bb232a04f21": { "describe": { "columns": [ { @@ -8142,111 +8018,107 @@ "type_info": "Int8" }, { - "name": "l1_batch_tx_index", + "name": "status", "ordinal": 1, - "type_info": "Int4" + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" } ], "nullable": [ - true, - true + false, + false, + false ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT l1_batch_number, l1_batch_tx_index FROM transactions WHERE hash = $1" - }, - "af22ad34bde12b8d25eb85da9939d12b7bed6407d732b868eeaf2916568c8646": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Time", - "Int8" + "Interval", + "Int2" ] } }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " + "query": "\n UPDATE witness_inputs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " }, - "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { + "b944df7af612ec911170a43be846eb2f6e27163b0d3983672de2b8d5d60af640": { "describe": { "columns": [ { - "name": "l1_block_number", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - true + false ], "parameters": { - "Left": [] + "Left": [ + "Interval" + ] } }, - "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" + "query": "UPDATE proof_generation_details SET status = 'picked_by_prover', updated_at = now(), prover_taken_at = now() WHERE l1_batch_number = ( SELECT l1_batch_number FROM proof_generation_details WHERE status = 'ready_to_be_proven' OR (status = 'picked_by_prover' AND prover_taken_at < now() - $1::interval) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE SKIP LOCKED ) RETURNING proof_generation_details.l1_batch_number" }, - "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { + "bc360f5148a0a8ddb2475068b651781873f757cf46035e0f05cf420f34c738c6": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "eth_tx_id", + "name": "timestamp", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "hash", "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "created_at", + "name": 
"l1_tx_count", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "updated_at", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Int4" }, { "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Int8" + "type_info": "Numeric" }, { - "name": "priority_fee_per_gas", + "name": "l1_gas_price", "ordinal": 6, "type_info": "Int8" }, { - "name": "confirmed_at", + "name": "l2_fair_gas_price", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "signed_raw_tx", + "name": "bootloader_code_hash", "ordinal": 8, "type_info": "Bytea" }, { - "name": "sent_at_block", + "name": "default_aa_code_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "sent_at", + "name": "protocol_version", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Int4" } ], "nullable": [ @@ -8257,42 +8129,52 @@ false, false, false, - true, + false, true, true, true ], "parameters": { "Left": [ - "Int4" + "Int8" ] } }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" + "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM miniblocks WHERE number = $1" }, - "b14997f84d11d7eea89168383195c5579eed1c57bb2b416a749e2863ae6594a5": { + "be824de76050461afe29dfd229e524bdf113eab3ca24208782c200531db1c940": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ - "Text", - "Int8" + "Int8", + "Int2", + "Int2", + "Int4" ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " + "query": "\n SELECT id from prover_jobs_fri\n WHERE l1_batch_number = $1\n AND circuit_id = $2\n AND aggregation_round = $3\n AND depth = $4\n AND status = 'successful'\n ORDER BY sequence_number ASC;\n " }, - "b479b7d3334f8d4566c294a44e2adb282fbc66a87be5c248c65211c2a8a07db0": { + "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { "describe": { "columns": [ { - "name": "number", + "name": "bytecode", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "hash", + "name": "bytecode_hash", "ordinal": 1, "type_info": "Bytea" } @@ -8303,219 +8185,126 @@ ], "parameters": { "Left": [ - "Int8", - "Int8" + "ByteaArray" ] } }, - "query": "SELECT number, hash FROM miniblocks WHERE number > $1 ORDER BY number ASC LIMIT $2" + "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" }, - "b4c576db7c762103dc6700ded458e996d2e9ef670d7b58b181dbfab02fa426ce": { + "c115b25ea0d6b33331d1737cbc4e37ed44c466782d25f3d9c5519dd886f103ee": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Jsonb", - "Int8", - "Numeric", - "Numeric", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" + "TextArray", + "Text" ] } }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n 
VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " + "query": "\n INSERT INTO compiler_versions (version, compiler, created_at, updated_at)\n SELECT u.version, $2, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)" }, - "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { + "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Text", - "Int4", - "Bytea", - "Int4", - "Text" + "ByteaArray", + "ByteaArray" ] } }, - "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " - }, - "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { - "describe": { - "columns": [ - { - "name": "l1_address", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l2_address", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" + "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " }, - "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { + "c49a6925e9462cc85a6e1cc850f2e147e0a5d990efed56f27792698e6cf9ff0c": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Int4" - }, - { - "name": "nonce", - "ordinal": 1, "type_info": "Int8" }, { - "name": "raw_tx", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "contract_address", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "tx_type", - "ordinal": 4, + "name": "status", + "ordinal": 1, "type_info": "Text" }, { - "name": "gas_used", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "created_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "has_failed", - "ordinal": 8, - "type_info": "Bool" - }, - { - "name": "sent_at_block", - "ordinal": 9, - "type_info": "Int4" - }, - { - "name": "confirmed_eth_tx_history_id", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "predicted_gas_cost", - "ordinal": 11, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - false, + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ false, false, - true, - true, false ], "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int2" + ] } }, - "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " }, - "b79f02c8663c6b99d0aa46b430de32103afa0333e8293cf8661cfc1c3f9fc12e": { + 
"c604ee1dd86ac154d67ddb339da5f65ca849887d6a1068623e874f9df00cfdd1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "ByteaArray", + "Int4Array", + "VarcharArray", + "JsonbArray", + "Int8Array", + "NumericArray" + ] + } + }, + "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n effective_gas_price = data_table.effective_gas_price,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::bigint[]) as refunded_gas,\n UNNEST($7::numeric[]) as effective_gas_price\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " + }, + "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { "describe": { "columns": [ { "name": "id", "ordinal": 0, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "contract_address", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "source_code", + "name": "tx_hash", "ordinal": 2, "type_info": "Text" }, { - "name": "contract_name", + "name": "base_fee_per_gas", "ordinal": 3, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "zk_compiler_version", + "name": "priority_fee_per_gas", "ordinal": 4, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "compiler_version", + "name": "signed_raw_tx", "ordinal": 5, - "type_info": "Text" - }, - { - "name": "optimization_used", - "ordinal": 6, - "type_info": "Bool" - }, - { - "name": "optimizer_mode", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "constructor_arguments", - "ordinal": 8, "type_info": "Bytea" }, { - "name": "is_system", - "ordinal": 9, - "type_info": "Bool" + "name": "nonce", + "ordinal": 6, + "type_info": "Int8" } ], "nullable": [ @@ -8524,19 +8313,16 @@ false, false, false, - false, - false, true, - false, false ], "parameters": { "Left": [] } }, - "query": "SELECT id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n FROM contract_verification_requests\n WHERE status = 'successful'\n ORDER BY id" + "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" }, - "b7ab3aeee71e87c7469428ec411b410d81282ff6fed63fe5cda0e81a330d2ac5": { + "c66b0e0867a1a634f984645ca576a6502b51b67aa0be2dae98e0e2adeb450963": { "describe": { "columns": [ { @@ -8552,7 +8338,7 @@ { "name": "attempts", "ordinal": 2, - "type_info": "Int2" + "type_info": "Int4" } ], "nullable": [ @@ -8563,108 +8349,250 @@ "parameters": { "Left": [ "Interval", - "Int2" + "Int4" ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND 
attempts < $2)\n RETURNING id, status, attempts\n " + "query": "\n UPDATE prover_jobs\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " }, - "b7d3b30bff2ed9aabcdaed89ebfd1f0303b70c6d5483ff9183475bb232a04f21": { + "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "count", "ordinal": 0, "type_info": "Int8" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" + }, + "c6cdc9ef18fe20ef530b653c0c24c674dd74aef3701bfb5c6db23d649115f1d4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Int8" + ] + } + }, + "query": "\n UPDATE witness_inputs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " + }, + "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + }, + "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { + "describe": { + "columns": [ { - "name": "status", - "ordinal": 1, - "type_info": "Text" + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + } + }, + "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " + }, + "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4" + ] + } + }, + "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" + }, + "c881cd7018a9f714cdc3388936e363d49bd6ae52467d382d2f2250ab4f11acf9": { + "describe": { + "columns": [ + { + "name": "address", + "ordinal": 0, + "type_info": "Bytea" }, { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" + "name": "key", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ - false, false, false ], "parameters": { "Left": [ - "Interval", - "Int2" + "Int8" ] } }, - "query": "\n UPDATE witness_inputs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " + "query": "SELECT address, key FROM protective_reads WHERE l1_batch_number = $1" }, - "be824de76050461afe29dfd229e524bdf113eab3ca24208782c200531db1c940": { + "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { "describe": { 
"columns": [ { - "name": "id", + "name": "usd_price", "ordinal": 0, - "type_info": "Int8" + "type_info": "Numeric" + }, + { + "name": "usd_price_updated_at", + "ordinal": 1, + "type_info": "Timestamp" } ], "nullable": [ - false + true, + true ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" + }, + "c92a84c15a8641f73417a03de99a0fb7e07fd0da7b376e65b3ed61209e55a5fa": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "UPDATE witness_inputs SET is_blob_cleaned = TRUE WHERE l1_batch_number = ANY($1)" + }, + "ca8fa3521dab5ee985a837572e8625bd5b26bf79f58950698218b28110c29d1f": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int4", + "Int4", + "Int2", + "Text", + "Text", + "Int2" + ] + } + }, + "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" + }, + "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + } + }, + "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " + }, + "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM miniblocks WHERE number > $1" + }, + "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8", - "Int2", - "Int2", - "Int4" + "Text", + "Jsonb", + "Text" ] } }, - "query": "\n SELECT id from prover_jobs_fri\n WHERE l1_batch_number = $1\n AND circuit_id = $2\n AND aggregation_round = $3\n AND depth = $4\n AND status = 'successful'\n ORDER BY sequence_number ASC;\n " + "query": "\n UPDATE contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " }, - "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { + "d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": { "describe": { "columns": [ { - "name": "bytecode", + "name": "count!", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode_hash", - "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false, - false + null ], "parameters": { - "Left": [ - "ByteaArray" - ] + "Left": [] } }, - "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" + "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" }, - 
"c115b25ea0d6b33331d1737cbc4e37ed44c466782d25f3d9c5519dd886f103ee": { + "d11ff84327058721c3c36bc3371c3139f41e2a2255f64bbc5108c1876848d8bb": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "TextArray", + "Text", + "Text", + "Int4", + "Int4", + "Text", "Text" ] } }, - "query": "\n INSERT INTO compiler_versions (version, compiler, created_at, updated_at)\n SELECT u.version, $2, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)" + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n AND region = $5\n AND zone = $6\n " }, - "c1a4eb25f5493fbcc1b6d61bd7f2e74797a83b7eb0900ba16f3c3ca38f824563": { + "d5dea31f2a325bb44e8ef2cbbabbeb73fd6996a3e6cb99d62c6b97a4aa49c1ca": { "describe": { "columns": [ { @@ -8747,295 +8675,100 @@ "ordinal": 15, "type_info": "Int4" }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, { "name": "merkle_root_hash", - "ordinal": 18, + "ordinal": 16, "type_info": "Bytea" }, { "name": "l2_to_l1_logs", - "ordinal": 19, + "ordinal": 17, "type_info": "ByteaArray" }, { "name": "l2_to_l1_messages", - "ordinal": 20, + "ordinal": 18, "type_info": "ByteaArray" }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, { "name": "used_contract_hashes", - "ordinal": 25, + "ordinal": 19, "type_info": "Jsonb" }, { "name": "compressed_initial_writes", - "ordinal": 26, + "ordinal": 20, "type_info": "Bytea" }, { "name": "compressed_repeated_writes", - "ordinal": 27, + "ordinal": 21, "type_info": "Bytea" }, { "name": "l2_l1_compressed_messages", - "ordinal": 28, + "ordinal": 22, "type_info": "Bytea" }, { "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, + "ordinal": 23, "type_info": "Bytea" }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, { "name": "l1_gas_price", - "ordinal": 41, + "ordinal": 24, "type_info": "Int8" }, { "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, 
- false, - true, - true, - true, - false, - false, - false - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1" - }, - "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray" - ] - } - }, - "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " - }, - "c321d1210799dfd29e54f18f3a3698e9bf288850f2dbd782e817d1cfd9165b16": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, + "ordinal": 25, "type_info": "Int8" }, { - "name": "l1_batch_number", - "ordinal": 1, + "name": "rollup_last_leaf_index", + "ordinal": 26, "type_info": "Int8" }, { - "name": "circuit_type", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "prover_input", - "ordinal": 3, - "type_info": "Bytea" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 9, - "type_info": "Time" + "name": "zkporter_is_available", + "ordinal": 27, + "type_info": "Bool" }, { - "name": "aggregation_round", - "ordinal": 10, - "type_info": "Int4" + "name": "bootloader_code_hash", + "ordinal": 28, + "type_info": "Bytea" }, { - "name": "result", - "ordinal": 11, + "name": "default_aa_code_hash", + "ordinal": 29, "type_info": "Bytea" }, { - "name": "sequence_number", - "ordinal": 12, - "type_info": "Int4" + "name": "base_fee_per_gas", + "ordinal": 30, + "type_info": "Numeric" }, { - "name": "attempts", - "ordinal": 13, - "type_info": "Int4" + "name": "aux_data_hash", + "ordinal": 31, + "type_info": "Bytea" }, { - "name": "circuit_input_blob_url", - "ordinal": 14, - "type_info": "Text" + "name": "pass_through_data_hash", + "ordinal": 32, + "type_info": "Bytea" }, { - "name": "proccesed_by", - "ordinal": 15, - "type_info": "Text" + "name": "meta_parameters_hash", + "ordinal": 33, + "type_info": "Bytea" }, { - "name": "is_blob_cleaned", - "ordinal": 16, - "type_info": "Bool" + "name": "protocol_version", + "ordinal": 34, + "type_info": "Int4" } ], "nullable": [ @@ -9044,166 +8777,120 @@ false, false, false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, true, true, false, false, false, - false, + true, + true, + true, true, false, false, true, true, - false + true, + true, + false, + true, + true, + true, + true ], "parameters": { "Left": [ - "TextArray" + "Int8", + "Int8" + ] + } + }, + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, 
meta_parameters_hash, protocol_version FROM (SELECT l1_batches.*, row_number() OVER (ORDER BY number ASC) AS row_number FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND l1_batches.skip_proof = TRUE AND l1_batches.number > $1 ORDER BY number LIMIT $2) inn WHERE number - row_number = $1" + }, + "d6709f3ce8f08f988e10a0e0fb5c06db9488834a85066babaf3d56cf212b4ea0": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Varchar", + "Varchar", + "Int4" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($1)\n AND status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " + "query": "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3,\n token_list_decimals = $4, well_known = true, updated_at = now()\n WHERE l1_address = $1\n " }, - "c49a6925e9462cc85a6e1cc850f2e147e0a5d990efed56f27792698e6cf9ff0c": { + "d8515595d34dca53e50bbd4ed396f6208e33f596195a5ed02fba9e8364ceb33c": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "bytecode", "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "status", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" + "type_info": "Bytea" } ], "nullable": [ - false, - false, false ], "parameters": { "Left": [ - "Interval", - "Int2" + "Bytea" ] } }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " + "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1" }, - "c604ee1dd86ac154d67ddb339da5f65ca849887d6a1068623e874f9df00cfdd1": { + "d8e0bb1a349523077356be101808340eab078979390af7d26c71489b5f303d1b": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "ByteaArray", - "Int4Array", - "VarcharArray", - "JsonbArray", - "Int8Array", - "NumericArray" + "Int8" ] } }, - "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n effective_gas_price = data_table.effective_gas_price,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::bigint[]) as refunded_gas,\n UNNEST($7::numeric[]) as effective_gas_price\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " + "query": "UPDATE l1_batches SET skip_proof = TRUE WHERE number = $1" }, - "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { + "da01d59119023c822cffa5dc226e82b2abd4cbd46d3856d7db16289868a27fa1": { "describe": { "columns": [ { - "name": "id", + "name": "hashed_key", "ordinal": 0, - "type_info": "Int4" - }, - { - "name": "eth_tx_id", - "ordinal": 1, - "type_info": "Int4" - }, - { - "name": "tx_hash", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "base_fee_per_gas", - "ordinal": 3, - 
"type_info": "Int8" - }, - { - "name": "priority_fee_per_gas", - "ordinal": 4, - "type_info": "Int8" - }, - { - "name": "signed_raw_tx", - "ordinal": 5, "type_info": "Bytea" }, { - "name": "nonce", - "ordinal": 6, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" - }, - "c66b0e0867a1a634f984645ca576a6502b51b67aa0be2dae98e0e2adeb450963": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "status", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - false, false, false ], "parameters": { "Left": [ - "Interval", - "Int4" + "ByteaArray" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + "query": "SELECT hashed_key, l1_batch_number FROM initial_writes WHERE hashed_key = ANY($1::bytea[])" }, - "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { + "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572": { "describe": { "columns": [ { @@ -9219,678 +8906,669 @@ "Left": [] } }, - "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" - }, - "c6cdc9ef18fe20ef530b653c0c24c674dd74aef3701bfb5c6db23d649115f1d4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Time", - "Int8" - ] - } - }, - "query": "\n UPDATE witness_inputs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " - }, - "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - } - }, - "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " + "query": "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" }, - "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { + 
"dc751a25528a272bac17416f782fce3d0aee44b1ae25be0220718b356fda02e8": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "id", "ordinal": 0, "type_info": "Int8" }, - { - "name": "merkle_tree_paths", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "created_at", - "ordinal": 2, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 3, - "type_info": "Timestamp" - }, { "name": "status", - "ordinal": 4, + "ordinal": 1, "type_info": "Text" }, - { - "name": "time_taken", - "ordinal": 5, - "type_info": "Time" - }, - { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "error", - "ordinal": 7, - "type_info": "Varchar" - }, { "name": "attempts", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "merkel_tree_paths_blob_url", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 10, - "type_info": "Bool" + "ordinal": 2, + "type_info": "Int2" } ], "nullable": [ false, - true, - false, - false, - false, - false, - true, - true, false, - true, false ], "parameters": { "Left": [ "Interval", - "Int4", + "Int2" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + }, + "dd330bc075a163974c59ec55ecfddd769d05801963b3e0e840e7f11e7bc6d3e9": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ "Int8" ] } }, - "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " + "query": "SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 ORDER BY l1_batch_number DESC LIMIT $1" }, - "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { + "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Int4", - "Int4" + "Varchar", + "Bytea" ] } }, - "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" + "query": "UPDATE transactions\n SET error = $1, updated_at = now()\n WHERE hash = $2" }, - "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { + "dd8f0bbabcd646457a9174a590c79a45d4f744624a74f79017eacbab6b4f9b0a": { "describe": { "columns": [ { - "name": "usd_price", + "name": "id", "ordinal": 0, - "type_info": "Numeric" + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT id FROM protocol_versions" + }, + "ddb3b38be2b6038b63288961f46ba7d3bb7250caff1146e13c5ee77b6a994ffc": { + "describe": { + "columns": [ + { + "name": "circuit_type", + "ordinal": 0, + "type_info": "Text" }, { - "name": "usd_price_updated_at", + "name": "result", "ordinal": 1, - "type_info": "Timestamp" + "type_info": 
"Bytea" } ], "nullable": [ - true, + false, true ], "parameters": { "Left": [ - "Bytea" + "Int8", + "Int4" ] } }, - "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" + "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " }, - "c92a84c15a8641f73417a03de99a0fb7e07fd0da7b376e65b3ed61209e55a5fa": { + "ddd8b105f5e5cf9db40b14ea47e4ba2b3875f89280019464be34f51605833f1b": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Text", + "Text", + "Int4", + "Text" ] } }, - "query": "UPDATE witness_inputs SET is_blob_cleaned = TRUE WHERE l1_batch_number = ANY($1)" + "query": "UPDATE gpu_prover_queue_fri SET instance_status = $1, updated_at = now() WHERE instance_host = $2::text::inet AND instance_port = $3 AND zone = $4\n " }, - "c9eefe59225b10d90b67ab92a8f9e3bad92ec02f8dfc2719903149ab9f82fe1c": { + "deaf3789ac968e299fe0e5a7f1c72494af8ecd664da9c901ec9c0c5e7c29bb65": { "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "is_priority", - "ordinal": 1, - "type_info": "Bool" - }, - { - "name": "full_fee", - "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, - "type_info": "Numeric" - }, - { - "name": "initiator_address", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray" + ] + } + }, + "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at) SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now() FROM UNNEST ($1::bytea[], $2::bytea[], 
$3::bytea[], $4::bytea[], $5::bytea[]) AS u(hashed_key, address, key, value, tx_hash) ON CONFLICT (hashed_key) DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()" + }, + "df857ee85c600bd90687b2ed91517d91a5dc4de3cd6c15c34119ca52a3321828": { + "describe": { + "columns": [ { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" + "name": "merkle_tree_paths", + "ordinal": 1, + "type_info": "Bytea" }, { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" + "name": "created_at", + "ordinal": 2, + "type_info": "Timestamp" }, { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" + "name": "updated_at", + "ordinal": 3, + "type_info": "Timestamp" }, { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" + "name": "status", + "ordinal": 4, + "type_info": "Text" }, { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" }, { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" + "name": "processing_started_at", + "ordinal": 6, + "type_info": "Timestamp" }, { - "name": "miniblock_timestamp?", - "ordinal": 35, - "type_info": "Int8" + "name": "error", + "ordinal": 7, + "type_info": "Varchar" }, { - "name": "block_hash?", - "ordinal": 36, - "type_info": "Bytea" + "name": "attempts", + "ordinal": 8, + "type_info": "Int4" }, { - "name": "eth_commit_tx_hash?", - "ordinal": 37, + "name": "merkel_tree_paths_blob_url", + "ordinal": 9, "type_info": "Text" }, { - "name": "eth_prove_tx_hash?", - "ordinal": 38, - "type_info": "Text" + "name": "is_blob_cleaned", + "ordinal": 10, + "type_info": "Bool" }, { - "name": "eth_execute_tx_hash?", - "ordinal": 39, - "type_info": "Text" + "name": "protocol_version", + "ordinal": 11, + "type_info": "Int4" } ], "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, false, true, false, - true, false, false, false, true, true, - true, - true, - true, false, true, - true, - false, - false, - false, false, - false + true ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT transactions.*,\n miniblocks.timestamp as \"miniblock_timestamp?\",\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " - }, - "ca8fa3521dab5ee985a837572e8625bd5b26bf79f58950698218b28110c29d1f": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int4", - "Int4", - "Int2", - "Text", - "Text", - "Int2" - ] - } - }, - "query": "\n INSERT INTO gpu_prover_queue 
(instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" - }, - "cba131abb2965f23c392e12b7630295cb8fc4c56775f16c71e65560f74237c94": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int4", + "Interval", "Int4", "Int8", - "Bool", - "Bytea", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Numeric", - "Int8", - "Int8", - "Bytea", - "Bytea" + "Int4Array" ] } }, - "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now())" + "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " }, - "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { + "e1235572a080ee86724da2ad5f528e27e6442ad47abd22e04af8efec2c59432b": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" + }, + { + "name": "sequence_number", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] + "Left": [] } }, - "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, 
l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, - "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { + "e1879cce18ad449d58f02254aa9ae4b115152484187161647d012df798985365": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8" + "Int8", + "Text" ] } }, - "query": "DELETE FROM miniblocks WHERE number > $1" + "query": "\n INSERT INTO scheduler_witness_jobs_fri\n (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " }, - "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { + "e1ad7a51afef6bd7a95df3294f64b7b1bdc4c4fc7ae5c4195802177986f3e876": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false + ], "parameters": { "Left": [ - "Int8", - "Text", - "Jsonb", - "Text" + "Interval", + "Int2" ] } }, - "query": "\n UPDATE contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " }, - "d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": { + "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { "describe": { "columns": [ { - "name": "count!", + "name": "id", "ordinal": 0, - "type_info": "Int8" + "type_info": "Int4" + }, + { + "name": "eth_tx_id", + "ordinal": 1, + "type_info": "Int4" } ], "nullable": [ - null + false, + false ], "parameters": { - "Left": [] + "Left": [ + "Text" + ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" + "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" }, - "d11ff84327058721c3c36bc3371c3139f41e2a2255f64bbc5108c1876848d8bb": { + "e626aa2efb6ba875a12f2b4e37b0ba8052810e73fa5e2d3280f747f7b89b956f": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ "Text", - "Text", - "Int4", - "Int4", - "Text", - "Text" + "Int8" ] } }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n AND region = $5\n AND zone = $6\n " + "query": "UPDATE proof_generation_details SET status='generated', proof_blob_url = $1, updated_at = now() WHERE l1_batch_number = $2" }, - "d2f16dcd8175a337f57724ce5b2fb59d2934f60bb2d24c6ec77195dc63c26002": { + "e6fc424c622576166999df4487068cc1447b09464c48f379f882c45172f34a78": { "describe": { "columns": [ { - "name": "hash!", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l1_address!", + "name": "l1_tx_count", "ordinal": 1, - "type_info": "Bytea" 
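// The witness_inputs and prover_jobs_fri queries above all share one Postgres job-queue
// idiom: a single UPDATE claims exactly one row through a subquery taking
// `FOR UPDATE SKIP LOCKED`, so concurrent workers neither block on nor double-claim the
// same job, while the `status = 'queued' OR (in_progress AND timed out) OR (failed AND
// attempts < max)` predicate doubles as the retry policy. A minimal sketch of driving
// such a query with sqlx; the `PgPool` argument and the two returned columns are
// illustrative, not taken from this diff.
use sqlx::{PgPool, Row};

async fn claim_next_prover_job(pool: &PgPool) -> sqlx::Result<Option<(i64, i16)>> {
    let row = sqlx::query(
        "UPDATE prover_jobs_fri \
         SET status = 'in_progress', attempts = attempts + 1, \
             updated_at = now(), processing_started_at = now() \
         WHERE id = ( \
             SELECT id FROM prover_jobs_fri \
             WHERE status = 'queued' \
             ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC \
             LIMIT 1 \
             FOR UPDATE SKIP LOCKED \
         ) \
         RETURNING id, attempts",
    )
    .fetch_optional(pool)
    .await?;
    // `None` means no claimable job right now; workers typically poll again after a delay.
    Ok(row.map(|row| (row.get("id"), row.get("attempts"))))
}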
+ "type_info": "Int4" }, { - "name": "l2_address!", + "name": "l2_tx_count", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "symbol!", + "name": "timestamp", "ordinal": 3, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "name!", + "name": "is_finished", "ordinal": 4, - "type_info": "Varchar" + "type_info": "Bool" }, { - "name": "decimals!", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "usd_price?", + "name": "l2_to_l1_logs", "ordinal": 6, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "bloom", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 9, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 10, + "type_info": "Jsonb" + }, + { + "name": "base_fee_per_gas", + "ordinal": 11, "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 13, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 15, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 16, + "type_info": "Int4" } ], "nullable": [ - true, - true, - true, - true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, true, true, true ], "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT hash as \"hash!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM transactions\n INNER JOIN tokens\n ON tokens.l2_address = transactions.contract_address OR (transactions.contract_address = $2 AND tokens.l2_address = $3)\n WHERE hash = ANY($1)\n " + "query": "SELECT number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM l1_batches ORDER BY number DESC LIMIT 1" }, - "d6654b10ce779826e565bddf67c9a1aca2767f11e858eb9aaedff4b0ea277a34": { + "e72e3eb79c4d306775cdae718db78f8f1f7c4a97b0abfcceac15efe1c3543fff": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2!", + "name": "timestamp", "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", + "ordinal": 2, + "type_info": "Bool" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "fee_account_address", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "bloom", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "parent_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" 
+ }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "merkle_root_hash", + "ordinal": 16, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 17, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 18, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 19, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 20, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 22, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 23, "type_info": "Bytea" }, { - "name": "topic3!", - "ordinal": 2, + "name": "l1_gas_price", + "ordinal": 24, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 25, + "type_info": "Int8" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 26, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 27, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 28, "type_info": "Bytea" }, { - "name": "value!", - "ordinal": 3, + "name": "default_aa_code_hash", + "ordinal": 29, "type_info": "Bytea" }, { - "name": "l1_address!", - "ordinal": 4, - "type_info": "Bytea" + "name": "base_fee_per_gas", + "ordinal": 30, + "type_info": "Numeric" }, { - "name": "l2_address!", - "ordinal": 5, + "name": "aux_data_hash", + "ordinal": 31, "type_info": "Bytea" }, { - "name": "symbol!", - "ordinal": 6, - "type_info": "Varchar" + "name": "pass_through_data_hash", + "ordinal": 32, + "type_info": "Bytea" }, { - "name": "name!", - "ordinal": 7, - "type_info": "Varchar" + "name": "meta_parameters_hash", + "ordinal": 33, + "type_info": "Bytea" }, { - "name": "decimals!", - "ordinal": 8, + "name": "protocol_version", + "ordinal": 34, "type_info": "Int4" - }, - { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" } ], "nullable": [ @@ -9902,562 +9580,575 @@ false, false, false, + true, + true, + true, + true, + true, + true, + true, + true, + true, false, - true - ], - "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n tokens.l2_address = events.address OR (events.address = $3 AND tokens.l2_address = $4)\n WHERE tx_hash = ANY($1) AND topic1 = $2\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " - }, - "d6709f3ce8f08f988e10a0e0fb5c06db9488834a85066babaf3d56cf212b4ea0": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Varchar", - "Varchar", - "Int4" - ] - } - }, - "query": "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3,\n token_list_decimals = $4, well_known = true, updated_at = now()\n WHERE l1_address = $1\n " - }, - "d8515595d34dca53e50bbd4ed396f6208e33f596195a5ed02fba9e8364ceb33c": { - "describe": { - "columns": [ - { - "name": 
"bytecode", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1" - }, - "d8e0bb1a349523077356be101808340eab078979390af7d26c71489b5f303d1b": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "UPDATE l1_batches SET skip_proof = TRUE WHERE number = $1" - }, - "da01d59119023c822cffa5dc226e82b2abd4cbd46d3856d7db16289868a27fa1": { - "describe": { - "columns": [ - { - "name": "hashed_key", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - } - ], - "nullable": [ false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT hashed_key, l1_batch_number FROM initial_writes WHERE hashed_key = ANY($1::bytea[])" - }, - "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572": { - "describe": { - "columns": [ - { - "name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" - }, - "dc751a25528a272bac17416f782fce3d0aee44b1ae25be0220718b356fda02e8": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "status", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" - } - ], - "nullable": [ false, + true, + true, + true, + true, false, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int2" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " - }, - "dd330bc075a163974c59ec55ecfddd769d05801963b3e0e840e7f11e7bc6d3e9": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false + false, + true, + true, + true, + true, + false, + true, + true, + true, + true ], "parameters": { "Left": [ + "Int8", + "Int8", "Int8" ] } }, - "query": "SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 ORDER BY l1_batch_number DESC LIMIT $1" + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches WHERE number BETWEEN $1 AND $2 ORDER BY number LIMIT $3" }, - "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { + "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Varchar", - "Bytea" - ] - } - }, - "query": "UPDATE transactions\n 
SET error = $1, updated_at = now()\n WHERE hash = $2" - }, - "ddb3b38be2b6038b63288961f46ba7d3bb7250caff1146e13c5ee77b6a994ffc": { - "describe": { - "columns": [ - { - "name": "circuit_type", - "ordinal": 0, - "type_info": "Text" - }, - { - "name": "result", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - true - ], - "parameters": { - "Left": [ - "Int8", - "Int4" + "ByteaArray" ] } }, - "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " + "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" }, - "deaf3789ac968e299fe0e5a7f1c72494af8ecd664da9c901ec9c0c5e7c29bb65": { + "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { "describe": { "columns": [], "nullable": [], "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray" - ] + "Left": [] } }, - "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at) SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now() FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[]) AS u(hashed_key, address, key, value, tx_hash) ON CONFLICT (hashed_key) DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()" + "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" }, - "e1235572a080ee86724da2ad5f528e27e6442ad47abd22e04af8efec2c59432b": { + "ea17481cab38d370e06e7cf8598daa39faf4414152456aab89695e3133477d3e": { "describe": { "columns": [ { - "name": "id", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_batch_number", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "circuit_id", + "name": "full_fee", "ordinal": 2, - "type_info": "Int2" + "type_info": "Numeric" }, { - "name": "aggregation_round", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int2" + "type_info": "Numeric" }, { - "name": "sequence_number", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "depth", + "name": "nonce", "ordinal": 5, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "is_node_final_proof", + "name": "signature", "ordinal": 6, - "type_info": "Bool" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, - "e14338281eb639856f1c7a8ba6b60fe3914d3f30d0b55cea8fb287209892df03": { - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "key_address", - "ordinal": 0, + "name": "input", + "ordinal": 7, "type_info": "Bytea" }, { - "name": "bytecode", - "ordinal": 1, + "name": "data", + "ordinal": 8, + "type_info": "Jsonb" + }, + { + "name": "received_at", + "ordinal": 9, + "type_info": 
"Timestamp" + }, + { + "name": "priority_op_id", + "ordinal": 10, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "index_in_block", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "error", + "ordinal": 13, + "type_info": "Varchar" + }, + { + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" + }, + { + "name": "gas_per_storage_limit", + "ordinal": 15, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" + }, + { + "name": "tx_format", + "ordinal": 17, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 18, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 19, + "type_info": "Timestamp" + }, + { + "name": "execution_info", + "ordinal": 20, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 21, "type_info": "Bytea" }, { - "name": "creator_address?", - "ordinal": 2, + "name": "in_mempool", + "ordinal": 22, + "type_info": "Bool" + }, + { + "name": "l1_block_number", + "ordinal": 23, + "type_info": "Int4" + }, + { + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, "type_info": "Bytea" }, { - "name": "creator_tx_hash?", - "ordinal": 3, + "name": "paymaster_input", + "ordinal": 26, "type_info": "Bytea" }, { - "name": "created_in_block_number", - "ordinal": 4, + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, "type_info": "Int8" }, { - "name": "verification_info", - "ordinal": 5, - "type_info": "Jsonb" + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "upgrade_id", + "ordinal": 35, + "type_info": "Int4" } ], "nullable": [ false, false, + true, + true, + false, + true, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + false, + true, + false, false, false, + true, + true, + true, + true, + true, false, + true, + true, true ], "parameters": { "Left": [ - "Bytea", - "Bytea", "Bytea" ] } - }, - "query": "\n WITH sl AS (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n )\n SELECT\n sl.key as \"key_address\",\n fd.bytecode,\n txs.initiator_address as \"creator_address?\",\n txs.hash as \"creator_tx_hash?\",\n sl.miniblock_number as \"created_in_block_number\",\n c.verification_info\n FROM sl\n JOIN factory_deps fd ON fd.bytecode_hash = sl.value\n LEFT JOIN transactions txs ON txs.hash = sl.tx_hash\n LEFT JOIN contracts_verification_info c ON c.address = $2\n WHERE sl.value != $3\n " - }, - "e1879cce18ad449d58f02254aa9ae4b115152484187161647d012df798985365": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - } - }, - "query": "\n INSERT INTO scheduler_witness_jobs_fri\n (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, 
'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " - }, - "e1ad7a51afef6bd7a95df3294f64b7b1bdc4c4fc7ae5c4195802177986f3e876": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "status", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" - } - ], - "nullable": [ - false, - false, - false - ], + }, + "query": "\n SELECT * FROM transactions\n WHERE hash = $1\n " + }, + "eb95c3daeffd23d35d4e047e3bb8dc44e93492a6d41cf0fd1624d3ea4a2267c9": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Interval", - "Int2" + "Int8", + "Int8" ] } }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + "query": "UPDATE l1_batches SET predicted_commit_gas_cost = $2, updated_at = now() WHERE number = $1" }, - "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { + "ed50c609371b4588964e29f8757c41973706710090a80eb025ec263ce3d019b4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int4", + "Int2", + "Text" + ] + } + }, + "query": "INSERT INTO gpu_prover_queue_fri (instance_host, instance_port, instance_status, specialized_prover_group_id, zone, created_at, updated_at) VALUES (cast($1::text as inet), $2, 'available', $3, $4, now(), now()) ON CONFLICT(instance_host, instance_port, zone) DO UPDATE SET instance_status='available', specialized_prover_group_id=$3, zone=$4, updated_at=now()" + }, + "eda61fd8012aadc27a2952e96d4238bccb21ec47a17e326a7ae9182d5358d733": { "describe": { "columns": [ { - "name": "id", + "name": "timestamp", "ordinal": 0, - "type_info": "Int4" - }, - { - "name": "eth_tx_id", - "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - false, false ], "parameters": { - "Left": [ - "Text" - ] + "Left": [] } }, - "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" + "query": "SELECT timestamp FROM l1_batches WHERE eth_prove_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" }, - "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { + "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray" + "Int8Array" ] } }, - "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" - }, - "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " }, - "ea1477a0c1509f989c0e2aa308cb59bd34b7ec841d5c6c242257ee8bde27ba83": { + "ee20215e53d1467e96d3db3a95dbb9a8cbb1c9407d0c914b4168637604d91c29": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "scheduler_witness", + "name": "timestamp", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": 
"final_node_aggregations", + "name": "is_finished", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "status", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "processing_started_at", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "time_taken", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Time" + "type_info": "Bytea" }, { - "name": "error", + "name": "bloom", "ordinal": 6, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "updated_at", + "name": "hash", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "attempts", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "aggregation_result_coords", + "name": "commitment", "ordinal": 10, "type_info": "Bytea" }, { - "name": "scheduler_witness_blob_url", + "name": "compressed_write_logs", "ordinal": 11, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "final_node_aggregations_blob_url", + "name": "compressed_contracts", "ordinal": 12, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "is_blob_cleaned", + "name": "eth_prove_tx_id", "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "merkle_root_hash", + "ordinal": 16, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 17, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 18, + "type_info": "ByteaArray" + }, + { + "name": "used_contract_hashes", + "ordinal": 19, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 20, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 22, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 23, + "type_info": "Bytea" + }, + { + "name": "l1_gas_price", + "ordinal": 24, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 25, + "type_info": "Int8" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 26, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 27, "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 30, + "type_info": "Numeric" + }, + { + "name": "aux_data_hash", + "ordinal": 31, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 32, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 34, + "type_info": "Int4" } ], "nullable": [ false, false, - true, false, - true, - true, - true, + false, + false, false, false, false, true, true, true, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int4", - "Int8" - ] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT 
l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " - }, - "eb95c3daeffd23d35d4e047e3bb8dc44e93492a6d41cf0fd1624d3ea4a2267c9": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "UPDATE l1_batches SET predicted_commit_gas_cost = $2, updated_at = now() WHERE number = $1" - }, - "eda61fd8012aadc27a2952e96d4238bccb21ec47a17e326a7ae9182d5358d733": { - "describe": { - "columns": [ - { - "name": "timestamp", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + false, + false, + true, + true, + true, + true, + false, + true, + true, + true, + true ], "parameters": { "Left": [] } }, - "query": "SELECT timestamp FROM l1_batches WHERE eth_prove_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" - }, - "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version FROM l1_batches WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL ORDER BY number DESC LIMIT 1" }, "ee5727dc06a7385969e834556b96bbfdf12a5049a1a1c270f203ef3fa0e8cb94": { "describe": { @@ -10485,6 +10176,26 @@ }, "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7, l2_l1_merkle_root = $8, zkporter_is_available = $9, bootloader_code_hash = $10, rollup_last_leaf_index = $11, aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14, updated_at = now() WHERE number = $15" }, + "ee74b42d1a6a52784124751dae6c7eca3fd36f5a3bb26de56efc2b810da7033a": { + "describe": { + "columns": [ + { + "name": "initial_bootloader_heap_content", + "ordinal": 0, + "type_info": "Jsonb" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT initial_bootloader_heap_content FROM l1_batches WHERE number = $1" + }, "ee7bd820bf35c5c714092494c386eccff25457cff6dc00eb81d9809eaeb95670": { "describe": { "columns": [ @@ -10700,6 +10411,11 @@ "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" + }, + { + "name": "upgrade_id", + "ordinal": 35, + "type_info": "Int4" } ], "nullable": [ @@ -10737,6 +10453,7 @@ true, false, true, + true, true ], "parameters": { 
@@ -10801,76 +10518,6 @@ }, "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'available', updated_at = now(), queue_free_slots = $3\n WHERE instance_host = $1::text::inet\n AND instance_port = $2\n AND instance_status = 'full'\n AND region = $4\n AND zone = $5\n " }, - "f3f7ceb708cc072d66e8609d64ba99e6faa80bf58ff0ce0ef49e882af63522d4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n INSERT INTO node_aggregation_witness_jobs\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_artifacts', now(), now())\n " - }, - "f5abda9631a44b209b759c6800970d9669a8b5f0280e20ee9901f7c831ab4762": { - "describe": { - "columns": [ - { - "name": "value!", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l1_address!", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "l2_address!", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "symbol!", - "ordinal": 3, - "type_info": "Varchar" - }, - { - "name": "name!", - "ordinal": 4, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 5, - "type_info": "Int4" - }, - { - "name": "usd_price?", - "ordinal": 6, - "type_info": "Numeric" - } - ], - "nullable": [ - true, - true, - true, - true, - true, - true, - true - ], - "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT storage.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1)\n " - }, "f5e3c4b23fa0d0686b400b64c42cf78b2219f0cbcf1c9240b77e4132513e36ef": { "describe": { "columns": [ @@ -10903,27 +10550,6 @@ }, "query": "SELECT address, key, value FROM storage_logs WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1) AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1) ORDER BY miniblock_number, operation_number" }, - "f76f7d03cce064c0240da83a4ba75a0ce3fb57a18723c278a3d05eaf085f8994": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "SELECT COUNT(*) as \"count!\" FROM transactions\n WHERE miniblock_number BETWEEN $1 AND $2" - }, "f78960549e6201527454d060d5b483db032f4df80b4269a624f0309ed9a6a38e": { "describe": { "columns": [], @@ -11191,23 +10817,6 @@ }, "query": "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, $3, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" }, - "fb016c8fa4983478572c0e9dcffd058d226cec181f4336925fb2d19752fd7427": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Bytea", - "Text", - "Text", - "Int4" - ] - } - }, - "query": "\n INSERT INTO leaf_aggregation_witness_jobs\n (l1_batch_number, basic_circuits, basic_circuits_inputs, basic_circuits_blob_url, basic_circuits_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', now(), now())\n " - }, "fc52c356fd09d82da89a435d08398d9b773494491404b5c84fc14c1c1d374b59": { "describe": { 
"columns": [], @@ -11219,105 +10828,5 @@ } }, "query": "\n UPDATE contract_verification_requests\n SET status = 'successful', updated_at = now()\n WHERE id = $1\n " - }, - "ff56f2104af03e232748debd5ec2c71495934682fa6ce9212e93084f1eb1087b": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "leaf_layer_subqueues", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "aggregation_outputs", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "number_of_leaf_circuits", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 5, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 6, - "type_info": "Time" - }, - { - "name": "error", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "attempts", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "leaf_layer_subqueues_blob_url", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "aggregation_outputs_blob_url", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 13, - "type_info": "Bool" - } - ], - "nullable": [ - false, - true, - true, - true, - false, - true, - true, - true, - false, - false, - false, - true, - true, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int4", - "Int8" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM node_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs.*\n " } } \ No newline at end of file diff --git a/core/lib/dal/src/accounts_dal.rs b/core/lib/dal/src/accounts_dal.rs new file mode 100644 index 000000000000..bbd43c80ac0b --- /dev/null +++ b/core/lib/dal/src/accounts_dal.rs @@ -0,0 +1,72 @@ +use std::collections::HashMap; + +use zksync_types::{ + tokens::ETHEREUM_ADDRESS, utils::storage_key_for_standard_token_balance, AccountTreeId, + Address, L2_ETH_TOKEN_ADDRESS, U256, +}; + +use crate::{SqlxError, StorageProcessor}; + +#[derive(Debug)] +pub struct AccountsDal<'a, 'c> { + pub(super) storage: &'a mut StorageProcessor<'c>, +} + +impl AccountsDal<'_, '_> { + pub async fn get_balances_for_address( + &mut self, + address: Address, + ) -> Result, SqlxError> { + let token_l2_addresses: Vec
= self + .storage + .tokens_dal() + .get_well_known_token_addresses() + .await + .into_iter() + .map(|(_, l2_address)| l2_address) + .collect(); + + let hashed_keys: Vec<Vec<u8>> = token_l2_addresses + .into_iter() + .map(|mut l2_token_address| { + if l2_token_address == ETHEREUM_ADDRESS { + l2_token_address = L2_ETH_TOKEN_ADDRESS; + } + storage_key_for_standard_token_balance( + AccountTreeId::new(l2_token_address), + &address, + ) + .hashed_key() + .0 + .to_vec() + }) + .collect(); + let rows = sqlx::query!( + r#" + SELECT storage.value as "value!", + tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", + tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" + FROM storage + INNER JOIN tokens ON + storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3) + WHERE storage.hashed_key = ANY($1) AND storage.value != $4 + "#, + &hashed_keys, + L2_ETH_TOKEN_ADDRESS.as_bytes(), + ETHEREUM_ADDRESS.as_bytes(), + vec![0u8; 32] + ) + .fetch_all(self.storage.conn()) + .await?; + + let result: HashMap<Address, U256> = rows + .into_iter() + .map(|row| { + let balance = U256::from_big_endian(&row.value); + (Address::from_slice(&row.l2_address), balance) + }) + .collect(); + + Ok(result) + } +} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 6225460f01ac..d8e928b28382 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1,7 +1,7 @@ use std::{ collections::HashMap, convert::{Into, TryInto}, - time::Instant, + ops, }; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; @@ -10,12 +10,13 @@ use sqlx::Row; use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, - commitment::{BlockMetadata, BlockWithMetadata}, - L1BatchNumber, MiniblockNumber, H256, MAX_GAS_PER_PUBDATA_BYTE, + commitment::{L1BatchMetadata, L1BatchWithMetadata}, + L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, }; use crate::{ - models::storage_block::{StorageBlock, StorageMiniblockHeader}, + instrument::InstrumentExt, + models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, StorageProcessor, }; @@ -34,119 +35,174 @@ impl BlocksDal<'_, '_> { count == 0 } - pub async fn get_sealed_block_number(&mut self) -> L1BatchNumber { - let started_at = Instant::now(); + pub async fn get_sealed_l1_batch_number(&mut self) -> L1BatchNumber { let number = sqlx::query!( "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" ) + .instrument("get_sealed_block_number") + .report_latency() .fetch_one(self.storage.conn()) .await .unwrap() .number .expect("DAL invocation before genesis"); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); L1BatchNumber(number as u32) } pub async fn get_sealed_miniblock_number(&mut self) -> MiniblockNumber { - let started_at = Instant::now(); let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM miniblocks") + .instrument("get_sealed_miniblock_number") + .report_latency() .fetch_one(self.storage.conn()) .await .unwrap() .number .unwrap_or(0); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_miniblock_number"); MiniblockNumber(number as u32) } - pub async fn get_last_block_number_with_metadata(&mut self) -> L1BatchNumber { - let started_at = Instant::now(); + pub async fn get_last_l1_batch_number_with_metadata(&mut self) 
-> L1BatchNumber { let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL") + .instrument("get_last_block_number_with_metadata") + .report_latency() .fetch_one(self.storage.conn()) .await .unwrap() .number .expect("DAL invocation before genesis"); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_last_block_number_with_metadata"); L1BatchNumber(number as u32) } - pub async fn get_blocks_for_eth_tx_id(&mut self, eth_tx_id: u32) -> Vec<L1BatchHeader> { - let blocks = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ - WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1", + pub async fn get_l1_batches_for_eth_tx_id(&mut self, eth_tx_id: u32) -> Vec<L1BatchHeader> { + let l1_batches = sqlx::query_as!( + StorageL1BatchHeader, + "SELECT number, l1_tx_count, l2_tx_count, \ + timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, \ + bloom, priority_ops_onchain_data, \ + used_contract_hashes, base_fee_per_gas, l1_gas_price, \ + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version \ + FROM l1_batches \ + WHERE eth_commit_tx_id = $1 \ + OR eth_prove_tx_id = $1 \ + OR eth_execute_tx_id = $1", eth_tx_id as i32 ) + .instrument("get_l1_batches_for_eth_tx_id") + .with_arg("eth_tx_id", &eth_tx_id) .fetch_all(self.storage.conn()) .await .unwrap(); - blocks.into_iter().map(Into::into).collect() + l1_batches.into_iter().map(Into::into).collect() } - pub async fn get_storage_block(&mut self, number: L1BatchNumber) -> Option<StorageBlock> { + pub async fn get_storage_l1_batch(&mut self, number: L1BatchNumber) -> Option<StorageL1Batch> { sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches WHERE number = $1", + StorageL1Batch, + "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \ + bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \ + compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \ + merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \ + used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \ + l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \ + rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \ + default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \ + meta_parameters_hash, protocol_version \ + FROM l1_batches \ + WHERE number = $1", number.0 as i64 ) + .instrument("get_storage_l1_batch") + .with_arg("number", &number) .fetch_optional(self.storage.conn()) .await .unwrap() } - pub async fn get_block_header(&mut self, number: L1BatchNumber) -> Option<L1BatchHeader> { - self.get_storage_block(number).await.map(Into::into) + pub async fn get_l1_batch_header(&mut self, number: L1BatchNumber) -> Option<L1BatchHeader> { + sqlx::query_as!( + StorageL1BatchHeader, + "SELECT number, l1_tx_count, l2_tx_count, \ + timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, \ + bloom, priority_ops_onchain_data, \ + used_contract_hashes, base_fee_per_gas, l1_gas_price, \ + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version \ + FROM l1_batches \ + WHERE number = $1", + number.0 as i64 + ) + .instrument("get_l1_batch_header") + .with_arg("number", &number) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(Into::into) + } + + /// Returns initial bootloader heap content for the specified L1 batch. 
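// The getter that follows (and insert_l1_batch further down) treat the bootloader heap
// as a JSONB round-trip: `serde_json::to_value` on the way into the l1_batches table,
// `serde_json::from_value` on the way back out. A self-contained sketch of that
// round-trip in isolation, relying on the serde support for zksync_types::U256 that the
// surrounding code already depends on.
use zksync_types::U256;

fn bootloader_heap_roundtrip() {
    let heap: Vec<(usize, U256)> = vec![(0, U256::from(1u64)), (32, U256::MAX)];
    let json = serde_json::to_value(&heap).expect("heap serializes to JSON");
    let decoded: Vec<(usize, U256)> =
        serde_json::from_value(json).expect("JSON decodes back into the heap");
    assert_eq!(decoded, heap);
}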
+ pub async fn get_initial_bootloader_heap( + &mut self, + number: L1BatchNumber, + ) -> Option<Vec<(usize, U256)>> { + let row = sqlx::query!( + "SELECT initial_bootloader_heap_content FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .instrument("get_initial_bootloader_heap") + .report_latency() + .with_arg("number", &number) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + + let heap = serde_json::from_value(row.initial_bootloader_heap_content) + .expect("invalid value for initial_bootloader_heap_content in the DB"); + Some(heap) } pub async fn set_eth_tx_id( &mut self, - first_block: L1BatchNumber, - last_block: L1BatchNumber, + number_range: ops::RangeInclusive<L1BatchNumber>, eth_tx_id: u32, aggregation_type: AggregatedActionType, ) { match aggregation_type { - AggregatedActionType::CommitBlocks => { + AggregatedActionType::Commit => { sqlx::query!( "UPDATE l1_batches \ SET eth_commit_tx_id = $1, updated_at = now() \ WHERE number BETWEEN $2 AND $3", eth_tx_id as i32, - first_block.0 as i64, - last_block.0 as i64 + number_range.start().0 as i64, + number_range.end().0 as i64 ) .execute(self.storage.conn()) .await .unwrap(); } - AggregatedActionType::PublishProofBlocksOnchain => { + AggregatedActionType::PublishProofOnchain => { sqlx::query!( "UPDATE l1_batches \ SET eth_prove_tx_id = $1, updated_at = now() \ WHERE number BETWEEN $2 AND $3", eth_tx_id as i32, - first_block.0 as i64, - last_block.0 as i64 + number_range.start().0 as i64, + number_range.end().0 as i64 ) .execute(self.storage.conn()) .await .unwrap(); } - AggregatedActionType::ExecuteBlocks => { + AggregatedActionType::Execute => { sqlx::query!( "UPDATE l1_batches \ SET eth_execute_tx_id = $1, updated_at = now() \ WHERE number BETWEEN $2 AND $3", eth_tx_id as i32, - first_block.0 as i64, - last_block.0 as i64 + number_range.start().0 as i64, + number_range.end().0 as i64 ) .execute(self.storage.conn()) .await @@ -157,25 +213,26 @@ impl BlocksDal<'_, '_> { pub async fn insert_l1_batch( &mut self, - block: &L1BatchHeader, + header: &L1BatchHeader, + initial_bootloader_contents: &[(usize, U256)], predicted_block_gas: BlockGasCount, ) { - let priority_onchain_data: Vec<Vec<u8>> = block + let priority_onchain_data: Vec<Vec<u8>> = header .priority_ops_onchain_data .iter() .map(|data| data.clone().into()) .collect(); - let l2_to_l1_logs: Vec<_> = block + let l2_to_l1_logs: Vec<_> = header .l2_to_l1_logs .iter() .map(|log| log.to_bytes().to_vec()) .collect(); - let initial_bootloader_contents = serde_json::to_value(&block.initial_bootloader_contents) + let initial_bootloader_contents = serde_json::to_value(initial_bootloader_contents) .expect("failed to serialize initial_bootloader_contents to JSON value"); - let used_contract_hashes = serde_json::to_value(&block.used_contract_hashes) + let used_contract_hashes = serde_json::to_value(&header.used_contract_hashes) .expect("failed to serialize used_contract_hashes to JSON value"); - let base_fee_per_gas = BigDecimal::from_u64(block.base_fee_per_gas) + let base_fee_per_gas = BigDecimal::from_u64(header.base_fee_per_gas) .expect("block.base_fee_per_gas should fit in u64"); sqlx::query!( @@ -185,18 +242,18 @@ "INSERT INTO l1_batches (\ number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, \ bloom, priority_ops_onchain_data, \ predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, \ initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, \ - l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, \ - created_at, updated_at\ - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, 
$16, $17, $18, $19, $20, now(), now())", - block.number.0 as i64, - block.l1_tx_count as i32, - block.l2_tx_count as i32, - block.timestamp as i64, - block.is_finished, - block.fee_account_address.as_bytes(), + l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, \ + created_at, updated_at \ + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, now(), now())", + header.number.0 as i64, + header.l1_tx_count as i32, + header.l2_tx_count as i32, + header.timestamp as i64, + header.is_finished, + header.fee_account_address.as_bytes(), &l2_to_l1_logs, - &block.l2_to_l1_messages, - block.bloom.as_bytes(), + &header.l2_to_l1_messages, + header.bloom.as_bytes(), &priority_onchain_data, predicted_block_gas.commit as i64, predicted_block_gas.prove as i64, @@ -204,16 +261,17 @@ impl BlocksDal<'_, '_> { initial_bootloader_contents, used_contract_hashes, base_fee_per_gas, - block.l1_gas_price as i64, - block.l2_fair_gas_price as i64, - block + header.l1_gas_price as i64, + header.l2_fair_gas_price as i64, + header .base_system_contracts_hashes .bootloader .as_bytes(), - block + header .base_system_contracts_hashes .default_aa - .as_bytes() + .as_bytes(), + header.protocol_version.map(|v| v as i32), ) .execute(self.storage.conn()) .await @@ -223,14 +281,13 @@ impl BlocksDal<'_, '_> { pub async fn insert_miniblock(&mut self, miniblock_header: &MiniblockHeader) { let base_fee_per_gas = BigDecimal::from_u64(miniblock_header.base_fee_per_gas) .expect("base_fee_per_gas should fit in u64"); - sqlx::query!( - "INSERT INTO miniblocks (\ + "INSERT INTO miniblocks ( \ number, timestamp, hash, l1_tx_count, l2_tx_count, \ base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \ - bootloader_code_hash, default_aa_code_hash, \ - created_at, updated_at\ - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())", + bootloader_code_hash, default_aa_code_hash, protocol_version, \ + created_at, updated_at \ + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, now(), now())", miniblock_header.number.0 as i64, miniblock_header.timestamp as i64, miniblock_header.hash.as_bytes(), @@ -248,6 +305,7 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + miniblock_header.protocol_version.map(|v| v as i32), ) .execute(self.storage.conn()) .await @@ -259,7 +317,7 @@ impl BlocksDal<'_, '_> { StorageMiniblockHeader, "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \ base_fee_per_gas, l1_gas_price, l2_fair_gas_price, \ - bootloader_code_hash, default_aa_code_hash \ + bootloader_code_hash, default_aa_code_hash, protocol_version \ FROM miniblocks \ ORDER BY number DESC \ LIMIT 1", @@ -278,7 +336,7 @@ impl BlocksDal<'_, '_> { StorageMiniblockHeader, "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \ base_fee_per_gas, l1_gas_price, l2_fair_gas_price, \ - bootloader_code_hash, default_aa_code_hash \ + bootloader_code_hash, default_aa_code_hash, protocol_version \ FROM miniblocks \ WHERE number = $1", miniblock_number.0 as i64, @@ -304,11 +362,7 @@ impl BlocksDal<'_, '_> { .unwrap(); } - pub async fn save_block_metadata( - &mut self, - block_number: L1BatchNumber, - block_metadata: &BlockMetadata, - ) { + pub async fn save_genesis_l1_batch_metadata(&mut self, metadata: &L1BatchMetadata) { sqlx::query!( "UPDATE l1_batches \ SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, \ @@ -318,34 +372,33 @@ impl BlocksDal<'_, '_> { 
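// Visible throughout this file's hunks: hand-rolled timing pairs of
// `let started_at = Instant::now(); ... metrics::histogram!("dal.request", ...)` are
// being replaced with fluent `.instrument("method").report_latency().with_arg(...)`
// calls from the new core/lib/dal/src/instrument.rs module. A minimal sketch of the
// underlying idea as a generic future adapter; the real InstrumentExt wraps sqlx query
// types and is richer than this.
use std::{future::Future, time::Instant};

async fn with_reported_latency<F, T>(method: &'static str, query_future: F) -> T
where
    F: Future<Output = T>,
{
    let started_at = Instant::now();
    let output = query_future.await;
    // Same metric name and label scheme as the inline calls being removed.
    metrics::histogram!("dal.request", started_at.elapsed(), "method" => method);
    output
}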
aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14, \ updated_at = now() \ WHERE number = $15", - block_metadata.root_hash.as_bytes(), - block_metadata.merkle_root_hash.as_bytes(), - block_metadata.commitment.as_bytes(), - block_metadata.block_meta_params.default_aa_code_hash.as_bytes(), - block_metadata.repeated_writes_compressed, - block_metadata.initial_writes_compressed, - block_metadata.l2_l1_messages_compressed, - block_metadata.l2_l1_merkle_root.as_bytes(), - block_metadata.block_meta_params.zkporter_is_available, - block_metadata.block_meta_params.bootloader_code_hash.as_bytes(), - block_metadata.rollup_last_leaf_index as i64, - block_metadata.aux_data_hash.as_bytes(), - block_metadata.pass_through_data_hash.as_bytes(), - block_metadata.meta_parameters_hash.as_bytes(), - block_number.0 as i64, + metadata.root_hash.as_bytes(), + metadata.merkle_root_hash.as_bytes(), + metadata.commitment.as_bytes(), + metadata.block_meta_params.default_aa_code_hash.as_bytes(), + metadata.repeated_writes_compressed, + metadata.initial_writes_compressed, + metadata.l2_l1_messages_compressed, + metadata.l2_l1_merkle_root.as_bytes(), + metadata.block_meta_params.zkporter_is_available, + metadata.block_meta_params.bootloader_code_hash.as_bytes(), + metadata.rollup_last_leaf_index as i64, + metadata.aux_data_hash.as_bytes(), + metadata.pass_through_data_hash.as_bytes(), + metadata.meta_parameters_hash.as_bytes(), + 0, ) .execute(self.storage.conn()) .await .unwrap(); } - pub async fn save_blocks_metadata( + pub async fn save_l1_batch_metadata( &mut self, - block_number: L1BatchNumber, - block_metadata: &BlockMetadata, + number: L1BatchNumber, + metadata: &L1BatchMetadata, previous_root_hash: H256, ) { - let started_at = Instant::now(); let update_result = sqlx::query!( "UPDATE l1_batches \ SET hash = $1, merkle_root_hash = $2, commitment = $3, \ @@ -355,21 +408,24 @@ impl BlocksDal<'_, '_> { aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, \ updated_at = now() \ WHERE number = $14 AND hash IS NULL", - block_metadata.root_hash.as_bytes(), - block_metadata.merkle_root_hash.as_bytes(), - block_metadata.commitment.as_bytes(), - block_metadata.repeated_writes_compressed, - block_metadata.initial_writes_compressed, - block_metadata.l2_l1_messages_compressed, - block_metadata.l2_l1_merkle_root.as_bytes(), - block_metadata.block_meta_params.zkporter_is_available, - previous_root_hash.0.to_vec(), - block_metadata.rollup_last_leaf_index as i64, - block_metadata.aux_data_hash.as_bytes(), - block_metadata.pass_through_data_hash.as_bytes(), - block_metadata.meta_parameters_hash.as_bytes(), - block_number.0 as i64, + metadata.root_hash.as_bytes(), + metadata.merkle_root_hash.as_bytes(), + metadata.commitment.as_bytes(), + metadata.repeated_writes_compressed, + metadata.initial_writes_compressed, + metadata.l2_l1_messages_compressed, + metadata.l2_l1_merkle_root.as_bytes(), + metadata.block_meta_params.zkporter_is_available, + previous_root_hash.as_bytes(), + metadata.rollup_last_leaf_index as i64, + metadata.aux_data_hash.as_bytes(), + metadata.pass_through_data_hash.as_bytes(), + metadata.meta_parameters_hash.as_bytes(), + number.0 as i64, ) + .instrument("save_blocks_metadata") + .with_arg("number", &number) + .report_latency() .execute(self.storage.conn()) .await .unwrap(); @@ -378,12 +434,12 @@ impl BlocksDal<'_, '_> { vlog::debug!( "L1 batch {} info wasn't updated. 
Details: root_hash: {:?}, merkle_root_hash: {:?}, \ parent_hash: {:?}, commitment: {:?}, l2_l1_merkle_root: {:?}", - block_number.0 as i64, - block_metadata.root_hash, - block_metadata.merkle_root_hash, + number.0 as i64, + metadata.root_hash, + metadata.merkle_root_hash, previous_root_hash, - block_metadata.commitment, - block_metadata.l2_l1_merkle_root + metadata.commitment, + metadata.l2_l1_merkle_root ); // block was already processed. Verify that existing hashes match @@ -392,12 +448,15 @@ FROM l1_batches \ WHERE number = $1 AND hash = $2 AND merkle_root_hash = $3 \ AND parent_hash = $4 AND l2_l1_merkle_root = $5", - block_number.0 as i64, - block_metadata.root_hash.as_bytes(), - block_metadata.merkle_root_hash.as_bytes(), + number.0 as i64, + metadata.root_hash.as_bytes(), + metadata.merkle_root_hash.as_bytes(), previous_root_hash.as_bytes(), - block_metadata.l2_l1_merkle_root.as_bytes(), + metadata.l2_l1_merkle_root.as_bytes(), ) + .instrument("get_matching_blocks_metadata") + .with_arg("number", &number) + .report_latency() .fetch_one(self.storage.conn()) .await .unwrap() @@ -407,33 +466,43 @@ matched == 1, "Root hash verification failed. Hashes for L1 batch #{} do not match the expected values \ (expected state hash: {:?}, L2 to L1 logs hash: {:?})", - block_number, - block_metadata.root_hash, - block_metadata.l2_l1_merkle_root + number, + metadata.root_hash, + metadata.l2_l1_merkle_root ); } - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_blocks_metadata"); } - pub async fn get_last_committed_to_eth_block(&mut self) -> Option<BlockWithMetadata> { + pub async fn get_last_committed_to_eth_l1_batch(&mut self) -> Option<L1BatchWithMetadata> { // We can get 0 block for the first transaction let block = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ + StorageL1Batch, + "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \ + bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \ + compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \ + merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \ + used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \ + l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \ + rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \ + default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \ + meta_parameters_hash, protocol_version \ + FROM l1_batches \ WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL \ - ORDER BY number DESC LIMIT 1", + ORDER BY number DESC \ + LIMIT 1", ) + .instrument("get_last_committed_to_eth_l1_batch") .fetch_one(self.storage.conn()) .await .unwrap(); // genesis block is first generated without commitment, we should wait for the tree to set it. block.commitment.as_ref()?; - self.get_block_with_metadata(block).await + self.get_l1_batch_with_metadata(block).await } - /// Returns the number of the last block for which an Ethereum commit tx was sent and confirmed. - pub async fn get_number_of_last_block_committed_on_eth(&mut self) -> Option<L1BatchNumber> { + /// Returns the number of the last L1 batch for which an Ethereum commit tx was sent and confirmed. 
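// save_l1_batch_metadata above is deliberately idempotent: the UPDATE only fires
// `WHERE number = $14 AND hash IS NULL`, and when zero rows are affected the code
// re-reads the batch and asserts that the stored hashes equal what it was about to
// write. A condensed sketch of that "write once, then verify" pattern; the table and
// column set are trimmed down for brevity.
use sqlx::PgPool;

async fn save_once_or_verify(pool: &PgPool, number: i64, hash: &[u8]) -> sqlx::Result<()> {
    let updated =
        sqlx::query("UPDATE l1_batches SET hash = $1 WHERE number = $2 AND hash IS NULL")
            .bind(hash)
            .bind(number)
            .execute(pool)
            .await?
            .rows_affected();
    if updated == 0 {
        // Already written (e.g. by a concurrent run): the stored value must match
        // exactly, otherwise local and persisted state have diverged.
        let matched: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM l1_batches WHERE number = $1 AND hash = $2",
        )
        .bind(number)
        .bind(hash)
        .fetch_one(pool)
        .await?;
        assert_eq!(matched, 1, "root hash verification failed for L1 batch #{number}");
    }
    Ok(())
}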
+ pub async fn get_number_of_last_l1_batch_committed_on_eth(&mut self) -> Option<L1BatchNumber> {
sqlx::query!(
"SELECT number FROM l1_batches \
LEFT JOIN eth_txs_history AS commit_tx \
@@ -447,7 +516,7 @@ impl BlocksDal<'_, '_> {
.map(|row| L1BatchNumber(row.number as u32))
}
- /// Returns the number of the last block for which an Ethereum prove tx exists in the database.
+ /// Returns the number of the last L1 batch for which an Ethereum prove tx exists in the database.
pub async fn get_last_l1_batch_with_prove_tx(&mut self) -> L1BatchNumber {
let row = sqlx::query!(
"SELECT COALESCE(MAX(number), 0) AS \"number!\" \
@@ -461,8 +530,8 @@ impl BlocksDal<'_, '_> {
L1BatchNumber(row.number as u32)
}
- /// Returns the number of the last block for which an Ethereum prove tx was sent and confirmed.
- pub async fn get_number_of_last_block_proven_on_eth(&mut self) -> Option<L1BatchNumber> {
+ /// Returns the number of the last L1 batch for which an Ethereum prove tx was sent and confirmed.
+ pub async fn get_number_of_last_l1_batch_proven_on_eth(&mut self) -> Option<L1BatchNumber> {
sqlx::query!(
"SELECT number FROM l1_batches \
LEFT JOIN eth_txs_history AS prove_tx \
@@ -476,8 +545,8 @@ impl BlocksDal<'_, '_> {
.map(|record| L1BatchNumber(record.number as u32))
}
- /// Returns the number of the last block for which an Ethereum execute tx was sent and confirmed.
- pub async fn get_number_of_last_block_executed_on_eth(&mut self) -> Option<L1BatchNumber> {
+ /// Returns the number of the last L1 batch for which an Ethereum execute tx was sent and confirmed.
+ pub async fn get_number_of_last_l1_batch_executed_on_eth(&mut self) -> Option<L1BatchNumber> {
sqlx::query!(
"SELECT number FROM l1_batches \
LEFT JOIN eth_txs_history as execute_tx \
@@ -491,18 +560,29 @@ impl BlocksDal<'_, '_> {
.map(|row| L1BatchNumber(row.number as u32))
}
- /// This method returns blocks that are confirmed on L1. That is, it doesn't wait for the proofs to be generated.
- pub async fn get_ready_for_dummy_proof_blocks(
+ /// This method returns batches that are confirmed on L1. That is, it doesn't wait for the proofs to be generated.
+ pub async fn get_ready_for_dummy_proof_l1_batches(
&mut self,
limit: usize,
- ) -> Vec<BlockWithMetadata> {
+ ) -> Vec<L1BatchWithMetadata> {
let raw_batches = sqlx::query_as!(
- StorageBlock,
- "SELECT * FROM l1_batches \
+ StorageL1Batch,
+ "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \
+ bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \
+ compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \
+ merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \
+ used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \
+ l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \
+ rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \
+ default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \
+ meta_parameters_hash, protocol_version \
+ FROM l1_batches \
WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL \
ORDER BY number LIMIT $1",
limit as i32
)
+ .instrument("get_ready_for_dummy_proof_l1_batches")
+ .with_arg("limit", &limit)
.fetch_all(self.storage.conn())
.await
.unwrap();
@@ -510,11 +590,14 @@ impl BlocksDal<'_, '_> {
self.map_l1_batches(raw_batches).await
}
- async fn map_l1_batches(&mut self, raw_batches: Vec<StorageBlock>) -> Vec<BlockWithMetadata> {
+ async fn map_l1_batches(
+ &mut self,
+ raw_batches: Vec<StorageL1Batch>,
+ ) -> Vec<L1BatchWithMetadata> {
let mut l1_batches = Vec::with_capacity(raw_batches.len());
for raw_batch in raw_batches {
let block = self
- .get_block_with_metadata(raw_batch)
+ .get_l1_batch_with_metadata(raw_batch)
.await
.expect("Block should be complete");
l1_batches.push(block);
@@ -532,25 +615,25 @@ impl BlocksDal<'_, '_> {
.unwrap();
}
- /// This method returns blocks that are committed on L1 and witness jobs for them are skipped.
- pub async fn get_skipped_for_proof_blocks(&mut self, limit: usize) -> Vec<BlockWithMetadata> {
+ /// This method returns batches that are committed on L1 and for which witness jobs are skipped.
+ pub async fn get_skipped_for_proof_l1_batches(
+ &mut self,
+ limit: usize,
+ ) -> Vec<L1BatchWithMetadata> {
let last_proved_block_number = self.get_last_l1_batch_with_prove_tx().await;
// Witness jobs can be processed out of order, so `WHERE l1_batches.number - row_number = $1`
// is used to avoid having gaps in the list of blocks to send dummy proofs for.
- // We need to manually list all the columns in `l1_batches` table here - we cannot use `*`
- // because there is one extra column (`row_number`).
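+ // Worked example (editorial, not part of the original change): suppose the last
+ // proved batch is #10 and the committed-but-unproved batches are #11, #12 and #14.
+ // `row_number() OVER (ORDER BY number ASC)` assigns them row numbers 1, 2, 3,
+ // so `number - row_number` evaluates to 10, 10, 11. Filtering on
+ // `number - row_number = $1` with `$1 = 10` therefore selects only the gapless
+ // prefix #11 and #12; #14 becomes eligible once the gap at #13 is filled.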
let raw_batches = sqlx::query_as!( - StorageBlock, + StorageL1Batch, "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \ bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \ - compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, \ - updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, \ - predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, \ + compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \ + merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \ used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \ l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \ rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \ default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \ - meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit \ + meta_parameters_hash, protocol_version \ FROM \ (SELECT l1_batches.*, row_number() OVER (ORDER BY number ASC) AS row_number \ FROM l1_batches \ @@ -563,6 +646,8 @@ impl BlocksDal<'_, '_> { last_proved_block_number.0 as i32, limit as i32 ) + .instrument("get_skipped_for_proof_l1_batches") + .with_arg("limit", &limit) .fetch_all(self.storage.conn()) .await .unwrap(); @@ -570,19 +655,30 @@ impl BlocksDal<'_, '_> { self.map_l1_batches(raw_batches).await } - pub async fn get_ready_for_execute_blocks( + pub async fn get_ready_for_execute_l1_batches( &mut self, limit: usize, max_l1_batch_timestamp_millis: Option, - ) -> Vec { + ) -> Vec { let raw_batches = match max_l1_batch_timestamp_millis { None => sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ + StorageL1Batch, + "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \ + bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \ + compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \ + merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \ + used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \ + l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \ + rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \ + default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \ + meta_parameters_hash, protocol_version \ + FROM l1_batches \ WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ ORDER BY number LIMIT $1", limit as i32, ) + .instrument("get_ready_for_execute_l1_batches/no_max_timestamp") + .with_arg("limit", &limit) .fetch_all(self.storage.conn()) .await .unwrap(), @@ -591,7 +687,7 @@ impl BlocksDal<'_, '_> { // Do not lose the precision here, otherwise we can skip some L1 batches. // Mostly needed for tests. let max_l1_batch_timestamp_seconds = max_l1_batch_timestamp_millis as f64 / 1_000.0; - self.raw_ready_for_execute_blocks(max_l1_batch_timestamp_seconds, limit) + self.raw_ready_for_execute_l1_batches(max_l1_batch_timestamp_seconds, limit) .await } }; @@ -599,11 +695,11 @@ impl BlocksDal<'_, '_> { self.map_l1_batches(raw_batches).await } - async fn raw_ready_for_execute_blocks( + async fn raw_ready_for_execute_l1_batches( &mut self, max_l1_batch_timestamp_seconds: f64, limit: usize, - ) -> Vec { + ) -> Vec { // We need to find the first L1 batch that is supposed to be executed. 
// Here we ignore the time delay, so we just take the first L1 batch that is ready for execution.
let row = sqlx::query!(
@@ -618,6 +714,11 @@ impl BlocksDal<'_, '_> {
let Some(row) = row else { return vec![] };
let expected_started_point = row.number;
+ // After the Postgres 12->14 upgrade, `EXTRACT(epoch FROM ...)` yields NUMERIC
+ // instead of DOUBLE PRECISION, so the parameter must be bound as a `BigDecimal`.
+ let max_l1_batch_timestamp_seconds_bd =
+ BigDecimal::from_f64(max_l1_batch_timestamp_seconds)
+ .expect("Failed to convert f64 to BigDecimal");
+
// Find the last L1 batch that is ready for execution.
let row = sqlx::query!(
"SELECT max(l1_batches.number) FROM l1_batches \
@@ -627,7 +728,7 @@ impl BlocksDal<'_, '_> {
AND eth_prove_tx_id IS NOT NULL \
AND eth_execute_tx_id IS NULL \
AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1",
- max_l1_batch_timestamp_seconds,
+ max_l1_batch_timestamp_seconds_bd,
)
.fetch_one(self.storage.conn())
.await
@@ -638,14 +739,26 @@ impl BlocksDal<'_, '_> {
// the expected started point and the max ready to send block because we send them to the L1 sequentially.
assert!(max_ready_to_send_block >= expected_started_point);
sqlx::query_as!(
- StorageBlock,
- "SELECT * FROM l1_batches \
+ StorageL1Batch,
+ "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \
+ bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \
+ compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \
+ merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \
+ used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \
+ l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \
+ rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \
+ default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \
+ meta_parameters_hash, protocol_version \
+ FROM l1_batches \
WHERE number BETWEEN $1 AND $2 \
ORDER BY number LIMIT $3",
expected_started_point as i32,
max_ready_to_send_block,
limit as i32,
)
+ .instrument("get_ready_for_execute_l1_batches")
+ .with_arg("numbers", &(expected_started_point..=max_ready_to_send_block))
+ .with_arg("limit", &limit)
.fetch_all(self.storage.conn())
.await
.unwrap()
@@ -654,24 +767,42 @@ impl BlocksDal<'_, '_> {
}
}
- pub async fn get_ready_for_commit_blocks(
+ pub async fn get_ready_for_commit_l1_batches(
&mut self,
limit: usize,
bootloader_hash: H256,
default_aa_hash: H256,
- ) -> Vec<BlockWithMetadata> {
+ protocol_version_id: ProtocolVersionId,
+ ) -> Vec<L1BatchWithMetadata> {
let raw_batches = sqlx::query_as!(
- StorageBlock,
- "SELECT * FROM l1_batches \
+ StorageL1Batch,
+ "SELECT number, l1_batches.timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \
+ bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \
+ compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, \
+ merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, \
+ used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \
+ l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \
+ rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, \
+ l1_batches.default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \
+ meta_parameters_hash, protocol_version \
+ FROM l1_batches \
+ JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version \
WHERE eth_commit_tx_id IS NULL \
AND number != 0 \
- AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 \
+ AND protocol_versions.bootloader_code_hash
= $1 AND protocol_versions.default_account_code_hash = $2 \ AND commitment IS NOT NULL \ - ORDER BY number LIMIT $3", + AND (protocol_versions.id = $3 OR protocol_versions.upgrade_tx_hash IS NULL) \ + ORDER BY number LIMIT $4", bootloader_hash.as_bytes(), default_aa_hash.as_bytes(), + protocol_version_id as i32, limit as i64, ) + .instrument("get_ready_for_commit_l1_batches") + .with_arg("limit", &limit) + .with_arg("bootloader_hash", &bootloader_hash) + .with_arg("default_aa_hash", &default_aa_hash) + .with_arg("protocol_version_id", &protocol_version_id) .fetch_all(self.storage.conn()) .await .unwrap(); @@ -679,7 +810,7 @@ impl BlocksDal<'_, '_> { self.map_l1_batches(raw_batches).await } - pub async fn get_block_state_root(&mut self, number: L1BatchNumber) -> Option { + pub async fn get_l1_batch_state_root(&mut self, number: L1BatchNumber) -> Option { sqlx::query!( "SELECT hash FROM l1_batches WHERE number = $1", number.0 as i64 @@ -691,7 +822,7 @@ impl BlocksDal<'_, '_> { .map(|hash| H256::from_slice(&hash)) } - pub async fn get_block_state_root_and_timestamp( + pub async fn get_l1_batch_state_root_and_timestamp( &mut self, number: L1BatchNumber, ) -> Option<(H256, u64)> { @@ -706,48 +837,47 @@ impl BlocksDal<'_, '_> { Some((H256::from_slice(&row.hash?), row.timestamp as u64)) } - pub async fn get_newest_block_header(&mut self) -> L1BatchHeader { - let last_block = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches ORDER BY number DESC LIMIT 1" + pub async fn get_newest_l1_batch_header(&mut self) -> L1BatchHeader { + let last_l1_batch = sqlx::query_as!( + StorageL1BatchHeader, + "SELECT number, l1_tx_count, l2_tx_count, \ + timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, \ + bloom, priority_ops_onchain_data, \ + used_contract_hashes, base_fee_per_gas, l1_gas_price, \ + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version \ + FROM l1_batches \ + ORDER BY number DESC \ + LIMIT 1" ) + .instrument("get_newest_l1_batch_header") .fetch_one(self.storage.conn()) .await .unwrap(); - last_block.into() + last_l1_batch.into() } - pub async fn get_block_metadata(&mut self, number: L1BatchNumber) -> Option { - let l1_batch: Option = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches WHERE number = $1", - number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap(); - - if let Some(bl) = l1_batch { - self.get_block_with_metadata(bl).await - } else { - None - } + pub async fn get_l1_batch_metadata( + &mut self, + number: L1BatchNumber, + ) -> Option { + let l1_batch = self.get_storage_l1_batch(number).await?; + self.get_l1_batch_with_metadata(l1_batch).await } - pub async fn get_block_with_metadata( + pub async fn get_l1_batch_with_metadata( &mut self, - storage_block: StorageBlock, - ) -> Option { + storage_batch: StorageL1Batch, + ) -> Option { let unsorted_factory_deps = self - .get_l1_batch_factory_deps(L1BatchNumber(storage_block.number as u32)) + .get_l1_batch_factory_deps(L1BatchNumber(storage_batch.number as u32)) .await; - let block_header = storage_block.clone().into(); - let block_metadata = storage_block.try_into().ok()?; + let header = storage_batch.clone().into(); + let metadata = storage_batch.try_into().ok()?; - Some(BlockWithMetadata::new( - block_header, - block_metadata, + Some(L1BatchWithMetadata::new( + header, + metadata, unsorted_factory_deps, )) } @@ -797,26 +927,25 @@ impl BlocksDal<'_, '_> { .unwrap(); } - /// Returns sum of predicted gas costs or given block range. 
- /// Panics if the sum doesn't fit into usize.
- pub async fn get_blocks_predicted_gas(
+ /// Returns the sum of predicted gas costs for the given L1 batch range.
+ /// Panics if the sum doesn't fit into `u32`.
+ pub async fn get_l1_batches_predicted_gas(
&mut self,
- from_block: L1BatchNumber,
- to_block: L1BatchNumber,
+ number_range: ops::RangeInclusive<L1BatchNumber>,
op_type: AggregatedActionType,
) -> u32 {
let column_name = match op_type {
- AggregatedActionType::CommitBlocks => "predicted_commit_gas_cost",
- AggregatedActionType::PublishProofBlocksOnchain => "predicted_prove_gas_cost",
- AggregatedActionType::ExecuteBlocks => "predicted_execute_gas_cost",
+ AggregatedActionType::Commit => "predicted_commit_gas_cost",
+ AggregatedActionType::PublishProofOnchain => "predicted_prove_gas_cost",
+ AggregatedActionType::Execute => "predicted_execute_gas_cost",
};
let sql_query_str = format!(
"SELECT COALESCE(SUM({column_name}), 0) AS sum FROM l1_batches \
WHERE number BETWEEN $1 AND $2"
);
sqlx::query(&sql_query_str)
- .bind(from_block.0 as i64)
- .bind(to_block.0 as i64)
+ .bind(number_range.start().0 as i64)
+ .bind(number_range.end().0 as i64)
.fetch_one(self.storage.conn())
.await
.unwrap()
@@ -825,16 +954,16 @@ impl BlocksDal<'_, '_> {
.expect("Sum of predicted gas costs should fit into u32")
}
- pub async fn update_predicted_block_commit_gas(
+ pub async fn update_predicted_l1_batch_commit_gas(
&mut self,
- l1_batch_number: L1BatchNumber,
+ number: L1BatchNumber,
predicted_gas_cost: u32,
) {
sqlx::query!(
"UPDATE l1_batches \
SET predicted_commit_gas_cost = $2, updated_at = now() \
WHERE number = $1",
- l1_batch_number.0 as i64,
+ number.0 as i64,
predicted_gas_cost as i64
)
.execute(self.storage.conn())
@@ -978,6 +1107,36 @@ impl BlocksDal<'_, '_> {
.unwrap()
.map(|row| row.timestamp as u64)
}
+
+ pub async fn get_batch_protocol_version_id(
+ &mut self,
+ l1_batch_number: L1BatchNumber,
+ ) -> Option<ProtocolVersionId> {
+ {
+ let row = sqlx::query!(
+ "SELECT protocol_version FROM l1_batches WHERE number = $1",
+ l1_batch_number.0 as i64
+ )
+ .fetch_optional(self.storage.conn())
+ .await
+ .unwrap()?;
+ row.protocol_version.map(|v| (v as u16).try_into().unwrap())
+ }
+ }
+
+ pub async fn get_miniblock_timestamp(
+ &mut self,
+ miniblock_number: MiniblockNumber,
+ ) -> Option<u64> {
+ sqlx::query!(
+ "SELECT timestamp FROM miniblocks WHERE number = $1",
+ miniblock_number.0 as i64,
+ )
+ .fetch_optional(self.storage.conn())
+ .await
+ .unwrap()
+ .map(|row| row.timestamp as u64)
+ }
}
/// These functions should only be used for tests.
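// Usage sketch for the range-based API above (editorial; the batch numbers and
// the `storage` handle are illustrative, not taken from the diff):
//
//     let commit_gas = storage
//         .blocks_dal()
//         .get_l1_batches_predicted_gas(
//             L1BatchNumber(100)..=L1BatchNumber(110),
//             AggregatedActionType::Commit,
//         )
//         .await;
//
// This sums the per-batch `predicted_commit_gas_cost` column over the range and
// panics if the total does not fit into `u32`.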
@@ -1005,21 +1164,78 @@ impl BlocksDal<'_, '_> { mod tests { use db_test_macro::db_test; use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::Address; + use zksync_types::{l2_to_l1_log::L2ToL1Log, Address, ProtocolVersion, ProtocolVersionId}; use super::*; use crate::ConnectionPool; #[db_test(dal_crate)] - async fn getting_predicted_gas(pool: ConnectionPool) { + async fn loading_l1_batch_header(pool: ConnectionPool) { let mut conn = pool.access_storage().await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; + + let mut header = L1BatchHeader::new( + L1BatchNumber(1), + 100, + Address::default(), + BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }, + ProtocolVersionId::latest(), + ); + header.l1_tx_count = 3; + header.l2_tx_count = 5; + header.l2_to_l1_logs.push(L2ToL1Log { + shard_id: 0, + is_service: false, + tx_number_in_block: 2, + sender: Address::repeat_byte(2), + key: H256::repeat_byte(3), + value: H256::zero(), + }); + header.l2_to_l1_messages.push(vec![22; 22]); + header.l2_to_l1_messages.push(vec![33; 33]); + + conn.blocks_dal() + .insert_l1_batch(&header, &[], BlockGasCount::default()) + .await; + let loaded_header = conn + .blocks_dal() + .get_l1_batch_header(L1BatchNumber(1)) + .await + .unwrap(); + assert_eq!(loaded_header.number, header.number); + assert_eq!(loaded_header.timestamp, header.timestamp); + assert_eq!(loaded_header.l1_tx_count, header.l1_tx_count); + assert_eq!(loaded_header.l2_tx_count, header.l2_tx_count); + assert_eq!(loaded_header.l2_to_l1_logs, header.l2_to_l1_logs); + assert_eq!(loaded_header.l2_to_l1_messages, header.l2_to_l1_messages); + + assert!(conn + .blocks_dal() + .get_l1_batch_header(L1BatchNumber(2)) + .await + .is_none()); + } + + #[db_test(dal_crate)] + async fn getting_predicted_gas(pool: ConnectionPool) { + let mut conn = pool.access_storage().await; + conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; let mut header = L1BatchHeader::new( L1BatchNumber(1), 100, Address::default(), BaseSystemContractsHashes::default(), + ProtocolVersionId::default(), ); let mut predicted_gas = BlockGasCount { commit: 2, @@ -1027,37 +1243,37 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, predicted_gas) + .insert_l1_batch(&header, &[], predicted_gas) .await; header.number = L1BatchNumber(2); header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, predicted_gas) + .insert_l1_batch(&header, &[], predicted_gas) .await; let action_types_and_predicted_gas = [ - (AggregatedActionType::ExecuteBlocks, 10), - (AggregatedActionType::CommitBlocks, 2), - (AggregatedActionType::PublishProofBlocksOnchain, 3), + (AggregatedActionType::Execute, 10), + (AggregatedActionType::Commit, 2), + (AggregatedActionType::PublishProofOnchain, 3), ]; for (action_type, expected_gas) in action_types_and_predicted_gas { let gas = conn .blocks_dal() - .get_blocks_predicted_gas(L1BatchNumber(1), L1BatchNumber(1), action_type) + .get_l1_batches_predicted_gas(L1BatchNumber(1)..=L1BatchNumber(1), action_type) .await; assert_eq!(gas, expected_gas); let gas = conn .blocks_dal() - .get_blocks_predicted_gas(L1BatchNumber(2), L1BatchNumber(2), action_type) + .get_l1_batches_predicted_gas(L1BatchNumber(2)..=L1BatchNumber(2), action_type) .await; 
assert_eq!(gas, 2 * expected_gas); let gas = conn .blocks_dal() - .get_blocks_predicted_gas(L1BatchNumber(1), L1BatchNumber(2), action_type) + .get_l1_batches_predicted_gas(L1BatchNumber(1)..=L1BatchNumber(2), action_type) .await; assert_eq!(gas, 3 * expected_gas); } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 1321695cfa34..3c492065e02c 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,24 +1,26 @@ use bigdecimal::BigDecimal; use sqlx::Row; -use std::time::Instant; - use zksync_config::constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + ethabi::Address, l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::types::{BlockHeader, U64}, zk_evm::zkevm_opcode_defs::system_params, Bytes, L1BatchNumber, L2ChainId, MiniblockNumber, H160, H2048, H256, U256, }; -use zksync_utils::{bigdecimal_to_u256, miniblock_hash}; +use zksync_utils::bigdecimal_to_u256; use crate::models::{ - storage_block::{bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql}, + storage_block::{ + bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql, + StorageBlockDetails, StorageL1BatchDetails, + }, storage_transaction::{extract_web3_transaction, web3_transaction_select_sql, CallTrace}, }; -use crate::{SqlxError, StorageProcessor}; +use crate::{instrument::InstrumentExt, SqlxError, StorageProcessor}; const BLOCK_GAS_LIMIT: u32 = system_params::VM_INITIAL_FRAME_ERGS; @@ -29,24 +31,24 @@ pub struct BlocksWeb3Dal<'a, 'c> { impl BlocksWeb3Dal<'_, '_> { pub async fn get_sealed_miniblock_number(&mut self) -> Result { - let started_at = Instant::now(); - let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM miniblocks") + let number = sqlx::query!("SELECT MAX(number) as \"number\" FROM miniblocks") + .instrument("get_sealed_block_number") + .report_latency() .fetch_one(self.storage.conn()) .await? .number .expect("DAL invocation before genesis"); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); Ok(MiniblockNumber(number as u32)) } pub async fn get_sealed_l1_batch_number(&mut self) -> Result { - let started_at = Instant::now(); - let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM l1_batches") + let number = sqlx::query!("SELECT MAX(number) as \"number\" FROM l1_batches") + .instrument("get_sealed_block_number") + .report_latency() .fetch_one(self.storage.conn()) .await? 
.number .expect("DAL invocation before genesis"); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); Ok(L1BatchNumber(number as u32)) } @@ -69,11 +71,14 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.l1_batch_number, miniblocks.timestamp, miniblocks.base_fee_per_gas, + prev_miniblock.hash as parent_hash, l1_batches.timestamp as l1_batch_timestamp, transactions.gas_limit as gas_limit, transactions.refunded_gas as refunded_gas, {} FROM miniblocks + LEFT JOIN miniblocks prev_miniblock + ON prev_miniblock.number = miniblocks.number - 1 LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number LEFT JOIN transactions @@ -102,10 +107,9 @@ impl BlocksWeb3Dal<'_, '_> { .try_get::("l1_batch_timestamp") .map(U256::from) .ok(); - let parent_hash = match number.as_u32() { - 0 => H256::zero(), - number => miniblock_hash(MiniblockNumber(number - 1)), - }; + let parent_hash = db_row + .try_get("parent_hash") + .map_or_else(|_| H256::zero(), H256::from_slice); let base_fee_per_gas = db_row.get::("base_fee_per_gas"); api::Block { @@ -350,12 +354,125 @@ impl BlocksWeb3Dal<'_, '_> { .map(Call::from) .collect() } + + /// Returns `base_fee_per_gas` for miniblock range [min(newest_block - block_count + 1, 0), newest_block] + /// in descending order of miniblock numbers. + pub async fn get_fee_history( + &mut self, + newest_block: MiniblockNumber, + block_count: u64, + ) -> Result, SqlxError> { + let result: Vec<_> = sqlx::query!( + "SELECT base_fee_per_gas FROM miniblocks \ + WHERE number <= $1 \ + ORDER BY number DESC LIMIT $2", + newest_block.0 as i64, + block_count as i64 + ) + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| bigdecimal_to_u256(row.base_fee_per_gas)) + .collect(); + + Ok(result) + } + + pub async fn get_block_details( + &mut self, + block_number: MiniblockNumber, + current_operator_address: Address, + ) -> Result, SqlxError> { + { + let storage_block_details = sqlx::query_as!( + StorageBlockDetails, + r#" + SELECT miniblocks.number, + COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as "l1_batch_number!", + miniblocks.timestamp, + miniblocks.l1_tx_count, + miniblocks.l2_tx_count, + miniblocks.hash as "root_hash?", + commit_tx.tx_hash as "commit_tx_hash?", + commit_tx.confirmed_at as "committed_at?", + prove_tx.tx_hash as "prove_tx_hash?", + prove_tx.confirmed_at as "proven_at?", + execute_tx.tx_hash as "execute_tx_hash?", + execute_tx.confirmed_at as "executed_at?", + miniblocks.l1_gas_price, + miniblocks.l2_fair_gas_price, + miniblocks.bootloader_code_hash, + miniblocks.default_aa_code_hash, + miniblocks.protocol_version, + l1_batches.fee_account_address as "fee_account_address?" 
+ FROM miniblocks + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number + LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) + WHERE miniblocks.number = $1 + "#, + block_number.0 as i64 + ) + .instrument("get_block_details") + .with_arg("block_number", &block_number) + .report_latency() + .fetch_optional(self.storage.conn()) + .await?; + + Ok(storage_block_details.map(|storage_block_details| { + storage_block_details.into_block_details(current_operator_address) + })) + } + } + + pub async fn get_l1_batch_details( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Result, SqlxError> { + { + let l1_batch_details: Option = sqlx::query_as!( + StorageL1BatchDetails, + r#" + SELECT l1_batches.number, + l1_batches.timestamp, + l1_batches.l1_tx_count, + l1_batches.l2_tx_count, + l1_batches.hash as "root_hash?", + commit_tx.tx_hash as "commit_tx_hash?", + commit_tx.confirmed_at as "committed_at?", + prove_tx.tx_hash as "prove_tx_hash?", + prove_tx.confirmed_at as "proven_at?", + execute_tx.tx_hash as "execute_tx_hash?", + execute_tx.confirmed_at as "executed_at?", + l1_batches.l1_gas_price, + l1_batches.l2_fair_gas_price, + l1_batches.bootloader_code_hash, + l1_batches.default_aa_code_hash + FROM l1_batches + LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) + WHERE l1_batches.number = $1 + "#, + l1_batch_number.0 as i64 + ) + .instrument("get_l1_batch_details") + .with_arg("l1_batch_number", &l1_batch_number) + .report_latency() + .fetch_optional(self.storage.conn()) + .await?; + + Ok(l1_batch_details.map(api::L1BatchDetails::from)) + } + } } #[cfg(test)] mod tests { use db_test_macro::db_test; - use zksync_types::{block::MiniblockHeader, MiniblockNumber}; + use zksync_types::{block::MiniblockHeader, MiniblockNumber, ProtocolVersion}; + use zksync_utils::miniblock_hash; use super::*; use crate::{tests::create_miniblock_header, ConnectionPool}; @@ -366,6 +483,9 @@ mod tests { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; let header = MiniblockHeader { l1_tx_count: 3, l2_tx_count: 5, @@ -430,6 +550,9 @@ mod tests { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(0)) .await; @@ -479,6 +602,9 @@ mod tests { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(0)) .await; diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs index 9019d1213fb6..b30d80180e03 100644 --- a/core/lib/dal/src/connection/mod.rs +++ 
b/core/lib/dal/src/connection/mod.rs
@@ -1,47 +1,117 @@
+// External imports
+use sqlx::{
+ pool::PoolConnection,
+ postgres::{PgConnectOptions, PgPool, PgPoolOptions, Postgres},
+};
// Built-in deps
use std::time::{Duration, Instant};
-// External imports
-use sqlx::pool::PoolConnection;
-use sqlx::postgres::{PgPool, PgPoolOptions, Postgres};
+// Workspace imports
+use zksync_utils::parse_env;
// Local imports
use crate::{
get_master_database_url, get_prover_database_url, get_replica_database_url, StorageProcessor,
};
-use zksync_utils::parse_env;
-
-pub use self::test_pool::TestPool;
pub mod holder;
pub mod test_pool;
-#[derive(Clone, Debug)]
-pub enum ConnectionPool {
- Real(PgPool),
- Test(TestPool),
-}
+pub use self::test_pool::TestPool;
-#[derive(Clone, Debug)]
+#[derive(Debug, Clone, Copy)]
pub enum DbVariant {
Master,
Replica,
Prover,
}
-impl ConnectionPool {
- /// Establishes a pool of the connections to the database and
- /// creates a new `ConnectionPool` object.
- /// pool_max_size - number of connections in pool, if not set env variable "DATABASE_POOL_SIZE" is going to be used.
- pub async fn new(pool_max_size: Option<u32>, db: DbVariant) -> Self {
- let database_url = match db {
+/// Builder for [`ConnectionPool`]s.
+#[derive(Debug)]
+pub struct ConnectionPoolBuilder {
+ db: DbVariant,
+ max_size: Option<u32>,
+ statement_timeout: Option<Duration>,
+}
+
+impl ConnectionPoolBuilder {
+ /// Sets the maximum size of the created pool. If not specified, the max pool size will be
+ /// taken from the `DATABASE_POOL_SIZE` env variable.
+ pub fn set_max_size(&mut self, max_size: Option<u32>) -> &mut Self {
+ self.max_size = max_size;
+ self
+ }
+
+ /// Sets the statement timeout for the pool. See [Postgres docs] for semantics.
+ /// If not specified, the statement timeout will not be set.
+ ///
+ /// [Postgres docs]: https://www.postgresql.org/docs/14/runtime-config-client.html
+ pub fn set_statement_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
+ self.statement_timeout = timeout;
+ self
+ }
+
+ /// Builds a connection pool from this builder.
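+ // Usage sketch (editorial; the concrete size and timeout values are invented):
+ //
+ //     let pool = ConnectionPool::builder(DbVariant::Master)
+ //         .set_max_size(Some(50))
+ //         .set_statement_timeout(Some(Duration::from_secs(10)))
+ //         .build()
+ //         .await;
+ //
+ // Both setters take and return `&mut Self`, so they chain on the builder
+ // returned by `ConnectionPool::builder` before `build()` is awaited.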
+ pub async fn build(&self) -> ConnectionPool { + let database_url = match self.db { DbVariant::Master => get_master_database_url(), DbVariant::Replica => get_replica_database_url(), DbVariant::Prover => get_prover_database_url(), }; - let max_connections = pool_max_size.unwrap_or_else(|| parse_env("DATABASE_POOL_SIZE")); + self.build_inner(&database_url).await + } + + pub async fn build_inner(&self, database_url: &str) -> ConnectionPool { + let max_connections = self + .max_size + .unwrap_or_else(|| parse_env("DATABASE_POOL_SIZE")); let options = PgPoolOptions::new().max_connections(max_connections); - let pool = options.connect(&database_url).await.unwrap(); - Self::Real(pool) + let mut connect_options: PgConnectOptions = database_url.parse().unwrap_or_else(|err| { + panic!("Failed parsing {:?} database URL: {}", self.db, err); + }); + if let Some(timeout) = self.statement_timeout { + let timeout_string = format!("{}s", timeout.as_secs()); + connect_options = connect_options.options([("statement_timeout", timeout_string)]); + } + let pool = options + .connect_with(connect_options) + .await + .unwrap_or_else(|err| { + panic!("Failed connecting to {:?} database: {}", self.db, err); + }); + vlog::info!( + "Created pool for {db:?} database with {max_connections} max connections \ + and {statement_timeout:?} statement timeout", + db = self.db, + statement_timeout = self.statement_timeout + ); + ConnectionPool::Real(pool) + } +} + +#[derive(Debug, Clone)] +pub enum ConnectionPool { + Real(PgPool), + Test(TestPool), +} + +impl ConnectionPool { + /// Initializes a builder for connection pools. + pub fn builder(db: DbVariant) -> ConnectionPoolBuilder { + ConnectionPoolBuilder { + db, + max_size: None, + statement_timeout: None, + } + } + + /// Initializes a builder for connection pools with a single connection. This is equivalent + /// to calling `Self::builder(db).set_max_size(Some(1))`. + pub fn singleton(db: DbVariant) -> ConnectionPoolBuilder { + ConnectionPoolBuilder { + db, + max_size: Some(1), + statement_timeout: None, + } } /// Creates a `StorageProcessor` entity over a recoverable connection. @@ -82,26 +152,47 @@ impl ConnectionPool { async fn acquire_connection_retried(pool: &PgPool) -> PoolConnection { const DB_CONNECTION_RETRIES: u32 = 3; + const BACKOFF_INTERVAL: Duration = Duration::from_secs(1); let mut retry_count = 0; - while retry_count < DB_CONNECTION_RETRIES { metrics::histogram!("sql.connection_pool.size", pool.size() as f64); metrics::histogram!("sql.connection_pool.idle", pool.num_idle() as f64); let connection = pool.acquire().await; - match connection { + let connection_err = match connection { Ok(connection) => return connection, - Err(_) => retry_count += 1, - } + Err(err) => { + retry_count += 1; + err + } + }; - // Backing off for one second if facing an error - vlog::warn!("Failed to get connection to db. 
Backing off for 1 second"); - tokio::time::sleep(Duration::from_secs(1)).await; + Self::report_connection_error(&connection_err); + vlog::warn!( + "Failed to get connection to DB, backing off for {BACKOFF_INTERVAL:?}: {connection_err}" + ); + tokio::time::sleep(BACKOFF_INTERVAL).await; } // Attempting to get the pooled connection for the last time - pool.acquire().await.unwrap() + pool.acquire().await.unwrap_or_else(|connection_err| { + Self::report_connection_error(&connection_err); + panic!( + "Run out of retries getting a DB connection; last error: {}", + connection_err + ); + }) + } + + fn report_connection_error(err: &sqlx::Error) { + let kind = match err { + sqlx::Error::PoolTimedOut => "timeout", + sqlx::Error::Database(_) => "database", + sqlx::Error::Io(_) => "io", + _ => "other", + }; + metrics::increment_counter!("sql.connection_pool.acquire_error", "kind" => kind); } pub async fn access_test_storage(&self) -> StorageProcessor<'static> { @@ -113,3 +204,34 @@ impl ConnectionPool { } } } + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + use crate::get_test_database_url; + + #[tokio::test] + async fn setting_statement_timeout() { + // We cannot use an ordinary test pool here because it isn't created using `ConnectionPoolBuilder`. + // Since we don't need to mutate the DB for the test, using a real DB connection is OK. + let database_url = get_test_database_url(); + let pool = ConnectionPool::builder(DbVariant::Master) + .set_statement_timeout(Some(Duration::from_secs(1))) + .build_inner(&database_url) + .await; + + // NB. We must not mutate the database below! Doing so may break other tests. + let mut conn = pool.access_storage().await; + let err = sqlx::query("SELECT pg_sleep(2)") + .map(drop) + .fetch_optional(conn.conn()) + .await + .unwrap_err(); + assert_matches!( + err, + sqlx::Error::Database(db_err) if db_err.message().contains("statement timeout") + ); + } +} diff --git a/core/lib/dal/src/explorer/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs similarity index 95% rename from core/lib/dal/src/explorer/contract_verification_dal.rs rename to core/lib/dal/src/contract_verification_dal.rs index 02b47eee53e4..c635d169218b 100644 --- a/core/lib/dal/src/explorer/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -2,7 +2,7 @@ use std::fmt::{Display, Formatter}; use std::time::Duration; use zksync_types::{ - explorer_api::{ + contract_verification_api::{ DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest, VerificationRequestStatus, }, @@ -17,7 +17,7 @@ use crate::StorageProcessor; #[derive(Debug)] pub struct ContractVerificationDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } #[derive(Debug)] @@ -407,4 +407,20 @@ impl ContractVerificationDal<'_, '_> { Ok(result) } } + + pub async fn get_contract_verification_info( + &mut self, + address: Address, + ) -> Result, SqlxError> { + let row = sqlx::query!( + "SELECT verification_info FROM contracts_verification_info WHERE address = $1", + address.as_bytes(), + ) + .fetch_optional(self.storage.conn()) + .await?; + Ok(row.and_then(|row| { + row.verification_info + .map(|info| serde_json::from_value(info).unwrap()) + })) + } } diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 7505d0e0c939..74469f831195 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ 
-63,9 +63,9 @@ impl EthSenderDal<'_, '_> { for record in records { let batch_number = L1BatchNumber(record.get::("number") as u32); let aggregation_action = match tx_type { - "execute_tx" => AggregatedActionType::ExecuteBlocks, - "commit_tx" => AggregatedActionType::CommitBlocks, - "prove_tx" => AggregatedActionType::PublishProofBlocksOnchain, + "execute_tx" => AggregatedActionType::Execute, + "commit_tx" => AggregatedActionType::Commit, + "prove_tx" => AggregatedActionType::PublishProofOnchain, _ => unreachable!(), }; if record.get::("confirmed") { @@ -357,7 +357,7 @@ impl EthSenderDal<'_, '_> { super::BlocksDal { storage: &mut transaction, } - .set_eth_tx_id(l1_batch, l1_batch, eth_tx_id as u32, tx_type) + .set_eth_tx_id(l1_batch..=l1_batch, eth_tx_id as u32, tx_type) .await; transaction.commit().await; diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 82141315f03f..9550a68dd62b 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -197,7 +197,7 @@ mod tests { use super::*; use crate::{tests::create_miniblock_header, ConnectionPool}; use db_test_macro::db_test; - use zksync_types::{Address, L1BatchNumber}; + use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { assert!(topic_count <= 4); @@ -216,6 +216,9 @@ mod tests { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(1)) .await; @@ -288,6 +291,9 @@ mod tests { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(1)) .await; diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 83edea20a152..2142a66a4ba5 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -1,16 +1,15 @@ -use std::time::Instant; - use sqlx::Row; -use crate::models::storage_block::web3_block_number_to_sql; use zksync_types::{ api::{GetLogsFilter, Log}, - MiniblockNumber, + Address, MiniblockNumber, H256, }; -use crate::models::storage_event::StorageWeb3Log; -use crate::SqlxError; -use crate::StorageProcessor; +use crate::{ + instrument::InstrumentExt, + models::{storage_block::web3_block_number_to_sql, storage_event::StorageWeb3Log}, + SqlxError, StorageProcessor, +}; #[derive(Debug)] pub struct EventsWeb3Dal<'a, 'c> { @@ -26,7 +25,6 @@ impl EventsWeb3Dal<'_, '_> { offset: usize, ) -> Result, SqlxError> { { - let started_at = Instant::now(); let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter); let query = format!( @@ -43,23 +41,23 @@ impl EventsWeb3Dal<'_, '_> { let mut query = sqlx::query(&query); if !filter.addresses.is_empty() { - let addresses: Vec<_> = filter - .addresses - .into_iter() - .map(|address| address.0.to_vec()) - .collect(); + let addresses: Vec<_> = filter.addresses.iter().map(Address::as_bytes).collect(); query = query.bind(addresses); } - for (_, topics) in filter.topics { - let topics: Vec<_> = topics.into_iter().map(|topic| topic.0.to_vec()).collect(); + for (_, topics) in &filter.topics { + let topics: Vec<_> = topics.iter().map(H256::as_bytes).collect(); query = query.bind(topics); } query = query.bind(offset as i32); - let log = query.fetch_optional(self.storage.conn()).await?; - - 
metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_log_block_number"); + let log = query + .instrument("get_log_block_number") + .report_latency() + .with_arg("filter", &filter) + .with_arg("offset", &offset) + .fetch_optional(self.storage.conn()) + .await?; - Ok(log.map(|row| MiniblockNumber(row.get::("miniblock_number") as u32))) + Ok(log.map(|row| MiniblockNumber(row.get::("miniblock_number") as u32))) } } @@ -71,7 +69,6 @@ impl EventsWeb3Dal<'_, '_> { limit: usize, ) -> Result, SqlxError> { { - let started_at = Instant::now(); let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter); let query = format!( @@ -96,22 +93,23 @@ impl EventsWeb3Dal<'_, '_> { let mut query = sqlx::query_as(&query); if !filter.addresses.is_empty() { - let addresses: Vec<_> = filter - .addresses - .into_iter() - .map(|address| address.0.to_vec()) - .collect(); + let addresses: Vec<_> = filter.addresses.iter().map(Address::as_bytes).collect(); query = query.bind(addresses); } - for (_, topics) in filter.topics { - let topics: Vec<_> = topics.into_iter().map(|topic| topic.0.to_vec()).collect(); + for (_, topics) in &filter.topics { + let topics: Vec<_> = topics.iter().map(H256::as_bytes).collect(); query = query.bind(topics); } query = query.bind(limit as i32); - let db_logs: Vec = query.fetch_all(self.storage.conn()).await?; + let db_logs: Vec = query + .instrument("get_logs") + .report_latency() + .with_arg("filter", &filter) + .with_arg("limit", &limit) + .fetch_all(self.storage.conn()) + .await?; let logs = db_logs.into_iter().map(Into::into).collect(); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_logs"); Ok(logs) } } diff --git a/core/lib/dal/src/explorer/explorer_accounts_dal.rs b/core/lib/dal/src/explorer/explorer_accounts_dal.rs deleted file mode 100644 index b3b4e0c6c3f0..000000000000 --- a/core/lib/dal/src/explorer/explorer_accounts_dal.rs +++ /dev/null @@ -1,151 +0,0 @@ -use std::collections::HashMap; - -use zksync_types::{ - api, - explorer_api::{AccountType, BalanceItem, ExplorerTokenInfo}, - get_code_key, - tokens::ETHEREUM_ADDRESS, - utils::storage_key_for_standard_token_balance, - AccountTreeId, Address, Nonce, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, L2_ETH_TOKEN_ADDRESS, - U256, -}; - -use crate::{SqlxError, StorageProcessor}; - -#[derive(Debug)] -pub struct ExplorerAccountsDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, -} - -impl ExplorerAccountsDal<'_, '_> { - pub async fn get_balances_for_address( - &mut self, - address: Address, - ) -> Result, SqlxError> { - { - let token_l2_addresses = self - .storage - .explorer() - .misc_dal() - .get_well_known_token_l2_addresses() - .await?; - let hashed_keys: Vec> = token_l2_addresses - .into_iter() - .map(|mut l2_token_address| { - if l2_token_address == ETHEREUM_ADDRESS { - l2_token_address = L2_ETH_TOKEN_ADDRESS; - } - storage_key_for_standard_token_balance( - AccountTreeId::new(l2_token_address), - &address, - ) - .hashed_key() - .0 - .to_vec() - }) - .collect(); - let rows = sqlx::query!( - r#" - SELECT storage.value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" 
- FROM storage - INNER JOIN tokens ON - storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3) - WHERE storage.hashed_key = ANY($1) - "#, - &hashed_keys, - L2_ETH_TOKEN_ADDRESS.as_bytes(), - ETHEREUM_ADDRESS.as_bytes(), - ) - .fetch_all(self.storage.conn()) - .await?; - let result = rows - .into_iter() - .filter_map(|row| { - let balance = U256::from_big_endian(&row.value); - if balance.is_zero() { - None - } else { - let l2_address = Address::from_slice(&row.l2_address); - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address, - address: l2_address, - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let balance_item = BalanceItem { - token_info, - balance, - }; - Some((l2_address, balance_item)) - } - }) - .collect(); - Ok(result) - } - } - - /// Returns sealed and verified nonces for address. - pub async fn get_account_nonces( - &mut self, - address: Address, - ) -> Result<(Nonce, Nonce), SqlxError> { - let latest_block_number = self - .storage - .blocks_web3_dal() - .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest)) - .await? - .unwrap(); - let sealed_nonce = self - .storage - .storage_web3_dal() - .get_address_historical_nonce(address, latest_block_number) - .await? - .as_u32(); - - let finalized_block_number = self - .storage - .blocks_web3_dal() - .resolve_block_id(api::BlockId::Number(api::BlockNumber::Finalized)) - .await? - .unwrap(); // Safe: we always have at least the genesis miniblock finalized - let verified_nonce = self - .storage - .storage_web3_dal() - .get_address_historical_nonce(address, finalized_block_number) - .await? - .as_u32(); - Ok((Nonce(sealed_nonce), Nonce(verified_nonce))) - } - - pub async fn get_account_type(&mut self, address: Address) -> Result { - let hashed_key = get_code_key(&address).hashed_key(); - { - let contract_exists = sqlx::query!( - r#" - SELECT true as "exists" - FROM ( - SELECT * FROM storage_logs - WHERE hashed_key = $1 - ORDER BY miniblock_number DESC, operation_number DESC - LIMIT 1 - ) sl - WHERE sl.value != $2 - "#, - hashed_key.as_bytes(), - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - let result = match contract_exists { - Some(_) => AccountType::Contract, - None => AccountType::EOA, - }; - Ok(result) - } - } -} diff --git a/core/lib/dal/src/explorer/explorer_blocks_dal.rs b/core/lib/dal/src/explorer/explorer_blocks_dal.rs deleted file mode 100644 index 72b96785f7a4..000000000000 --- a/core/lib/dal/src/explorer/explorer_blocks_dal.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::time::Instant; - -use zksync_types::explorer_api::{ - BlockDetails, BlockPageItem, BlocksQuery, L1BatchDetails, L1BatchPageItem, L1BatchesQuery, - PaginationDirection, -}; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber}; - -use crate::models::storage_block::{ - block_page_item_from_storage, l1_batch_page_item_from_storage, StorageBlockDetails, - StorageL1BatchDetails, -}; -use crate::SqlxError; -use crate::StorageProcessor; - -#[derive(Debug)] -pub struct ExplorerBlocksDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, -} - -impl ExplorerBlocksDal<'_, '_> { - pub async fn get_blocks_page( - &mut self, - query: BlocksQuery, - last_verified: MiniblockNumber, - ) -> Result, SqlxError> { - { - let (cmp_sign, order_str) = match query.pagination.direction { - PaginationDirection::Older => ("<", "DESC"), - 
PaginationDirection::Newer => (">", "ASC"), - }; - let cmp_str = if query.from.is_some() { - format!("WHERE miniblocks.number {} $3", cmp_sign) - } else { - "".to_string() - }; - let sql_query_str = format!( - " - SELECT number, l1_tx_count, l2_tx_count, hash, timestamp FROM miniblocks - {} - ORDER BY miniblocks.number {} - LIMIT $1 - OFFSET $2 - ", - cmp_str, order_str - ); - - let mut sql_query = sqlx::query_as(&sql_query_str).bind(query.pagination.limit as i32); - sql_query = sql_query.bind(query.pagination.offset as i32); - if let Some(from) = query.from { - sql_query = sql_query.bind(from.0 as i64); - } - let result = sql_query - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| block_page_item_from_storage(row, last_verified)) - .collect(); - Ok(result) - } - } - - pub async fn get_block_details( - &mut self, - block_number: MiniblockNumber, - current_operator_address: Address, - ) -> Result, SqlxError> { - { - let started_at = Instant::now(); - let storage_block_details: Option = sqlx::query_as!( - StorageBlockDetails, - r#" - SELECT miniblocks.number, - COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as "l1_batch_number!", - miniblocks.timestamp, - miniblocks.l1_tx_count, - miniblocks.l2_tx_count, - miniblocks.hash as "root_hash?", - commit_tx.tx_hash as "commit_tx_hash?", - commit_tx.confirmed_at as "committed_at?", - prove_tx.tx_hash as "prove_tx_hash?", - prove_tx.confirmed_at as "proven_at?", - execute_tx.tx_hash as "execute_tx_hash?", - execute_tx.confirmed_at as "executed_at?", - miniblocks.l1_gas_price, - miniblocks.l2_fair_gas_price, - miniblocks.bootloader_code_hash, - miniblocks.default_aa_code_hash, - l1_batches.fee_account_address as "fee_account_address?" - FROM miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - WHERE miniblocks.number = $1 - "#, - block_number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await?; - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_block_details"); - Ok(storage_block_details.map(|storage_block_details| { - storage_block_details.into_block_details(current_operator_address) - })) - } - } - - pub async fn get_l1_batches_page( - &mut self, - query: L1BatchesQuery, - last_verified: L1BatchNumber, - ) -> Result, SqlxError> { - { - let (cmp_sign, order_str) = match query.pagination.direction { - PaginationDirection::Older => ("<", "DESC"), - PaginationDirection::Newer => (">", "ASC"), - }; - let cmp_str = if query.from.is_some() { - format!("AND l1_batches.number {} $3", cmp_sign) - } else { - "".to_string() - }; - let sql_query_str = format!( - " - SELECT number, l1_tx_count, l2_tx_count, hash, timestamp FROM l1_batches - WHERE l1_batches.hash IS NOT NULL {} - ORDER BY l1_batches.number {} - LIMIT $1 - OFFSET $2 - ", - cmp_str, order_str - ); - - let mut sql_query = sqlx::query_as(&sql_query_str).bind(query.pagination.limit as i32); - sql_query = sql_query.bind(query.pagination.offset as i32); - if let Some(from) = query.from { - sql_query = sql_query.bind(from.0 as i64); - } - let result = sql_query - 
.fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| l1_batch_page_item_from_storage(row, last_verified)) - .collect(); - Ok(result) - } - } - - pub async fn get_l1_batch_details( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> Result, SqlxError> { - { - let started_at = Instant::now(); - let l1_batch_details: Option = sqlx::query_as!( - StorageL1BatchDetails, - r#" - SELECT l1_batches.number, - l1_batches.timestamp, - l1_batches.l1_tx_count, - l1_batches.l2_tx_count, - l1_batches.hash as "root_hash?", - commit_tx.tx_hash as "commit_tx_hash?", - commit_tx.confirmed_at as "committed_at?", - prove_tx.tx_hash as "prove_tx_hash?", - prove_tx.confirmed_at as "proven_at?", - execute_tx.tx_hash as "execute_tx_hash?", - execute_tx.confirmed_at as "executed_at?", - l1_batches.l1_gas_price, - l1_batches.l2_fair_gas_price, - l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash - FROM l1_batches - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - WHERE l1_batches.number = $1 - "#, - l1_batch_number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await?; - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_l1_batch_details"); - Ok(l1_batch_details.map(L1BatchDetails::from)) - } - } -} diff --git a/core/lib/dal/src/explorer/explorer_events_dal.rs b/core/lib/dal/src/explorer/explorer_events_dal.rs deleted file mode 100644 index 02db9bb90fc3..000000000000 --- a/core/lib/dal/src/explorer/explorer_events_dal.rs +++ /dev/null @@ -1,117 +0,0 @@ -use zksync_types::api::Log; -use zksync_types::explorer_api::{EventsQuery, EventsResponse, PaginationDirection}; - -use sqlx::Row; - -use crate::models::storage_event::StorageWeb3Log; -use crate::{SqlxError, StorageProcessor}; - -#[derive(Debug)] -pub struct ExplorerEventsDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, -} - -impl ExplorerEventsDal<'_, '_> { - pub async fn get_events_page( - &mut self, - query: EventsQuery, - max_total: usize, - ) -> Result { - { - let (cmp_sign, order_str) = match query.pagination.direction { - PaginationDirection::Older => ("<", "DESC"), - PaginationDirection::Newer => (">", "ASC"), - }; - - let mut filters = Vec::new(); - let mut bind_index = 1usize; - if query.from_block_number.is_some() { - filters.push(format!( - "(events.miniblock_number {} ${})", - cmp_sign, bind_index - )); - bind_index += 1; - } - if query.contract_address.is_some() { - filters.push(format!("(events.address = ${})", bind_index)); - bind_index += 1; - } - let filters: String = if !filters.is_empty() { - format!("WHERE {}", filters.join(" AND ")) - } else { - "".to_string() - }; - - let ordering = format!( - "events.miniblock_number {0}, events.event_index_in_block {0}", - order_str - ); - let sql_list_query_str = format!( - r#" - SELECT events.*, miniblocks.hash as "block_hash", miniblocks.l1_batch_number - FROM ( - SELECT address, topic1, topic2, topic3, topic4, value, - miniblock_number, tx_hash, tx_index_in_block, - event_index_in_block, event_index_in_tx - FROM events - {0} - ORDER BY {1} - LIMIT ${2} - OFFSET ${3} - ) as events - JOIN miniblocks ON events.miniblock_number = miniblocks.number - ORDER BY {1} - "#, - 
filters, - ordering, - bind_index, - bind_index + 1 - ); - - let mut sql_query = sqlx::query_as(&sql_list_query_str); - if let Some(block_number) = query.from_block_number { - sql_query = sql_query.bind(block_number.0 as i64); - } - if let Some(contract_address) = query.contract_address { - sql_query = sql_query.bind(contract_address.0.to_vec()); - } - sql_query = sql_query - .bind(query.pagination.limit as i64) - .bind(query.pagination.offset as i64); - - let storage_web3_logs: Vec = - sql_query.fetch_all(self.storage.conn()).await?; - let logs = storage_web3_logs.into_iter().map(Log::from).collect(); - - let sql_count_query_str = format!( - r#" - SELECT COUNT(*) as "count" FROM ( - SELECT true - FROM events - {0} - LIMIT ${1} - ) AS c - "#, - filters, bind_index - ); - - let mut sql_query = sqlx::query(&sql_count_query_str); - if let Some(block_number) = query.from_block_number { - sql_query = sql_query.bind(block_number.0 as i64); - } - if let Some(contract_address) = query.contract_address { - sql_query = sql_query.bind(contract_address.0.to_vec()); - } - sql_query = sql_query.bind(max_total as i64); - - let total = sql_query - .fetch_one(self.storage.conn()) - .await? - .get::("count"); - Ok(EventsResponse { - list: logs, - total: total as usize, - }) - } - } -} diff --git a/core/lib/dal/src/explorer/explorer_misc_dal.rs b/core/lib/dal/src/explorer/explorer_misc_dal.rs deleted file mode 100644 index 2946fe4d7b17..000000000000 --- a/core/lib/dal/src/explorer/explorer_misc_dal.rs +++ /dev/null @@ -1,116 +0,0 @@ -use crate::explorer::storage_contract_info::StorageContractInfo; -use crate::SqlxError; -use crate::StorageProcessor; -use zksync_types::{ - explorer_api::{ContractBasicInfo, ContractStats, ExplorerTokenInfo}, - get_code_key, Address, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, -}; - -#[derive(Debug)] -pub struct ExplorerMiscDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, -} - -impl ExplorerMiscDal<'_, '_> { - pub async fn get_token_details( - &mut self, - address: Address, - ) -> Result, SqlxError> { - { - let row = sqlx::query!( - r#" - SELECT l1_address, l2_address, symbol, name, decimals, usd_price - FROM tokens - WHERE l2_address = $1 - "#, - address.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - let result = row.map(|row| ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }); - Ok(result) - } - } - - pub async fn get_well_known_token_l2_addresses(&mut self) -> Result, SqlxError> { - { - let addresses = sqlx::query!("SELECT l2_address FROM tokens WHERE well_known = true") - .fetch_all(self.storage.conn()) - .await? 
- .into_iter() - .map(|record| Address::from_slice(&record.l2_address)) - .collect(); - Ok(addresses) - } - } - - pub async fn get_contract_info( - &mut self, - address: Address, - ) -> Result, SqlxError> { - { - let hashed_key = get_code_key(&address).hashed_key(); - let info = sqlx::query_as!( - StorageContractInfo, - r#" - WITH sl AS ( - SELECT * FROM storage_logs - WHERE storage_logs.hashed_key = $1 - ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC - LIMIT 1 - ) - SELECT - sl.key as "key_address", - fd.bytecode, - txs.initiator_address as "creator_address?", - txs.hash as "creator_tx_hash?", - sl.miniblock_number as "created_in_block_number", - c.verification_info - FROM sl - JOIN factory_deps fd ON fd.bytecode_hash = sl.value - LEFT JOIN transactions txs ON txs.hash = sl.tx_hash - LEFT JOIN contracts_verification_info c ON c.address = $2 - WHERE sl.value != $3 - "#, - hashed_key.as_bytes(), - address.as_bytes(), - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - Ok(info.map(|info| info.into())) - } - } - - pub async fn get_contract_stats( - &mut self, - address: Address, - ) -> Result { - { - let row = sqlx::query!( - r#" - SELECT COUNT(*) as "total_transactions!" - FROM transactions - WHERE contract_address = $1 - "#, - address.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - let result = row - .map(|row| ContractStats { - total_transactions: row.total_transactions as usize, - }) - .unwrap_or_default(); - Ok(result) - } - } -} diff --git a/core/lib/dal/src/explorer/explorer_transactions_dal.rs b/core/lib/dal/src/explorer/explorer_transactions_dal.rs deleted file mode 100644 index 1761af88266f..000000000000 --- a/core/lib/dal/src/explorer/explorer_transactions_dal.rs +++ /dev/null @@ -1,869 +0,0 @@ -use std::collections::HashMap; -use std::time::Instant; - -use itertools::Itertools; -use once_cell::sync::Lazy; -use sqlx::Row; - -use zksync_config::constants::ERC20_TRANSFER_TOPIC; -use zksync_types::api::Log; -use zksync_types::explorer_api::{ - BalanceChangeInfo, BalanceChangeType, Erc20TransferInfo, ExplorerTokenInfo, - PaginationDirection, PaginationQuery, TransactionDetails, TransactionResponse, - TransactionsResponse, TxPosition, -}; -use zksync_types::{ - tokens::ETHEREUM_ADDRESS, tx::Execute, Address, L1BatchNumber, MiniblockNumber, H256, - L2_ETH_TOKEN_ADDRESS, U256, U64, -}; - -use crate::models::storage_event::StorageWeb3Log; -use crate::models::storage_transaction::{ - transaction_details_from_storage, StorageTransactionDetails, -}; -use crate::SqlxError; -use crate::StorageProcessor; - -#[derive(Debug)] -pub struct ExplorerTransactionsDal<'a, 'c> { - pub(super) storage: &'a mut StorageProcessor<'c>, -} - -impl ExplorerTransactionsDal<'_, '_> { - pub async fn get_transactions_count_between( - &mut self, - from_block_number: MiniblockNumber, - to_block_number: MiniblockNumber, - ) -> Result { - { - let tx_count = sqlx::query!( - r#"SELECT COUNT(*) as "count!" FROM transactions - WHERE miniblock_number BETWEEN $1 AND $2"#, - from_block_number.0 as i64, - to_block_number.0 as i64, - ) - .fetch_one(self.storage.conn()) - .await? 
- .count as usize; - Ok(tx_count) - } - } - - pub async fn get_transaction_details( - &mut self, - hash: H256, - l2_erc20_bridge_addr: Address, - ) -> Result, SqlxError> { - { - let tx_details: Option = sqlx::query_as!( - StorageTransactionDetails, - r#" - SELECT transactions.*, miniblocks.hash as "block_hash?", - miniblocks.timestamp as "miniblock_timestamp?", - commit_tx.tx_hash as "eth_commit_tx_hash?", - prove_tx.tx_hash as "eth_prove_tx_hash?", - execute_tx.tx_hash as "eth_execute_tx_hash?" - FROM transactions - LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - WHERE transactions.hash = $1 - "#, - hash.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - let tx = if let Some(tx_details) = tx_details { - let list = self - .storage_tx_list_to_tx_details_list(vec![tx_details], l2_erc20_bridge_addr) - .await?; - let tx = list[0].clone(); - let logs: Vec = sqlx::query_as!( - StorageWeb3Log, - r#" - SELECT - address, topic1, topic2, topic3, topic4, value, - Null::bytea as "block_hash", Null::bigint as "l1_batch_number?", - miniblock_number, tx_hash, tx_index_in_block, - event_index_in_block, event_index_in_tx - FROM events - WHERE tx_hash = $1 - ORDER BY miniblock_number ASC, event_index_in_block ASC - "#, - hash.as_bytes() - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|storage_log: StorageWeb3Log| { - let mut log = Log::from(storage_log); - log.block_hash = tx.block_hash; - log.l1_batch_number = tx.l1_batch_number.map(|n| U64::from(n.0)); - log - }) - .collect(); - Some(TransactionResponse { tx, logs }) - } else { - None - }; - Ok(tx) - } - } - - #[allow(clippy::too_many_arguments)] - pub async fn get_transactions_page( - &mut self, - from_tx_location: Option, - block_number: Option, - l1_batch_number: Option, - contract_address: Option
, - pagination: PaginationQuery, - max_total: usize, - l2_erc20_bridge_addr: Address, - ) -> Result { - { - let (cmp_sign, order_str) = match pagination.direction { - PaginationDirection::Older => ("<", "DESC"), - PaginationDirection::Newer => (">", "ASC"), - }; - let mut filters = vec!["transactions.miniblock_number IS NOT NULL".to_string()]; - if let Some(from_tx_location) = from_tx_location { - if let Some(tx_index) = from_tx_location.tx_index { - filters.push(format!( - "(transactions.miniblock_number, transactions.index_in_block) {} ({}, {})", - cmp_sign, from_tx_location.block_number, tx_index - )); - } else { - filters.push(format!( - "transactions.miniblock_number {} {}", - cmp_sign, from_tx_location.block_number - )); - } - } - if let Some(address) = contract_address { - filters.push(format!( - "(transactions.contract_address = '\\x{0}' OR transactions.initiator_address = '\\x{0}')", - hex::encode(address) - )); - } - if let Some(number) = block_number { - filters.push(format!("transactions.miniblock_number = {}", number.0)); - } - if let Some(number) = l1_batch_number { - filters.push(format!("transactions.l1_batch_number = {}", number.0)); - } - let filters: String = if !filters.is_empty() { - format!("WHERE {}", filters.join(" AND ")) - } else { - "".to_string() - }; - let ordering = format!( - "transactions.miniblock_number {0}, transactions.index_in_block {0}", - order_str - ); - - let sql_query_list_str = format!( - r#" - SELECT transactions.*, miniblocks.hash as "block_hash", - miniblocks.timestamp as "miniblock_timestamp", - commit_tx.tx_hash as eth_commit_tx_hash, - prove_tx.tx_hash as eth_prove_tx_hash, - execute_tx.tx_hash as eth_execute_tx_hash - FROM ( - SELECT * FROM transactions - {0} - ORDER BY {1} - LIMIT {2} - OFFSET {3} - ) as transactions - LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - ORDER BY {1} - "#, - filters, ordering, pagination.limit, pagination.offset - ); - let storage_txs: Vec = sqlx::query_as(&sql_query_list_str) - .fetch_all(self.storage.conn()) - .await?; - let list = self - .storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr) - .await?; - - let sql_query_total_str = format!( - r#" - SELECT COUNT(*) as "count" FROM ( - SELECT true FROM transactions - {} - LIMIT {} - ) as c - "#, - filters, max_total - ); - let total = sqlx::query(&sql_query_total_str) - .fetch_one(self.storage.conn()) - .await? 
- .get::("count") as usize; - - Ok(TransactionsResponse { list, total }) - } - } - - #[allow(clippy::too_many_arguments)] - pub async fn get_account_transactions_page( - &mut self, - account_address: Address, - from_tx_location: Option, - block_number: Option, - pagination: PaginationQuery, - max_total: usize, - l2_erc20_bridge_addr: Address, - ) -> Result { - { - let order_str = match pagination.direction { - PaginationDirection::Older => "DESC", - PaginationDirection::Newer => "ASC", - }; - - let (hashes, total) = self - .get_account_transactions_hashes_page( - account_address, - from_tx_location, - block_number, - pagination, - max_total, - ) - .await?; - let sql_query_str = format!( - r#" - SELECT transactions.*, miniblocks.hash as "block_hash", - miniblocks.timestamp as "miniblock_timestamp", - commit_tx.tx_hash as eth_commit_tx_hash, - prove_tx.tx_hash as eth_prove_tx_hash, - execute_tx.tx_hash as eth_execute_tx_hash - FROM transactions - LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - WHERE transactions.hash = ANY($1) - ORDER BY transactions.miniblock_number {}, transactions.index_in_block {} - "#, - order_str, order_str - ); - - let sql_query = sqlx::query_as(&sql_query_str).bind(hashes); - let storage_txs: Vec = - sql_query.fetch_all(self.storage.conn()).await?; - let list = self - .storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr) - .await?; - - Ok(TransactionsResponse { list, total }) - } - } - - async fn get_account_transactions_hashes_page( - &mut self, - account_address: Address, - from_tx_location: Option, - block_number: Option, - pagination: PaginationQuery, - max_total: usize, - ) -> Result<(Vec>, usize), SqlxError> { - { - let started_at = Instant::now(); - let (cmp_sign, order_str) = match pagination.direction { - PaginationDirection::Older => ("<", "DESC"), - PaginationDirection::Newer => (">", "ASC"), - }; - let mut optional_filters = String::new(); - if let Some(block_number) = block_number { - optional_filters += format!("AND miniblock_number = {}\n", block_number.0).as_str(); - } - if let Some(from_tx_location) = from_tx_location { - if let Some(from_tx_index) = from_tx_location.tx_index { - optional_filters += format!( - "AND (miniblock_number, tx_index_in_block) {} ({}, {})\n", - cmp_sign, from_tx_location.block_number.0, from_tx_index - ) - .as_str(); - } else { - optional_filters += format!( - "AND miniblock_number {} {}\n", - cmp_sign, from_tx_location.block_number.0 - ) - .as_str(); - } - } - - let mut padded_address = [0u8; 12].to_vec(); - padded_address.extend_from_slice(account_address.as_bytes()); - - // We query more events than `max_total`, so after deduplication we receive at least `max_total`. 
- let estimated_required_limit = max_total * 4; - - let mut started_at_stage = Instant::now(); - let hashes_transfer_from: Vec<(Vec, i64, i32)> = { - let sql_query_str = format!( - r#" - SELECT tx_hash, miniblock_number, tx_index_in_block FROM events - WHERE topic1 = $1 AND topic2 = $2 - {1} - ORDER BY miniblock_number {0}, tx_index_in_block {0} - LIMIT {2} - "#, - order_str, optional_filters, estimated_required_limit - ); - let sql_query = sqlx::query(&sql_query_str) - .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) - .bind(padded_address.clone()); - sql_query - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| { - ( - row.get::, &str>("tx_hash"), - row.get::("miniblock_number"), - row.get::("tx_index_in_block"), - ) - }) - .collect() - }; - metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_transfer_from"); - - started_at_stage = Instant::now(); - let hashes_transfer_to: Vec<(Vec, i64, i32)> = { - let sql_query_str = format!( - r#" - SELECT tx_hash, miniblock_number, tx_index_in_block FROM events - WHERE topic1 = $1 AND topic3 = $2 - {1} - ORDER BY miniblock_number {0}, tx_index_in_block {0} - LIMIT {2} - "#, - order_str, optional_filters, estimated_required_limit - ); - let sql_query = sqlx::query(&sql_query_str) - .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) - .bind(padded_address.clone()); - sql_query - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| { - ( - row.get::, &str>("tx_hash"), - row.get::("miniblock_number"), - row.get::("tx_index_in_block"), - ) - }) - .collect() - }; - metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_transfer_to"); - - started_at_stage = Instant::now(); - let hashes_initiated: Vec<(Vec, i64, i32)> = { - let sql_query_str = format!( - r#" - SELECT hash, miniblock_number, index_in_block FROM transactions - WHERE initiator_address = $1 AND miniblock_number IS NOT NULL - {1} - ORDER BY nonce {0} - LIMIT {2} - "#, - order_str, - optional_filters.replace("tx_index_in_block", "index_in_block"), - max_total - ); - let sql_query = - sqlx::query(&sql_query_str).bind(account_address.as_bytes().to_vec()); - sql_query - .fetch_all(self.storage.conn()) - .await? 
- .into_iter() - .map(|row| { - ( - row.get::, &str>("hash"), - row.get::("miniblock_number"), - row.get::("index_in_block"), - ) - }) - .collect() - }; - metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_initiated"); - - let mut merged: Vec<_> = hashes_transfer_from - .into_iter() - .chain(hashes_transfer_to.into_iter()) - .chain(hashes_initiated.into_iter()) - .sorted_by(|(_, b1, i1), (_, b2, i2)| match pagination.direction { - PaginationDirection::Older => (b2, i2).cmp(&(b1, i1)), - PaginationDirection::Newer => (b1, i1).cmp(&(b2, i2)), - }) - .map(|(hash, _, _)| hash) - .collect(); - merged.dedup(); - - let total = merged.len(); - let result: Vec<_> = merged - .into_iter() - .skip(pagination.offset) - .take(pagination.limit) - .collect(); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_account_transactions_hashes_page"); - - Ok((result, total)) - } - } - - async fn get_erc20_transfers( - &mut self, - hashes: Vec>, - ) -> Result>, SqlxError> { - { - let transfers = sqlx::query!( - r#" - SELECT tx_hash, topic2 as "topic2!", topic3 as "topic3!", value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" - FROM events - INNER JOIN tokens ON - tokens.l2_address = events.address OR (events.address = $3 AND tokens.l2_address = $4) - WHERE tx_hash = ANY($1) AND topic1 = $2 - ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC - "#, - &hashes, - ERC20_TRANSFER_TOPIC.as_bytes(), - L2_ETH_TOKEN_ADDRESS.as_bytes(), - ETHEREUM_ADDRESS.as_bytes(), - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .group_by(|row| row.tx_hash.clone()) - .into_iter() - .map(|(hash, group)| (H256::from_slice(&hash), group.map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let from = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic2)); - let to = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic3)); - let amount = U256::from_big_endian(&row.value); - Erc20TransferInfo { - token_info, - from, - to, - amount, - } - }).collect::>())) - .collect(); - Ok(transfers) - } - } - - async fn get_withdrawals( - &mut self, - hashes: Vec>, - l2_erc20_bridge_addr: Address, - ) -> Result>, SqlxError> { - { - static ERC20_WITHDRAW_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - zksync_contracts::l2_bridge_contract() - .event("WithdrawalInitiated") - .unwrap() - .signature() - }); - - let erc20_withdrawals: HashMap> = sqlx::query!( - r#" - SELECT tx_hash, topic2 as "topic2!", topic3 as "topic3!", value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" - FROM events - INNER JOIN tokens ON - events.topic4 = ('\x000000000000000000000000'::bytea || tokens.l2_address) - WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3 - ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC - "#, - &hashes, - ERC20_WITHDRAW_EVENT_SIGNATURE.as_bytes(), - l2_erc20_bridge_addr.as_bytes() - ) - .fetch_all(self.storage.conn()) - .await? 
- .into_iter() - .group_by(|row| row.tx_hash.clone()) - .into_iter() - .map(|(hash, group)| (H256::from_slice(&hash), group.map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let l2_sender = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic2)); - let l1_receiver = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic3)); - let amount = U256::from_big_endian(&row.value); - BalanceChangeInfo { - token_info, - from: l2_sender, - to: l1_receiver, - amount, - r#type: BalanceChangeType::Withdrawal - } - }).collect::>())) - .collect(); - - static ETH_WITHDRAW_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - zksync_contracts::eth_contract() - .event("Withdrawal") - .unwrap() - .signature() - }); - - let eth_withdrawals: HashMap> = sqlx::query!( - r#" - SELECT tx_hash, topic2 as "topic2!", topic3 as "topic3!", value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" - FROM events - INNER JOIN tokens ON tokens.l2_address = '\x0000000000000000000000000000000000000000' - WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3 - ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC - "#, - &hashes, - ETH_WITHDRAW_EVENT_SIGNATURE.as_bytes(), - L2_ETH_TOKEN_ADDRESS.as_bytes(), - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .group_by(|row| row.tx_hash.clone()) - .into_iter() - .map(|(hash, group)| (H256::from_slice(&hash), group.map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let l2_sender = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic2)); - let l1_receiver = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic3)); - let amount = U256::from_big_endian(&row.value); - BalanceChangeInfo { - token_info, - from: l2_sender, - to: l1_receiver, - amount, - r#type: BalanceChangeType::Withdrawal - } - }).collect::>())) - .collect(); - - let mut withdrawals = erc20_withdrawals; - for (hash, mut items) in eth_withdrawals { - withdrawals.entry(hash).or_default().append(&mut items); - } - - Ok(withdrawals) - } - } - - /// Returns hashmap with transactions that are deposits. - async fn get_deposits( - &mut self, - hashes: Vec>, - l2_erc20_bridge_addr: Address, - ) -> Result>, SqlxError> { - { - static ERC20_DEPOSIT_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - zksync_contracts::l2_bridge_contract() - .event("FinalizeDeposit") - .unwrap() - .signature() - }); - let erc20_deposits: HashMap> = sqlx::query!( - r#" - SELECT tx_hash, topic2 as "topic2!", topic3 as "topic3!", value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" 
- FROM events - INNER JOIN tokens ON - events.topic4 = ('\x000000000000000000000000'::bytea || tokens.l2_address) - WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3 - "#, - &hashes, - ERC20_DEPOSIT_EVENT_SIGNATURE.as_bytes(), - l2_erc20_bridge_addr.as_bytes() - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let l1_sender = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic2)); - let l2_receiver = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic3)); - let amount = U256::from_big_endian(&row.value); - let deposit_info = BalanceChangeInfo { - token_info, - from: l1_sender, - to: l2_receiver, - amount, - r#type: BalanceChangeType::Deposit - }; - (H256::from_slice(&row.tx_hash), vec![deposit_info]) - }) - .collect(); - - static ETH_MINT_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - zksync_contracts::eth_contract() - .event("Mint") - .unwrap() - .signature() - }); - let eth_deposits: HashMap> = sqlx::query!( - r#" - SELECT events.tx_hash, transactions.initiator_address as "l1_sender!", events.topic2 as "topic2!", events.value as "value!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" - FROM events - INNER JOIN tokens ON tokens.l2_address = '\x0000000000000000000000000000000000000000' - INNER JOIN transactions ON transactions.hash = events.tx_hash - WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3 - ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC - "#, - &hashes, - ETH_MINT_EVENT_SIGNATURE.as_bytes(), - L2_ETH_TOKEN_ADDRESS.as_bytes(), - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .group_by(|row| row.tx_hash.clone()) - .into_iter() - .map(|(hash, group)| (H256::from_slice(&hash), group.map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - let l1_sender = Address::from_slice(&row.l1_sender); - let l2_receiver = Self::erc20_decode_address_from_topic(H256::from_slice(&row.topic2)); - let amount = U256::from_big_endian(&row.value); - BalanceChangeInfo { - token_info, - from: l1_sender, - to: l2_receiver, - amount, - r#type: BalanceChangeType::Deposit - } - }).collect::>())) - .collect(); - - let mut deposits = erc20_deposits; - for (hash, mut items) in eth_deposits { - deposits.entry(hash).or_default().append(&mut items); - } - - Ok(deposits) - } - } - - /// Returns hashmap with transactions that are ERC20 transfers. - async fn filter_erc20_transfers( - &mut self, - txs: &[StorageTransactionDetails], - ) -> Result, SqlxError> { - { - let hashes: Vec> = txs.iter().map(|tx| tx.hash.clone()).collect(); - // For transaction to be ERC20 transfer 2 conditions should be met - // 1) It is an execute transaction and contract address is an ERC20 token. 
- let filtered_by_contract_address: HashMap = sqlx::query!( - r#" - SELECT hash as "hash!", - tokens.l1_address as "l1_address!", tokens.l2_address as "l2_address!", - tokens.symbol as "symbol!", tokens.name as "name!", tokens.decimals as "decimals!", tokens.usd_price as "usd_price?" - FROM transactions - INNER JOIN tokens - ON tokens.l2_address = transactions.contract_address OR (transactions.contract_address = $2 AND tokens.l2_address = $3) - WHERE hash = ANY($1) - "#, - &hashes, - L2_ETH_TOKEN_ADDRESS.as_bytes(), - ETHEREUM_ADDRESS.as_bytes(), - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|row| { - let token_info = ExplorerTokenInfo { - l1_address: Address::from_slice(&row.l1_address), - l2_address: Address::from_slice(&row.l2_address), - address: Address::from_slice(&row.l2_address), - symbol: row.symbol, - name: row.name, - decimals: row.decimals as u8, - usd_price: row.usd_price, - }; - (H256::from_slice(&row.hash), token_info) - }) - .collect(); - - // 2) Calldata is a valid ERC20 `transfer` calldata - let erc20_transfers_iter = txs.iter().filter_map(|tx| { - let hash = H256::from_slice(&tx.hash); - if let Some(token_info) = filtered_by_contract_address.get(&hash).cloned() { - let execute = serde_json::from_value::(tx.data.clone()).unwrap(); - let calldata = execute.calldata(); - Self::parse_erc20_transfer_calldata(calldata).map(|(to, amount)| { - let from = Address::from_slice(&tx.initiator_address); - ( - hash, - Erc20TransferInfo { - from, - to, - amount, - token_info, - }, - ) - }) - } else { - None - } - }); - - // Also include ETH transfers - let eth_token_info = self - .storage - .explorer() - .misc_dal() - .get_token_details(Address::zero()) - .await? - .expect("Info about ETH should be present in DB"); - let eth_transfers_iter = txs.iter().filter_map(|tx| { - let hash = H256::from_slice(&tx.hash); - let execute = serde_json::from_value::(tx.data.clone()).unwrap(); - // All transactions with an empty calldata are considered to be called "transfers". 
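The empty-calldata check below tags plain ETH sends, while ERC-20 transfers are recognized by `parse_erc20_transfer_calldata` further below: a 4-byte selector `0xa9059cbb` followed by two 32-byte ABI words, 68 bytes in total. A standalone round-trip sketch of that layout (hypothetical function names, not part of the codebase):

```rust
/// Encodes canonical ERC-20 `transfer(address,uint256)` calldata:
/// 4-byte selector + 32-byte left-padded recipient + 32-byte big-endian amount.
fn encode_erc20_transfer(to: [u8; 20], amount: u128) -> Vec<u8> {
    let mut calldata = vec![0xa9, 0x05, 0x9c, 0xbb];
    calldata.extend_from_slice(&[0u8; 12]); // address is left-padded to 32 bytes
    calldata.extend_from_slice(&to);
    let mut amount_be = [0u8; 32];
    amount_be[16..].copy_from_slice(&amount.to_be_bytes());
    calldata.extend_from_slice(&amount_be);
    calldata
}

/// Mirrors `parse_erc20_transfer_calldata` below: anything that is not exactly
/// selector + two ABI words (68 bytes) is rejected as "not a plain transfer".
fn parse_erc20_transfer(calldata: &[u8]) -> Option<([u8; 20], [u8; 32])> {
    if calldata.len() != 68 || calldata[..4] != [0xa9, 0x05, 0x9c, 0xbb] {
        return None;
    }
    let mut to = [0u8; 20];
    to.copy_from_slice(&calldata[16..36]); // bytes 4..16 are the zero padding
    let mut amount = [0u8; 32];
    amount.copy_from_slice(&calldata[36..68]);
    Some((to, amount))
}
```

Round-tripping `parse_erc20_transfer(&encode_erc20_transfer(to, 1))` yields `Some` with the original recipient and the 32-byte amount word; any other length or selector yields `None`.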
- if execute.calldata().is_empty() { - let from = Address::from_slice(&tx.initiator_address); - let to = execute.contract_address; - let amount = execute.value; - - Some(( - hash, - Erc20TransferInfo { - from, - to, - amount, - token_info: eth_token_info.clone(), - }, - )) - } else { - None - } - }); - - let result = erc20_transfers_iter.chain(eth_transfers_iter).collect(); - Ok(result) - } - } - - async fn storage_tx_list_to_tx_details_list( - &mut self, - txs: Vec, - l2_erc20_bridge_addr: Address, - ) -> Result, SqlxError> { - let hashes: Vec> = txs.iter().map(|tx| tx.hash.clone()).collect(); - let erc20_transfers_map = self.get_erc20_transfers(hashes.clone()).await?; - let withdrawals_map = self - .get_withdrawals(hashes.clone(), l2_erc20_bridge_addr) - .await?; - let erc20_transfers_filtered = self.filter_erc20_transfers(&txs).await?; - let deposits_map = self.get_deposits(hashes, l2_erc20_bridge_addr).await?; - let txs = txs - .into_iter() - .map(|tx_details| { - Self::build_transaction_details( - &erc20_transfers_map, - &withdrawals_map, - &erc20_transfers_filtered, - &deposits_map, - tx_details, - ) - }) - .collect(); - Ok(txs) - } - - fn build_transaction_details( - erc20_transfers_map: &HashMap>, - withdrawals_map: &HashMap>, - filtered_transfers: &HashMap, - deposits_map: &HashMap>, - tx_details: StorageTransactionDetails, - ) -> TransactionDetails { - let hash = H256::from_slice(&tx_details.hash); - let erc20_transfers = erc20_transfers_map.get(&hash).cloned().unwrap_or_default(); - let withdrawals = withdrawals_map.get(&hash).cloned().unwrap_or_default(); - let transfer = filtered_transfers.get(&hash).cloned(); - let deposits = deposits_map.get(&hash).cloned().unwrap_or_default(); - transaction_details_from_storage( - tx_details, - erc20_transfers, - withdrawals, - transfer, - deposits, - ) - } - - /// Checks if calldata is erc20 `transfer` calldata and parses (to, amount) from it - fn parse_erc20_transfer_calldata(calldata: Vec) -> Option<(Address, U256)> { - // Check calldata length - if calldata.len() != 68 { - return None; - } - // Check signature match - if calldata[0..4].to_vec() != vec![0xa9, 0x05, 0x9c, 0xbb] { - return None; - } - let to = Address::from_slice(&calldata[16..36]); - let amount = U256::from_big_endian(&calldata[36..68]); - Some((to, amount)) - } - - fn erc20_decode_address_from_topic(topic: H256) -> Address { - Address::from_slice(&topic.as_bytes()[12..]) - } -} diff --git a/core/lib/dal/src/explorer/mod.rs b/core/lib/dal/src/explorer/mod.rs deleted file mode 100644 index 483c904597a1..000000000000 --- a/core/lib/dal/src/explorer/mod.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::StorageProcessor; -use contract_verification_dal::ContractVerificationDal; -use explorer_accounts_dal::ExplorerAccountsDal; -use explorer_blocks_dal::ExplorerBlocksDal; -use explorer_events_dal::ExplorerEventsDal; -use explorer_misc_dal::ExplorerMiscDal; -use explorer_transactions_dal::ExplorerTransactionsDal; - -pub mod contract_verification_dal; -pub mod explorer_accounts_dal; -pub mod explorer_blocks_dal; -pub mod explorer_events_dal; -pub mod explorer_misc_dal; -pub mod explorer_transactions_dal; -pub mod storage_contract_info; - -#[derive(Debug)] -pub struct ExplorerIntermediary<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, -} - -impl<'a, 'c> ExplorerIntermediary<'a, 'c> { - pub fn contract_verification_dal(self) -> ContractVerificationDal<'a, 'c> { - ContractVerificationDal { - storage: self.storage, - } - } - - pub fn transactions_dal(self) -> 
ExplorerTransactionsDal<'a, 'c> { - ExplorerTransactionsDal { - storage: self.storage, - } - } - - pub fn blocks_dal(self) -> ExplorerBlocksDal<'a, 'c> { - ExplorerBlocksDal { - storage: self.storage, - } - } - - pub fn accounts_dal(self) -> ExplorerAccountsDal<'a, 'c> { - ExplorerAccountsDal { - storage: self.storage, - } - } - - pub fn misc_dal(self) -> ExplorerMiscDal<'a, 'c> { - ExplorerMiscDal { - storage: self.storage, - } - } - - pub fn events_dal(self) -> ExplorerEventsDal<'a, 'c> { - ExplorerEventsDal { - storage: self.storage, - } - } -} diff --git a/core/lib/dal/src/explorer/storage_contract_info.rs b/core/lib/dal/src/explorer/storage_contract_info.rs deleted file mode 100644 index bdd1d23383c1..000000000000 --- a/core/lib/dal/src/explorer/storage_contract_info.rs +++ /dev/null @@ -1,34 +0,0 @@ -use zksync_types::{explorer_api::ContractBasicInfo, Address, Bytes, MiniblockNumber, H256}; -use zksync_utils::h256_to_account_address; - -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageContractInfo { - pub key_address: Vec, - pub bytecode: Vec, - pub creator_address: Option>, - pub creator_tx_hash: Option>, - pub created_in_block_number: i64, - pub verification_info: Option, -} - -impl From for ContractBasicInfo { - fn from(info: StorageContractInfo) -> ContractBasicInfo { - ContractBasicInfo { - address: h256_to_account_address(&H256::from_slice(&info.key_address)), - bytecode: Bytes(info.bytecode), - creator_address: info - .creator_address - .map(|address| Address::from_slice(&address)) - .unwrap_or_else(Address::zero), - creator_tx_hash: info - .creator_tx_hash - .map(|tx_hash| H256::from_slice(&tx_hash)) - .unwrap_or_else(H256::zero), - created_in_block_number: MiniblockNumber(info.created_in_block_number as u32), - verification_info: info.verification_info.map(|verification_info| { - serde_json::from_value(verification_info) - .expect("invalid verification_info json in database") - }), - } - } -} diff --git a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs new file mode 100644 index 000000000000..46c46a15b73d --- /dev/null +++ b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs @@ -0,0 +1,121 @@ +use std::time::Duration; +use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; + +use crate::time_utils::pg_interval_from_duration; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct FriGpuProverQueueDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +impl FriGpuProverQueueDal<'_, '_> { + pub async fn lock_available_prover( + &mut self, + processing_timeout: Duration, + specialized_prover_group_id: u8, + zone: String, + ) -> Option { + let processing_timeout = pg_interval_from_duration(processing_timeout); + let result: Option = sqlx::query!( + "UPDATE gpu_prover_queue_fri \ + SET instance_status = 'reserved', \ + updated_at = now(), \ + processing_started_at = now() \ + WHERE id in ( \ + SELECT id \ + FROM gpu_prover_queue_fri \ + WHERE specialized_prover_group_id=$2 \ + AND zone=$3 \ + AND ( \ + instance_status = 'available' \ + OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval) \ + ) \ + ORDER BY updated_at ASC \ + LIMIT 1 \ + FOR UPDATE \ + SKIP LOCKED \ + ) \ + RETURNING gpu_prover_queue_fri.* + ", + &processing_timeout, + specialized_prover_group_id as i16, + zone + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| SocketAddress { + host: row.instance_host.network(), + port: row.instance_port as u16, + }); + + result + } + + pub 
async fn insert_prover_instance( + &mut self, + address: SocketAddress, + specialized_prover_group_id: u8, + zone: String, + ) { + sqlx::query!( + "INSERT INTO gpu_prover_queue_fri (instance_host, instance_port, instance_status, specialized_prover_group_id, zone, created_at, updated_at) \ + VALUES (cast($1::text as inet), $2, 'available', $3, $4, now(), now()) \ + ON CONFLICT(instance_host, instance_port, zone) \ + DO UPDATE SET instance_status='available', specialized_prover_group_id=$3, zone=$4, updated_at=now()", + format!("{}",address.host), + address.port as i32, + specialized_prover_group_id as i16, + zone + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn update_prover_instance_status( + &mut self, + address: SocketAddress, + status: GpuProverInstanceStatus, + zone: String, + ) { + sqlx::query!( + "UPDATE gpu_prover_queue_fri \ + SET instance_status = $1, updated_at = now() \ + WHERE instance_host = $2::text::inet \ + AND instance_port = $3 \ + AND zone = $4 + ", + format!("{:?}", status).to_lowercase(), + format!("{}", address.host), + address.port as i32, + zone + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn update_prover_instance_from_full_to_available( + &mut self, + address: SocketAddress, + zone: String, + ) { + sqlx::query!( + "UPDATE gpu_prover_queue_fri \ + SET instance_status = 'available', updated_at = now() \ + WHERE instance_host = $1::text::inet \ + AND instance_port = $2 \ + AND instance_status = 'full' \ + AND zone = $3 + ", + format!("{}", address.host), + address.port as i32, + zone + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } +} diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index c30edc51182a..761a128407fc 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -1,13 +1,17 @@ -use std::collections::HashMap; -use std::convert::TryFrom; -use std::time::{Duration, Instant}; -use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; +use std::{collections::HashMap, convert::TryFrom, time::Duration}; -use zksync_types::proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs}; -use zksync_types::L1BatchNumber; +use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; +use zksync_types::{ + proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs}, + L1BatchNumber, +}; -use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; -use crate::StorageProcessor; +use crate::{ + instrument::{InstrumentExt, MethodLatency}, + time_utils::duration_to_naive_time, + time_utils::pg_interval_from_duration, + StorageProcessor, +}; #[derive(Debug)] pub struct FriProverDal<'a, 'c> { @@ -22,7 +26,7 @@ impl FriProverDal<'_, '_> { aggregation_round: AggregationRound, depth: u16, ) { - let started_at = Instant::now(); + let latency = MethodLatency::new("save_fri_prover_jobs"); for (sequence_number, (circuit_id, circuit_blob_url)) in circuit_ids_and_urls.iter().enumerate() { @@ -37,11 +41,11 @@ impl FriProverDal<'_, '_> { ) .await; } - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_fri_prover_jobs"); + drop(latency); } pub async fn get_next_job(&mut self) -> Option { - let result: Option = sqlx::query!( + sqlx::query!( " UPDATE prover_jobs_fri SET status = 'in_progress', attempts = attempts + 1, @@ -60,19 +64,18 @@ impl FriProverDal<'_, '_> { prover_jobs_fri.is_node_final_proof ", ) - .fetch_optional(self.storage.conn()) - .await - 
.unwrap() - .map(|row| FriProverJobMetadata { - id: row.id as u32, - block_number: L1BatchNumber(row.l1_batch_number as u32), - circuit_id: row.circuit_id as u8, - aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), - sequence_number: row.sequence_number as usize, - depth: row.depth as u16, - is_node_final_proof: row.is_node_final_proof, - }); - result + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) } pub async fn get_next_job_for_circuit_id_round( @@ -87,7 +90,7 @@ impl FriProverDal<'_, '_> { .iter() .map(|tuple| tuple.aggregation_round as i16) .collect(); - let result: Option = sqlx::query!( + sqlx::query!( " UPDATE prover_jobs_fri SET status = 'in_progress', attempts = attempts + 1, @@ -122,8 +125,7 @@ impl FriProverDal<'_, '_> { sequence_number: row.sequence_number as usize, depth: row.depth as u16, is_node_final_proof: row.is_node_final_proof, - }); - result + }) } pub async fn save_proof_error(&mut self, id: u32, error: String) { @@ -149,8 +151,7 @@ impl FriProverDal<'_, '_> { time_taken: Duration, blob_url: &str, ) -> FriProverJobMetadata { - let started_at = Instant::now(); - let result = sqlx::query!( + sqlx::query!( " UPDATE prover_jobs_fri SET status = 'successful', updated_at = now(), time_taken = $1, proof_blob_url=$2 @@ -159,26 +160,26 @@ impl FriProverDal<'_, '_> { prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth, prover_jobs_fri.is_node_final_proof ", - duration_to_naive_time(time_taken), - blob_url, - id as i64, - ) + duration_to_naive_time(time_taken), + blob_url, + id as i64, + ) + .instrument("save_fri_proof") + .report_latency() + .with_arg("id", &id) .fetch_optional(self.storage.conn()) .await .unwrap() - .map(|row| FriProverJobMetadata { - id: row.id as u32, - block_number: L1BatchNumber(row.l1_batch_number as u32), - circuit_id: row.circuit_id as u8, - aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), - sequence_number: row.sequence_number as usize, - depth: row.depth as u16, - is_node_final_proof: row.is_node_final_proof, - }) - .unwrap(); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_fri_proof"); - result + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) + .unwrap() } pub async fn requeue_stuck_jobs( @@ -226,14 +227,14 @@ impl FriProverDal<'_, '_> { ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) DO UPDATE SET updated_at=now() ", - l1_batch_number.0 as i64, - circuit_id as i16, - circuit_blob_url, - aggregation_round as i64, - sequence_number as i64, - depth as i32, - is_node_final_proof, - ) + l1_batch_number.0 as i64, + circuit_id as i16, + circuit_blob_url, + aggregation_round as i64, + sequence_number as i64, + depth as i32, + is_node_final_proof, + ) .execute(self.storage.conn()) .await 
.unwrap(); @@ -295,4 +296,17 @@ impl FriProverDal<'_, '_> { .collect() } } + + pub async fn update_status(&mut self, id: u32, status: &str) { + sqlx::query!( + "UPDATE prover_jobs_fri \ + SET status = $1, updated_at = now() \ + WHERE id = $2", + status, + id as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } } diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs index ca88abbcbc6b..801d66f8d4ed 100644 --- a/core/lib/dal/src/fri_witness_generator_dal.rs +++ b/core/lib/dal/src/fri_witness_generator_dal.rs @@ -1,15 +1,20 @@ use sqlx::Row; -use std::collections::HashMap; -use std::time::{Duration, Instant}; -use zksync_types::proofs::{ - AggregationRound, JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, - StuckJobs, +use std::{collections::HashMap, time::Duration}; + +use zksync_types::{ + proofs::{ + AggregationRound, JobCountStatistics, LeafAggregationJobMetadata, + NodeAggregationJobMetadata, StuckJobs, + }, + L1BatchNumber, }; -use zksync_types::L1BatchNumber; -use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; -use crate::StorageProcessor; +use crate::{ + instrument::MethodLatency, + time_utils::{duration_to_naive_time, pg_interval_from_duration}, + StorageProcessor, +}; #[derive(Debug)] pub struct FriWitnessGeneratorDal<'a, 'c> { @@ -191,7 +196,7 @@ impl FriWitnessGeneratorDal<'_, '_> { base_layer_to_recursive_layer_circuit_id: fn(u8) -> u8, ) { { - let started_at = Instant::now(); + let latency = MethodLatency::new("create_aggregation_jobs_fri"); for (circuit_id, closed_form_inputs_url, number_of_basic_circuits) in closed_form_inputs_and_urls { @@ -208,9 +213,10 @@ impl FriWitnessGeneratorDal<'_, '_> { closed_form_inputs_url, *number_of_basic_circuits as i32, ) - .execute(self.storage.conn()) - .await - .unwrap(); + .execute(self.storage.conn()) + .await + .unwrap(); + self.insert_node_aggregation_jobs( block_number, base_layer_to_recursive_layer_circuit_id(*circuit_id), @@ -229,12 +235,12 @@ impl FriWitnessGeneratorDal<'_, '_> { ON CONFLICT(l1_batch_number) DO UPDATE SET updated_at=now() ", - block_number.0 as i64, - scheduler_partial_input_blob_url, - ) - .execute(self.storage.conn()) - .await - .unwrap(); + block_number.0 as i64, + scheduler_partial_input_blob_url, + ) + .execute(self.storage.conn()) + .await + .unwrap(); sqlx::query!( " @@ -250,7 +256,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .await .unwrap(); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "create_aggregation_jobs_fri"); + drop(latency); } } @@ -275,6 +281,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap()?; + let block_number = L1BatchNumber(row.l1_batch_number as u32); let proof_job_ids = self .prover_job_ids_for( @@ -340,13 +347,13 @@ impl FriWitnessGeneratorDal<'_, '_> { HAVING COUNT(*) = lawj.number_of_basic_circuits) RETURNING l1_batch_number, circuit_id; "#, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number, row.circuit_id as u8)) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number, row.circuit_id as u8)) + .collect() } pub async fn update_node_aggregation_jobs_url( @@ -493,13 +500,13 @@ impl FriWitnessGeneratorDal<'_, '_> { HAVING COUNT(*) = nawj.number_of_dependent_jobs) RETURNING l1_batch_number, circuit_id, depth; "#, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - 
.into_iter() - .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16)) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16)) + .collect() } pub async fn move_depth_non_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> { @@ -521,13 +528,13 @@ impl FriWitnessGeneratorDal<'_, '_> { HAVING COUNT(*) = nawj.number_of_dependent_jobs) RETURNING l1_batch_number, circuit_id, depth; "#, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16)) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16)) + .collect() } pub async fn requeue_stuck_leaf_aggregations_jobs( &mut self, processing_timeout: Duration, max_attempts: u32, ) -> Vec<StuckJobs> { @@ -546,13 +553,13 @@ impl FriWitnessGeneratorDal<'_, '_> { ", &processing_timeout, max_attempts as i32, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 }) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() } pub async fn requeue_stuck_node_aggregations_jobs( &mut self, processing_timeout: Duration, max_attempts: u32, ) -> Vec<StuckJobs> { @@ -571,13 +578,13 @@ impl FriWitnessGeneratorDal<'_, '_> { ", &processing_timeout, max_attempts as i32, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 }) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() } pub async fn mark_scheduler_jobs_as_queued(&mut self, l1_batch_number: i64) { @@ -612,13 +619,13 @@ impl FriWitnessGeneratorDal<'_, '_> { ", &processing_timeout, max_attempts as i32, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { id: row.l1_batch_number as u64, status: row.status, attempts: row.attempts as u64 }) - .collect() + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.l1_batch_number as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() } pub async fn get_next_scheduler_witness_job(&mut self) -> Option<L1BatchNumber> { diff --git a/core/lib/dal/src/gpu_prover_queue_dal.rs b/core/lib/dal/src/gpu_prover_queue_dal.rs index 55a998ee5c35..cc769ff30087 100644 --- a/core/lib/dal/src/gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/gpu_prover_queue_dal.rs @@ -1,33 +1,15 @@ -use std::net::IpAddr; use std::time::Duration; use crate::time_utils::pg_interval_from_duration; use crate::StorageProcessor; use std::collections::HashMap; +use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; #[derive(Debug)] pub struct GpuProverQueueDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, } -#[derive(Debug, Clone)] -pub struct SocketAddress { - pub host: IpAddr, - pub port: u16, -} - -#[derive(Debug)] -pub enum GpuProverInstanceStatus { - // The instance is available for processing. - Available, - // The instance is running at full capacity. - Full, - // The instance is reserved by a synthesizer. - Reserved, - // The instance is not alive anymore. - Dead, -} - impl GpuProverQueueDal<'_, '_> { pub async fn lock_available_prover( &mut self,
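Both the legacy and the FRI GPU prover queues above hand out work through the same Postgres idiom: an `UPDATE ... RETURNING` whose target rows come from a subquery ending in `FOR UPDATE SKIP LOCKED`, so concurrent pickers neither block on nor double-claim each other's rows, while a staleness predicate reclaims jobs whose worker died mid-processing. A minimal sketch against a hypothetical `jobs` table (not a table from this schema):

```rust
use sqlx::PgConnection;

/// Claims the next available job, or a stale in-progress one, returning its id.
/// `SKIP LOCKED` makes each concurrent worker land on a different row instead of
/// waiting on (or re-claiming) a row another worker has already locked.
async fn claim_next_job(conn: &mut PgConnection) -> sqlx::Result<Option<i64>> {
    let claimed: Option<(i64,)> = sqlx::query_as(
        "UPDATE jobs SET status = 'in_progress', updated_at = now() \
         WHERE id = ( \
             SELECT id FROM jobs \
             WHERE status = 'queued' \
                OR (status = 'in_progress' AND updated_at < now() - interval '10 minutes') \
             ORDER BY id \
             LIMIT 1 \
             FOR UPDATE SKIP LOCKED \
         ) \
         RETURNING id",
    )
    .fetch_optional(conn)
    .await?;
    Ok(claimed.map(|(id,)| id))
}
```

The pick ordering (`ORDER BY updated_at` in `lock_available_prover` above, `ORDER BY id` here) only decides fairness; mutual exclusion comes entirely from the row locks.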
diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/dal/src/healthcheck.rs
index 48ffe001a9b6..d932640fcb7c 100644
--- a/core/lib/dal/src/healthcheck.rs
+++ b/core/lib/dal/src/healthcheck.rs
@@ -1,5 +1,22 @@
+use serde::Serialize;
+use sqlx::PgPool;
+
+use zksync_health_check::{async_trait, CheckHealth, Health, HealthStatus};
+
 use crate::ConnectionPool;
-use zksync_health_check::{CheckHealth, CheckHealthStatus};
+
+#[derive(Debug, Serialize)]
+struct ConnectionPoolHealthDetails {
+    pool_size: u32,
+}
+
+impl ConnectionPoolHealthDetails {
+    async fn new(pool: &PgPool) -> Self {
+        Self {
+            pool_size: pool.size(),
+        }
+    }
+}
 
 // HealthCheck used to verify if we can connect to the database.
 // This guarantees that the app can use its main "communication" channel.
@@ -15,12 +32,22 @@ impl ConnectionPoolHealthCheck {
     }
 }
 
-#[zksync_health_check::async_trait]
+#[async_trait]
 impl CheckHealth for ConnectionPoolHealthCheck {
-    async fn check_health(&self) -> CheckHealthStatus {
+    fn name(&self) -> &'static str {
+        "connection_pool"
+    }
+
+    async fn check_health(&self) -> Health {
         // This check is rather feeble; we plan to make it more reliable here:
         // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check
-        let _ = self.connection_pool.access_storage().await;
-        CheckHealthStatus::Ready
+        self.connection_pool.access_storage().await;
+
+        let mut health = Health::from(HealthStatus::Ready);
+        if let ConnectionPool::Real(pool) = &self.connection_pool {
+            let details = ConnectionPoolHealthDetails::new(pool).await;
+            health = health.with_details(details);
+        }
+        health
     }
 }
diff --git a/core/lib/dal/src/instrument.rs b/core/lib/dal/src/instrument.rs
new file mode 100644
index 000000000000..f5b33f2977a1
--- /dev/null
+++ b/core/lib/dal/src/instrument.rs
@@ -0,0 +1,305 @@
+//! DAL query instrumentation.
+
+use sqlx::{
+    postgres::{PgConnection, PgQueryResult, PgRow},
+    query::{Map, Query, QueryAs},
+    FromRow, IntoArguments, Postgres,
+};
+use tokio::time::{Duration, Instant};
+use zksync_types::web3::futures::pin_mut;
+
+use std::{fmt, future::Future, panic::Location, thread};
+
+type ThreadSafeDebug<'a> = dyn fmt::Debug + Send + Sync + 'a;
+
+const SLOW_QUERY_TIMEOUT: Duration = Duration::from_millis(100);
+
+/// Reporter of latency for DAL methods consisting of multiple DB queries. If there's a single query,
+/// use `.instrument().report_latency()` on it instead.
+///
+/// Should be created at the start of the relevant method and dropped when the latency needs to be reported.
+#[derive(Debug)]
+pub(crate) struct MethodLatency {
+    name: &'static str,
+    started_at: Instant,
+}
+
+impl MethodLatency {
+    pub fn new(name: &'static str) -> Self {
+        Self {
+            name,
+            started_at: Instant::now(),
+        }
+    }
+}
+
+impl Drop for MethodLatency {
+    fn drop(&mut self) {
+        if !thread::panicking() {
+            metrics::histogram!("dal.request", self.started_at.elapsed(), "method" => self.name);
+        }
+    }
+}
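`MethodLatency` above is an RAII guard: construction records the start time, and the `Drop` impl reports the `dal.request` histogram unless the thread is panicking. A sketch of the intended call pattern for multi-query DAL methods (hypothetical method body; cf. the reworked `save_fri_prover_jobs` above):

```rust
async fn save_batch(/* &mut self, items: &[...] */) {
    let latency = MethodLatency::new("save_batch"); // hypothetical method name
    // ... execute several sqlx queries; all fall under one measurement ...
    drop(latency); // the histogram is reported here (skipped if panicking)
}
```

An early return or `?` before the explicit `drop` would still report the latency, since the guard is dropped on any scope exit.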
+
+/// Logged arguments for an SQL query.
+#[derive(Debug, Default)]
+struct QueryArgs<'a> {
+    inner: Vec<(&'static str, &'a ThreadSafeDebug<'a>)>,
+}
+
+impl fmt::Display for QueryArgs<'_> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.inner.is_empty() {
+            Ok(())
+        } else {
+            formatter.write_str("(")?;
+            for (i, (name, value)) in self.inner.iter().enumerate() {
+                write!(formatter, "{name}={value:?}")?;
+                if i + 1 < self.inner.len() {
+                    formatter.write_str(", ")?;
+                }
+            }
+            formatter.write_str(")")
+        }
+    }
+}
+
+/// Extension trait for instrumenting `sqlx::query!` outputs.
+pub(crate) trait InstrumentExt: Sized {
+    /// Instruments a query, assigning it the provided name.
+    fn instrument(self, name: &'static str) -> Instrumented<'static, Self>;
+}
+
+impl<'q, A> InstrumentExt for Query<'q, Postgres, A>
+where
+    A: 'q + IntoArguments<'q, Postgres>,
+{
+    #[track_caller]
+    fn instrument(self, name: &'static str) -> Instrumented<'static, Self> {
+        Instrumented {
+            query: self,
+            data: InstrumentedData::new(name, Location::caller()),
+        }
+    }
+}
+
+impl<'q, O, A> InstrumentExt for QueryAs<'q, Postgres, O, A>
+where
+    A: 'q + IntoArguments<'q, Postgres>,
+{
+    #[track_caller]
+    fn instrument(self, name: &'static str) -> Instrumented<'static, Self> {
+        Instrumented {
+            query: self,
+            data: InstrumentedData::new(name, Location::caller()),
+        }
+    }
+}
+
+impl<'q, F, O, A> InstrumentExt for Map<'q, Postgres, F, A>
+where
+    F: FnMut(PgRow) -> Result<O, sqlx::Error> + Send,
+    O: Send + Unpin,
+    A: 'q + Send + IntoArguments<'q, Postgres>,
+{
+    #[track_caller]
+    fn instrument(self, name: &'static str) -> Instrumented<'static, Self> {
+        Instrumented {
+            query: self,
+            data: InstrumentedData::new(name, Location::caller()),
+        }
+    }
+}
+
+#[derive(Debug)]
+struct InstrumentedData<'a> {
+    name: &'static str,
+    location: &'static Location<'static>,
+    args: QueryArgs<'a>,
+    report_latency: bool,
+}
+
+impl<'a> InstrumentedData<'a> {
+    fn new(name: &'static str, location: &'static Location<'static>) -> Self {
+        Self {
+            name,
+            location,
+            args: QueryArgs::default(),
+            report_latency: false,
+        }
+    }
+
+    async fn fetch<R>(
+        self,
+        query_future: impl Future<Output = Result<R, sqlx::Error>>,
+    ) -> Result<R, sqlx::Error> {
+        let Self {
+            name,
+            location,
+            args,
+            report_latency,
+        } = self;
+        let started_at = Instant::now();
+        pin_mut!(query_future);
+
+        let mut is_slow = false;
+        let output =
+            tokio::time::timeout_at(started_at + SLOW_QUERY_TIMEOUT, &mut query_future).await;
+        let output = match output {
+            Ok(output) => output,
+            Err(_) => {
+                vlog::warn!(
+                    "Query {name}{args} called at {file}:{line} is executing for more than {SLOW_QUERY_TIMEOUT:?}",
+                    file = location.file(),
+                    line = location.line()
+                );
+                metrics::increment_counter!("dal.request.slow", "method" => name);
+                is_slow = true;
+                query_future.await
+            }
+        };
+
+        let elapsed = started_at.elapsed();
+        if report_latency {
+            metrics::histogram!("dal.request", elapsed, "method" => name);
+        }
+
+        if let Err(err) = &output {
+            vlog::warn!(
+                "Query {name}{args} called at {file}:{line} has resulted in error: {err}",
+                file = location.file(),
+                line = location.line()
+            );
+            metrics::increment_counter!("dal.request.error", "method" => name);
+        } else if is_slow {
+            vlog::info!(
+                "Slow query {name}{args} called at {file}:{line} has finished after {elapsed:?}",
+                file = location.file(),
+                line = location.line()
+            );
+        }
+        output
+    }
+}
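A sketch of how the extension trait is meant to be used at a call site, assuming a `&mut PgConnection` obtained from a `StorageProcessor`; the query, its name, and the wrapper function are illustrative, not taken from the codebase (real DAL methods typically go through the `sqlx::query!` macros):

```rust
use sqlx::PgConnection;

/// Hypothetical helper showing the builder chain: name the query, opt in to
/// latency reporting, and attach args that are logged only on slow or failed runs.
async fn miniblock_exists(conn: &mut PgConnection, number: i64) -> Result<bool, sqlx::Error> {
    let row = sqlx::query("SELECT 1 FROM miniblocks WHERE number = $1")
        .bind(number)
        .instrument("miniblock_exists") // name used in logs and metric labels
        .report_latency() // record the `dal.request` histogram on every call
        .with_arg("number", &number) // logged only if the query is slow or errors
        .fetch_optional(conn)
        .await?;
    Ok(row.is_some())
}
```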
+
+/// Instrumented `sqlx` query that wraps and can be used as a drop-in replacement for `sqlx::query!` / `query_as!` output
+/// (i.e., [`Map`]).
+///
+/// The following instrumentation logic is included:
+///
+/// - If the query executes for too long, it is logged with a `WARN` level. The logged info includes
+///   the query name, its args provided via [`Self::with_arg()`] and the caller location.
+/// - If the query returns an error, it is logged with a `WARN` level. The logged info is everything
+///   included in the case of a slow query, plus the error info.
+/// - Slow and erroneous queries are also reported using metrics (`dal.request.slow` and `dal.request.error`,
+///   respectively). The query name is included as a metric label; args are not included for obvious reasons.
+#[derive(Debug)]
+pub(crate) struct Instrumented<'a, Q> {
+    query: Q,
+    data: InstrumentedData<'a>,
+}
+
+impl<'a, Q> Instrumented<'a, Q> {
+    /// Indicates that latency should be reported for all calls.
+    pub fn report_latency(mut self) -> Self {
+        self.data.report_latency = true;
+        self
+    }
+
+    /// Adds a traced query argument. The argument will be logged (using `Debug`) if the query executes
+    /// too slowly or finishes with an error.
+    pub fn with_arg(mut self, name: &'static str, value: &'a ThreadSafeDebug) -> Self {
+        self.data.args.inner.push((name, value));
+        self
+    }
+}
+
+impl<'q, A> Instrumented<'_, Query<'q, Postgres, A>>
+where
+    A: 'q + IntoArguments<'q, Postgres>,
+{
+    /// Executes an SQL statement using this query.
+    pub async fn execute(self, conn: &mut PgConnection) -> Result<PgQueryResult, sqlx::Error> {
+        self.data.fetch(self.query.execute(conn)).await
+    }
+
+    /// Fetches an optional row using this query.
+    pub async fn fetch_optional(
+        self,
+        conn: &mut PgConnection,
+    ) -> Result<Option<PgRow>, sqlx::Error> {
+        self.data.fetch(self.query.fetch_optional(conn)).await
+    }
+}
+
+impl<'q, O, A> Instrumented<'_, QueryAs<'q, Postgres, O, A>>
+where
+    A: 'q + IntoArguments<'q, Postgres>,
+    O: Send + Unpin + for<'r> FromRow<'r, PgRow>,
+{
+    /// Fetches all rows using this query and collects them into a `Vec`.
+    pub async fn fetch_all(self, conn: &mut PgConnection) -> Result<Vec<O>, sqlx::Error> {
+        self.data.fetch(self.query.fetch_all(conn)).await
+    }
+}
+
+impl<'q, F, O, A> Instrumented<'_, Map<'q, Postgres, F, A>>
+where
+    F: FnMut(PgRow) -> Result<O, sqlx::Error> + Send,
+    O: Send + Unpin,
+    A: 'q + Send + IntoArguments<'q, Postgres>,
+{
+    /// Fetches an optional row using this query.
+    pub async fn fetch_optional(self, conn: &mut PgConnection) -> Result<Option<O>, sqlx::Error> {
+        self.data.fetch(self.query.fetch_optional(conn)).await
+    }
+
+    /// Fetches a single row using this query.
+    pub async fn fetch_one(self, conn: &mut PgConnection) -> Result<O, sqlx::Error> {
+        self.data.fetch(self.query.fetch_one(conn)).await
+    }
+
+    /// Fetches all rows using this query and collects them into a `Vec`.
+    pub async fn fetch_all(self, conn: &mut PgConnection) -> Result<Vec<O>, sqlx::Error> {
+        self.data.fetch(self.query.fetch_all(conn)).await
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use db_test_macro::db_test;
+    use zksync_types::{MiniblockNumber, H256};
+
+    use super::*;
+    use crate::ConnectionPool;
+
+    #[db_test(dal_crate)]
+    async fn instrumenting_erroneous_query(pool: ConnectionPool) {
+        // Add `vlog::init()` here to debug this test
+
+        let mut conn = pool.access_storage().await;
+        sqlx::query("WHAT")
+            .map(drop)
+            .instrument("erroneous")
+            .with_arg("miniblock", &MiniblockNumber(1))
+            .with_arg("hash", &H256::zero())
+            .fetch_optional(conn.conn())
+            .await
+            .unwrap_err();
+    }
+
+    #[db_test(dal_crate)]
+    async fn instrumenting_slow_query(pool: ConnectionPool) {
+        // Add `vlog::init()` here to debug this test
+
+        let mut conn = pool.access_storage().await;
+        sqlx::query("SELECT pg_sleep(1.5)")
+            .map(drop)
+            .instrument("slow")
+            .with_arg("miniblock", &MiniblockNumber(1))
+            .with_arg("hash", &H256::zero())
+            .fetch_optional(conn.conn())
+            .await
+            .unwrap();
+    }
+}
diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs
index 9d8f4066849d..556850ee4a44 100644
--- a/core/lib/dal/src/lib.rs
+++ b/core/lib/dal/src/lib.rs
@@ -10,18 +10,23 @@ use sqlx::pool::PoolConnection;
 pub use sqlx::types::BigDecimal;
 
 // Local imports
+use crate::accounts_dal::AccountsDal;
 use crate::blocks_dal::BlocksDal;
 use crate::blocks_web3_dal::BlocksWeb3Dal;
 pub use crate::connection::ConnectionPool;
 use crate::connection::{holder::ConnectionHolder, test_pool::TestPoolLock};
+use crate::contract_verification_dal::ContractVerificationDal;
 use crate::eth_sender_dal::EthSenderDal;
 use crate::events_dal::EventsDal;
 use crate::events_web3_dal::EventsWeb3Dal;
-use crate::explorer::ExplorerIntermediary;
+use crate::fri_gpu_prover_queue_dal::FriGpuProverQueueDal;
 use crate::fri_prover_dal::FriProverDal;
 use crate::fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal;
 use crate::fri_witness_generator_dal::FriWitnessGeneratorDal;
 use crate::gpu_prover_queue_dal::GpuProverQueueDal;
+use crate::proof_generation_dal::ProofGenerationDal;
+use crate::protocol_versions_dal::ProtocolVersionsDal;
+use crate::protocol_versions_web3_dal::ProtocolVersionsWeb3Dal;
 use crate::prover_dal::ProverDal;
 use crate::storage_dal::StorageDal;
 use crate::storage_logs_dal::StorageLogsDal;
@@ -36,19 +41,25 @@ use crate::witness_generator_dal::WitnessGeneratorDal;
 
 #[macro_use]
 mod macro_utils;
+pub mod accounts_dal;
 pub mod blocks_dal;
 pub mod blocks_web3_dal;
 pub mod connection;
+pub mod contract_verification_dal;
 pub mod eth_sender_dal;
 pub mod events_dal;
 pub mod events_web3_dal;
-pub mod explorer;
+pub mod fri_gpu_prover_queue_dal;
 pub mod fri_prover_dal;
 pub mod fri_scheduler_dependency_tracker_dal;
 pub mod fri_witness_generator_dal;
 pub mod gpu_prover_queue_dal;
 pub mod healthcheck;
+mod instrument;
 mod models;
+pub mod proof_generation_dal;
+pub mod protocol_versions_dal;
+pub mod protocol_versions_web3_dal;
 pub mod prover_dal;
 pub mod storage_dal;
 pub mod storage_logs_dal;
@@ -171,6 +182,10 @@ impl<'a> StorageProcessor<'a> {
         TransactionsWeb3Dal { storage: self }
     }
 
+    pub fn accounts_dal(&mut self) -> AccountsDal<'_, 'a> {
+        AccountsDal { storage: self }
+    }
+
     pub fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> {
         BlocksDal { storage: self }
     }
@@ -223,14 +238,22 @@ impl<'a> StorageProcessor<'a> {
         WitnessGeneratorDal { storage: self }
     }
 
-    pub fn explorer(&mut self) -> ExplorerIntermediary<'_, 'a> {
-        ExplorerIntermediary {
storage: self } + pub fn contract_verification_dal(&mut self) -> ContractVerificationDal<'_, 'a> { + ContractVerificationDal { storage: self } } pub fn gpu_prover_queue_dal(&mut self) -> GpuProverQueueDal<'_, 'a> { GpuProverQueueDal { storage: self } } + pub fn protocol_versions_dal(&mut self) -> ProtocolVersionsDal<'_, 'a> { + ProtocolVersionsDal { storage: self } + } + + pub fn protocol_versions_web3_dal(&mut self) -> ProtocolVersionsWeb3Dal<'_, 'a> { + ProtocolVersionsWeb3Dal { storage: self } + } + pub fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a> { FriWitnessGeneratorDal { storage: self } } @@ -248,4 +271,12 @@ impl<'a> StorageProcessor<'a> { ) -> FriSchedulerDependencyTrackerDal<'_, 'a> { FriSchedulerDependencyTrackerDal { storage: self } } + + pub fn proof_generation_dal(&mut self) -> ProofGenerationDal<'_, 'a> { + ProofGenerationDal { storage: self } + } + + pub fn fri_gpu_prover_queue_dal(&mut self) -> FriGpuProverQueueDal<'_, 'a> { + FriGpuProverQueueDal { storage: self } + } } diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index b3f1ab7ad8c9..f6ebb6fc7810 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,11 +1,10 @@ pub mod storage_block; -pub mod storage_contract; pub mod storage_eth_tx; pub mod storage_event; pub mod storage_fee_monitor; pub mod storage_log; +pub mod storage_protocol_version; pub mod storage_prover_job_info; -pub mod storage_state_record; pub mod storage_sync; pub mod storage_token; pub mod storage_transaction; diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 185560d4a0ea..2dcef11ce85a 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -1,50 +1,126 @@ -use std::convert::TryInto; -use std::str::FromStr; +use std::{convert::TryInto, str::FromStr}; use bigdecimal::{BigDecimal, ToPrimitive}; -use sqlx::postgres::PgArguments; -use sqlx::query::Query; -use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; -use sqlx::Postgres; +use sqlx::{ + postgres::{PgArguments, Postgres}, + query::Query, + types::chrono::{DateTime, NaiveDateTime, Utc}, +}; use thiserror::Error; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::api; -use zksync_types::block::MiniblockHeader; -use zksync_types::commitment::{BlockMetaParameters, BlockMetadata}; -use zksync_types::explorer_api::{BlockDetails, L1BatchDetails, L1BatchPageItem}; use zksync_types::{ - block::L1BatchHeader, - explorer_api::{BlockPageItem, BlockStatus}, + api, + block::{L1BatchHeader, MiniblockHeader}, + commitment::{L1BatchMetaParameters, L1BatchMetadata}, l2_to_l1_log::L2ToL1Log, - Address, L1BatchNumber, MiniblockNumber, H2048, H256, U256, + Address, L1BatchNumber, MiniblockNumber, H2048, H256, }; #[derive(Debug, Error)] -pub enum StorageBlockConvertError { - #[error("Incomplete block")] - IncompleteBlock, +pub enum StorageL1BatchConvertError { + #[error("Incomplete L1 batch")] + Incomplete, } +/// Projection of the `l1_batches` table corresponding to [`L1BatchHeader`]. 
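+///
+/// Raw `Vec<u8>` columns hold hashes and addresses exactly as they are stored in Postgres;
+/// the `From` impl below converts them into typed `H256` / `Address` values.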
 #[derive(Debug, Clone, sqlx::FromRow)]
-pub struct StorageBlock {
+pub struct StorageL1BatchHeader {
     pub number: i64,
     pub timestamp: i64,
     pub is_finished: bool,
     pub l1_tx_count: i32,
     pub l2_tx_count: i32,
     pub fee_account_address: Vec<u8>,
-    pub bloom: Vec<u8>,
     pub l2_to_l1_logs: Vec<Vec<u8>>,
+    pub l2_to_l1_messages: Vec<Vec<u8>>,
+    pub bloom: Vec<u8>,
     pub priority_ops_onchain_data: Vec<Vec<u8>>,
+    pub used_contract_hashes: serde_json::Value,
+    pub base_fee_per_gas: BigDecimal,
+    pub l1_gas_price: i64,
+    pub l2_fair_gas_price: i64,
+    pub bootloader_code_hash: Option<Vec<u8>>,
+    pub default_aa_code_hash: Option<Vec<u8>>,
+    pub protocol_version: Option<i32>,
+}
+
+impl From<StorageL1BatchHeader> for L1BatchHeader {
+    fn from(l1_batch: StorageL1BatchHeader) -> Self {
+        let priority_ops_onchain_data: Vec<_> = l1_batch
+            .priority_ops_onchain_data
+            .into_iter()
+            .map(|raw_data| raw_data.into())
+            .collect();
+
+        L1BatchHeader {
+            number: L1BatchNumber(l1_batch.number as u32),
+            is_finished: l1_batch.is_finished,
+            timestamp: l1_batch.timestamp as u64,
+            fee_account_address: Address::from_slice(&l1_batch.fee_account_address),
+            priority_ops_onchain_data,
+            l1_tx_count: l1_batch.l1_tx_count as u16,
+            l2_tx_count: l1_batch.l2_tx_count as u16,
+            l2_to_l1_logs: convert_l2_to_l1_logs(l1_batch.l2_to_l1_logs),
+            l2_to_l1_messages: l1_batch.l2_to_l1_messages,
+
+            bloom: H2048::from_slice(&l1_batch.bloom),
+            used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes)
+                .expect("invalid value for used_contract_hashes in the DB"),
+            base_fee_per_gas: l1_batch
+                .base_fee_per_gas
+                .to_u64()
+                .expect("base_fee_per_gas should fit in u64"),
+            base_system_contracts_hashes: convert_base_system_contracts_hashes(
+                l1_batch.bootloader_code_hash,
+                l1_batch.default_aa_code_hash,
+            ),
+            l1_gas_price: l1_batch.l1_gas_price as u64,
+            l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64,
+            protocol_version: l1_batch
+                .protocol_version
+                .map(|v| (v as u16).try_into().unwrap()),
+        }
+    }
+}
+
+fn convert_l2_to_l1_logs(raw_logs: Vec<Vec<u8>>) -> Vec<L2ToL1Log> {
+    raw_logs
+        .into_iter()
+        .map(|raw_log| L2ToL1Log::from_slice(&raw_log))
+        .collect()
+}
 
-    pub created_at: NaiveDateTime,
-    pub updated_at: NaiveDateTime,
+fn convert_base_system_contracts_hashes(
+    bootloader_code_hash: Option<Vec<u8>>,
+    default_aa_code_hash: Option<Vec<u8>>,
+) -> BaseSystemContractsHashes {
+    BaseSystemContractsHashes {
+        bootloader: bootloader_code_hash
+            .map(|hash| H256::from_slice(&hash))
+            .expect("should not be none"),
+        default_aa: default_aa_code_hash
+            .map(|hash| H256::from_slice(&hash))
+            .expect("should not be none"),
+    }
+}
+
+/// Projection of the `l1_batches` table corresponding to [`L1BatchHeader`] + [`L1BatchMetadata`].
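+///
+/// Unlike the header-only projection above, this one also selects the metadata columns
+/// (root hash, commitment, compressed writes, etc.). Those columns are nullable because they
+/// are filled in only after the Merkle tree and commitment computations run, which is why the
+/// conversion into `L1BatchMetadata` below is a fallible `TryInto` returning
+/// `StorageL1BatchConvertError::Incomplete` for not-yet-processed batches.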
+#[derive(Debug, Clone, sqlx::FromRow)] +pub struct StorageL1Batch { + pub number: i64, + pub timestamp: i64, + pub is_finished: bool, + pub l1_tx_count: i32, + pub l2_tx_count: i32, + pub fee_account_address: Vec, + pub bloom: Vec, + pub l2_to_l1_logs: Vec>, + pub priority_ops_onchain_data: Vec>, pub parent_hash: Option>, pub hash: Option>, pub merkle_root_hash: Option>, - pub commitment: Option>, pub meta_parameters_hash: Option>, pub pass_through_data_hash: Option>, @@ -67,204 +143,122 @@ pub struct StorageBlock { pub eth_commit_tx_id: Option, pub eth_execute_tx_id: Option, - pub predicted_commit_gas_cost: i64, - pub predicted_prove_gas_cost: i64, - pub predicted_execute_gas_cost: i64, - - pub initial_bootloader_heap_content: serde_json::Value, pub used_contract_hashes: serde_json::Value, pub base_fee_per_gas: BigDecimal, pub l1_gas_price: i64, pub l2_fair_gas_price: i64, - // These fields are not used, but are present for compatibility reasons - pub gas_per_pubdata_byte_in_block: Option, - pub gas_per_pubdata_limit: i64, - - pub skip_proof: bool, + pub protocol_version: Option, } -impl From for L1BatchHeader { - fn from(block: StorageBlock) -> Self { - let priority_ops_onchain_data: Vec<_> = block +impl From for L1BatchHeader { + fn from(l1_batch: StorageL1Batch) -> Self { + let priority_ops_onchain_data: Vec<_> = l1_batch .priority_ops_onchain_data .into_iter() - .map(|raw_data| raw_data.into()) - .collect(); - - let l2_to_l1_logs: Vec<_> = block - .l2_to_l1_logs - .into_iter() - .map(|raw_log| L2ToL1Log::from_slice(&raw_log)) + .map(Vec::into) .collect(); L1BatchHeader { - number: L1BatchNumber(block.number as u32), - is_finished: block.is_finished, - timestamp: block.timestamp as u64, - fee_account_address: Address::from_slice(&block.fee_account_address), + number: L1BatchNumber(l1_batch.number as u32), + is_finished: l1_batch.is_finished, + timestamp: l1_batch.timestamp as u64, + fee_account_address: Address::from_slice(&l1_batch.fee_account_address), priority_ops_onchain_data, - l1_tx_count: block.l1_tx_count as u16, - l2_tx_count: block.l2_tx_count as u16, - l2_to_l1_logs, - l2_to_l1_messages: block.l2_to_l1_messages, - - bloom: H2048::from_slice(&block.bloom), - initial_bootloader_contents: serde_json::from_value::>( - block.initial_bootloader_heap_content, - ) - .expect("invalid value for initial_bootloader_heap_content in the DB"), - used_contract_hashes: serde_json::from_value::>(block.used_contract_hashes) + l1_tx_count: l1_batch.l1_tx_count as u16, + l2_tx_count: l1_batch.l2_tx_count as u16, + l2_to_l1_logs: convert_l2_to_l1_logs(l1_batch.l2_to_l1_logs), + l2_to_l1_messages: l1_batch.l2_to_l1_messages, + + bloom: H2048::from_slice(&l1_batch.bloom), + used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), - base_fee_per_gas: block + base_fee_per_gas: l1_batch .base_fee_per_gas .to_u64() .expect("base_fee_per_gas should fit in u64"), - base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: block - .bootloader_code_hash - .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) - .expect("should not be none"), - default_aa: block - .default_aa_code_hash - .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) - .expect("should not be none"), - }, - l1_gas_price: block.l1_gas_price as u64, - l2_fair_gas_price: block.l2_fair_gas_price as u64, + base_system_contracts_hashes: convert_base_system_contracts_hashes( + l1_batch.bootloader_code_hash, + 
l1_batch.default_aa_code_hash, + ), + l1_gas_price: l1_batch.l1_gas_price as u64, + l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64, + protocol_version: l1_batch + .protocol_version + .map(|v| (v as u16).try_into().unwrap()), } } } -impl TryInto for StorageBlock { - type Error = StorageBlockConvertError; +impl TryInto for StorageL1Batch { + type Error = StorageL1BatchConvertError; - fn try_into(self) -> Result { - Ok(BlockMetadata { - root_hash: H256::from_slice( - &self.hash.ok_or(StorageBlockConvertError::IncompleteBlock)?, - ), + fn try_into(self) -> Result { + Ok(L1BatchMetadata { + root_hash: H256::from_slice(&self.hash.ok_or(StorageL1BatchConvertError::Incomplete)?), rollup_last_leaf_index: self .rollup_last_leaf_index - .ok_or(StorageBlockConvertError::IncompleteBlock)? + .ok_or(StorageL1BatchConvertError::Incomplete)? as u64, merkle_root_hash: H256::from_slice( &self .merkle_root_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), initial_writes_compressed: self .compressed_initial_writes - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, repeated_writes_compressed: self .compressed_repeated_writes - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, l2_l1_messages_compressed: self .l2_l1_compressed_messages - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, l2_l1_merkle_root: H256::from_slice( &self .l2_l1_merkle_root - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), aux_data_hash: H256::from_slice( &self .aux_data_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), meta_parameters_hash: H256::from_slice( &self .meta_parameters_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), pass_through_data_hash: H256::from_slice( &self .pass_through_data_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), commitment: H256::from_slice( &self .commitment - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), - block_meta_params: BlockMetaParameters { + block_meta_params: L1BatchMetaParameters { zkporter_is_available: self .zkporter_is_available - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, bootloader_code_hash: H256::from_slice( &self .bootloader_code_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), default_aa_code_hash: H256::from_slice( &self .default_aa_code_hash - .ok_or(StorageBlockConvertError::IncompleteBlock)?, + .ok_or(StorageL1BatchConvertError::Incomplete)?, ), }, }) } } -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageBlockPageItem { - pub number: i64, - pub l1_tx_count: i32, - pub l2_tx_count: i32, - pub hash: Option>, - pub timestamp: i64, -} - -// At the moment it has the same fields as `StorageBlockPageItem` -// but there are no guarantees it won't change in the future. 
-#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageL1BatchPageItem { - pub number: i64, - pub l1_tx_count: i32, - pub l2_tx_count: i32, - pub hash: Option>, - pub timestamp: i64, -} - -pub fn block_page_item_from_storage( - storage: StorageBlockPageItem, - last_verified: MiniblockNumber, -) -> BlockPageItem { - let status = if storage.number > last_verified.0 as i64 { - BlockStatus::Sealed - } else { - BlockStatus::Verified - }; - BlockPageItem { - number: MiniblockNumber(storage.number as u32), - l1_tx_count: storage.l1_tx_count as usize, - l2_tx_count: storage.l2_tx_count as usize, - hash: storage.hash.map(|hash| H256::from_slice(&hash)), - status, - timestamp: storage.timestamp as u64, - } -} - -pub fn l1_batch_page_item_from_storage( - storage: StorageL1BatchPageItem, - last_verified: L1BatchNumber, -) -> L1BatchPageItem { - let status = if storage.number > last_verified.0 as i64 { - BlockStatus::Sealed - } else { - BlockStatus::Verified - }; - L1BatchPageItem { - number: L1BatchNumber(storage.number as u32), - l1_tx_count: storage.l1_tx_count as usize, - l2_tx_count: storage.l2_tx_count as usize, - root_hash: storage.hash.map(|hash| H256::from_slice(&hash)), - status, - timestamp: storage.timestamp as u64, - } -} - /// Returns block_number SQL statement pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String { match block_number { @@ -344,18 +338,18 @@ pub struct StorageBlockDetails { pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, pub fee_account_address: Option>, // May be None if the block is not yet sealed + pub protocol_version: Option, } impl StorageBlockDetails { - pub(crate) fn into_block_details(self, current_operator_address: Address) -> BlockDetails { + pub(crate) fn into_block_details(self, current_operator_address: Address) -> api::BlockDetails { let status = if self.number == 0 || self.execute_tx_hash.is_some() { - BlockStatus::Verified + api::BlockStatus::Verified } else { - BlockStatus::Sealed + api::BlockStatus::Sealed }; - BlockDetails { - number: MiniblockNumber(self.number as u32), - l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), + + let base = api::BlockDetailsBase { timestamp: self.timestamp as u64, l1_tx_count: self.l1_tx_count as usize, l2_tx_count: self.l2_tx_count as usize, @@ -384,20 +378,22 @@ impl StorageBlockDetails { .map(|executed_at| DateTime::::from_utc(executed_at, Utc)), l1_gas_price: self.l1_gas_price as u64, l2_fair_gas_price: self.l2_fair_gas_price as u64, - base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: self - .bootloader_code_hash - .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) - .expect("should not be none"), - default_aa: self - .default_aa_code_hash - .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) - .expect("should not be none"), - }, + base_system_contracts_hashes: convert_base_system_contracts_hashes( + self.bootloader_code_hash, + self.default_aa_code_hash, + ), + }; + api::BlockDetails { + base, + number: MiniblockNumber(self.number as u32), + l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), operator_address: self .fee_account_address .map(|fee_account_address| Address::from_slice(&fee_account_address)) .unwrap_or(current_operator_address), + protocol_version: self + .protocol_version + .map(|v| (v as u16).try_into().unwrap()), } } } @@ -421,58 +417,51 @@ pub struct StorageL1BatchDetails { pub default_aa_code_hash: Option>, } -impl From for L1BatchDetails { - fn from(storage_l1_batch_details: 
StorageL1BatchDetails) -> Self { - let status = if storage_l1_batch_details.number == 0 - || storage_l1_batch_details.execute_tx_hash.is_some() - { - BlockStatus::Verified +impl From for api::L1BatchDetails { + fn from(details: StorageL1BatchDetails) -> Self { + let status = if details.number == 0 || details.execute_tx_hash.is_some() { + api::BlockStatus::Verified } else { - BlockStatus::Sealed + api::BlockStatus::Sealed }; - L1BatchDetails { - number: L1BatchNumber(storage_l1_batch_details.number as u32), - timestamp: storage_l1_batch_details.timestamp as u64, - l1_tx_count: storage_l1_batch_details.l1_tx_count as usize, - l2_tx_count: storage_l1_batch_details.l2_tx_count as usize, + + let base = api::BlockDetailsBase { + timestamp: details.timestamp as u64, + l1_tx_count: details.l1_tx_count as usize, + l2_tx_count: details.l2_tx_count as usize, status, - root_hash: storage_l1_batch_details - .root_hash - .as_deref() - .map(H256::from_slice), - commit_tx_hash: storage_l1_batch_details + root_hash: details.root_hash.as_deref().map(H256::from_slice), + commit_tx_hash: details .commit_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), - committed_at: storage_l1_batch_details + committed_at: details .committed_at .map(|committed_at| DateTime::::from_utc(committed_at, Utc)), - prove_tx_hash: storage_l1_batch_details + prove_tx_hash: details .prove_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")), - proven_at: storage_l1_batch_details + proven_at: details .proven_at .map(|proven_at| DateTime::::from_utc(proven_at, Utc)), - execute_tx_hash: storage_l1_batch_details + execute_tx_hash: details .execute_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")), - executed_at: storage_l1_batch_details + executed_at: details .executed_at .map(|executed_at| DateTime::::from_utc(executed_at, Utc)), - l1_gas_price: storage_l1_batch_details.l1_gas_price as u64, - l2_fair_gas_price: storage_l1_batch_details.l2_fair_gas_price as u64, - base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: storage_l1_batch_details - .bootloader_code_hash - .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) - .expect("should not be none"), - default_aa: storage_l1_batch_details - .default_aa_code_hash - .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) - .expect("should not be none"), - }, + l1_gas_price: details.l1_gas_price as u64, + l2_fair_gas_price: details.l2_fair_gas_price as u64, + base_system_contracts_hashes: convert_base_system_contracts_hashes( + details.bootloader_code_hash, + details.default_aa_code_hash, + ), + }; + api::L1BatchDetails { + base, + number: L1BatchNumber(details.number as u32), } } } @@ -490,6 +479,7 @@ pub struct StorageMiniblockHeader { // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub protocol_version: Option, } impl From for MiniblockHeader { @@ -503,20 +493,33 @@ impl From for MiniblockHeader { base_fee_per_gas: row.base_fee_per_gas.to_u64().unwrap(), l1_gas_price: row.l1_gas_price as u64, l2_fair_gas_price: row.l2_fair_gas_price as u64, - base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: row - .bootloader_code_hash - .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) - .expect("should not be none"), - default_aa: row - .default_aa_code_hash - .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) - 
.expect("should not be none"), - }, + base_system_contracts_hashes: convert_base_system_contracts_hashes( + row.bootloader_code_hash, + row.default_aa_code_hash, + ), + protocol_version: row.protocol_version.map(|v| (v as u16).try_into().unwrap()), } } } +/// Information about L1 batch which a certain miniblock belongs to. +#[derive(Debug)] +pub struct ResolvedL1BatchForMiniblock { + /// L1 batch which the miniblock belongs to. `None` if the miniblock is not explicitly attached + /// (i.e., its L1 batch is not sealed). + pub miniblock_l1_batch: Option, + /// Pending (i.e., unsealed) L1 batch. + pub pending_l1_batch: L1BatchNumber, +} + +impl ResolvedL1BatchForMiniblock { + /// Returns the L1 batch number that the miniblock has now or will have in the future (provided + /// that the node will operate correctly). + pub fn expected_l1_batch(&self) -> L1BatchNumber { + self.miniblock_l1_batch.unwrap_or(self.pending_l1_batch) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/dal/src/models/storage_contract.rs b/core/lib/dal/src/models/storage_contract.rs deleted file mode 100644 index 67b9bb9e76b8..000000000000 --- a/core/lib/dal/src/models/storage_contract.rs +++ /dev/null @@ -1,17 +0,0 @@ -use zksync_types::vm_trace::ContractSourceDebugInfo; - -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageContractSource { - pub assembly_code: String, - pub pc_line_mapping: serde_json::Value, -} - -impl From for ContractSourceDebugInfo { - fn from(source: StorageContractSource) -> ContractSourceDebugInfo { - ContractSourceDebugInfo { - assembly_code: source.assembly_code, - pc_line_mapping: serde_json::from_value(source.pc_line_mapping) - .expect("invalid pc_line_mapping json in database"), - } - } -} diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs new file mode 100644 index 000000000000..93010f1b8147 --- /dev/null +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -0,0 +1,90 @@ +use std::convert::TryInto; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_types::{ + api, + protocol_version::{self, L1VerifierConfig, ProtocolUpgradeTx, VerifierParams}, + Address, H256, +}; + +use sqlx::types::chrono::NaiveDateTime; + +#[derive(sqlx::FromRow)] +pub struct StorageProtocolVersion { + pub id: i32, + pub timestamp: i64, + pub recursion_scheduler_level_vk_hash: Vec, + pub recursion_node_level_vk_hash: Vec, + pub recursion_leaf_level_vk_hash: Vec, + pub recursion_circuits_set_vks_hash: Vec, + pub bootloader_code_hash: Vec, + pub default_account_code_hash: Vec, + pub verifier_address: Vec, + pub created_at: NaiveDateTime, + pub upgrade_tx_hash: Option>, +} + +pub(crate) fn protocol_version_from_storage( + storage_version: StorageProtocolVersion, + tx: Option, +) -> protocol_version::ProtocolVersion { + protocol_version::ProtocolVersion { + id: (storage_version.id as u16).try_into().unwrap(), + timestamp: storage_version.timestamp as u64, + l1_verifier_config: L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: H256::from_slice( + &storage_version.recursion_node_level_vk_hash, + ), + recursion_leaf_level_vk_hash: H256::from_slice( + &storage_version.recursion_leaf_level_vk_hash, + ), + recursion_circuits_set_vks_hash: H256::from_slice( + &storage_version.recursion_circuits_set_vks_hash, + ), + }, + recursion_scheduler_level_vk_hash: H256::from_slice( + &storage_version.recursion_scheduler_level_vk_hash, + ), + }, + base_system_contracts_hashes: BaseSystemContractsHashes { 
+ bootloader: H256::from_slice(&storage_version.bootloader_code_hash), + default_aa: H256::from_slice(&storage_version.default_account_code_hash), + }, + verifier_address: Address::from_slice(&storage_version.verifier_address), + tx, + } +} + +impl From for api::ProtocolVersion { + fn from(storage_protocol_version: StorageProtocolVersion) -> Self { + let l2_system_upgrade_tx_hash = storage_protocol_version + .upgrade_tx_hash + .as_ref() + .map(|hash| H256::from_slice(hash)); + api::ProtocolVersion { + version_id: storage_protocol_version.id as u16, + timestamp: storage_protocol_version.timestamp as u64, + verification_keys_hashes: L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: H256::from_slice( + &storage_protocol_version.recursion_node_level_vk_hash, + ), + recursion_leaf_level_vk_hash: H256::from_slice( + &storage_protocol_version.recursion_leaf_level_vk_hash, + ), + recursion_circuits_set_vks_hash: H256::from_slice( + &storage_protocol_version.recursion_circuits_set_vks_hash, + ), + }, + recursion_scheduler_level_vk_hash: H256::from_slice( + &storage_protocol_version.recursion_scheduler_level_vk_hash, + ), + }, + base_system_contracts: BaseSystemContractsHashes { + bootloader: H256::from_slice(&storage_protocol_version.bootloader_code_hash), + default_aa: H256::from_slice(&storage_protocol_version.default_account_code_hash), + }, + l2_system_upgrade_tx_hash, + } + } +} diff --git a/core/lib/dal/src/models/storage_state_record.rs b/core/lib/dal/src/models/storage_state_record.rs deleted file mode 100644 index 46a031b1893e..000000000000 --- a/core/lib/dal/src/models/storage_state_record.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageStateRecord { - pub address: Vec, - pub key: Vec, - pub value: Vec, -} diff --git a/core/lib/dal/src/models/storage_token.rs b/core/lib/dal/src/models/storage_token.rs index 0ebd6b95bd15..92eeff39e187 100644 --- a/core/lib/dal/src/models/storage_token.rs +++ b/core/lib/dal/src/models/storage_token.rs @@ -1,5 +1,8 @@ -use sqlx::types::chrono::{DateTime, Utc}; -use sqlx::types::{chrono::NaiveDateTime, BigDecimal}; +use sqlx::types::{ + chrono::{DateTime, NaiveDateTime, Utc}, + BigDecimal, +}; + use zksync_types::tokens::{TokenMarketVolume, TokenMetadata, TokenPrice}; use zksync_utils::big_decimal_to_ratio; diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 9cbd0d17fdf3..65db0e05918a 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -1,25 +1,27 @@ +use std::{convert::TryInto, str::FromStr}; + use crate::BigDecimal; use bigdecimal::Zero; -use itertools::Itertools; -use sqlx::postgres::PgRow; -use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; -use sqlx::Row; - -use std::str::FromStr; -use zksync_types::l2::TransactionType; -use zksync_types::transaction_request::PaymasterParams; -use zksync_types::vm_trace::Call; -use zksync_types::web3::types::U64; -use zksync_types::{api, explorer_api, L2_ETH_TOKEN_ADDRESS}; +use serde::{Deserialize, Serialize}; +use sqlx::{ + postgres::PgRow, + types::chrono::{DateTime, NaiveDateTime, Utc}, + Error, FromRow, Row, +}; + use zksync_types::{ - explorer_api::{BalanceChangeInfo, BalanceChangeType, Erc20TransferInfo, TransactionStatus}, + api::{self, TransactionDetails, TransactionStatus}, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, - Address, Execute, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, 
PackedEthSignature, - PriorityOpId, Transaction, BOOTLOADER_ADDRESS, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, - EIP_712_TX_TYPE, H160, H256, U256, + l2::TransactionType, + protocol_version::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + vm_trace::Call, + web3::types::U64, + Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, + PackedEthSignature, PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, + EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -use zksync_types::{ExecuteTransactionCommon, L1BatchNumber, MiniblockNumber}; use zksync_utils::bigdecimal_to_u256; #[derive(Debug, Clone, sqlx::FromRow)] @@ -63,55 +65,352 @@ pub struct StorageTransaction { pub l1_tx_mint: Option, pub l1_tx_refund_recipient: Option>, + pub upgrade_id: Option, + pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, } +impl From for L1TxCommonData { + fn from(tx: StorageTransaction) -> Self { + let gas_limit = { + let gas_limit_string = tx + .gas_limit + .as_ref() + .expect("gas limit is mandatory for transaction") + .to_string(); + + U256::from_dec_str(&gas_limit_string) + .unwrap_or_else(|_| panic!("Incorrect gas limit value in DB {}", gas_limit_string)) + }; + + let full_fee = { + let full_fee_string = tx + .full_fee + .expect("full fee is mandatory for priority operation") + .to_string(); + + U256::from_dec_str(&full_fee_string) + .unwrap_or_else(|_| panic!("Incorrect full fee value in DB {}", full_fee_string)) + }; + + let layer_2_tip_fee = { + let layer_2_tip_fee_string = tx + .layer_2_tip_fee + .expect("layer 2 tip fee is mandatory for priority operation") + .to_string(); + + U256::from_dec_str(&layer_2_tip_fee_string).unwrap_or_else(|_| { + panic!( + "Incorrect layer 2 tip fee value in DB {}", + layer_2_tip_fee_string + ) + }) + }; + + // Supporting None for compatibility with the old transactions + let to_mint = tx.l1_tx_mint.map(bigdecimal_to_u256).unwrap_or_default(); + // Supporting None for compatibility with the old transactions + let refund_recipient = tx + .l1_tx_refund_recipient + .map(|recipient| Address::from_slice(&recipient)) + .unwrap_or_default(); + + // `tx.hash` represents the transaction hash obtained from the execution results, + // and it should be exactly the same as the canonical tx hash calculated from the + // transaction data, so we don't store it as a separate "canonical_tx_hash" field. 
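+        // In other words, recomputing the hash from the stored transaction data would produce
+        // the same value, so the raw `hash` column can be reused directly below.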
+ let canonical_tx_hash = H256::from_slice(&tx.hash); + + L1TxCommonData { + full_fee, + layer_2_tip_fee, + priority_queue_type: PriorityQueueType::Deque, + op_processing_type: OpProcessingType::Common, + sender: Address::from_slice(&tx.initiator_address), + serial_id: PriorityOpId(tx.priority_op_id.unwrap() as u64), + gas_limit, + max_fee_per_gas: tx + .max_fee_per_gas + .map(bigdecimal_to_u256) + .unwrap_or_default(), + to_mint, + refund_recipient, + // Using 1 for old transactions that did not have the necessary field stored + gas_per_pubdata_limit: tx + .gas_per_pubdata_limit + .map(bigdecimal_to_u256) + .unwrap_or_else(|| U256::from(1u32)), + deadline_block: 0, + eth_hash: Default::default(), + eth_block: tx.l1_block_number.unwrap_or_default() as u64, + canonical_tx_hash, + } + } +} + +impl From for L2TxCommonData { + fn from(tx: StorageTransaction) -> Self { + let gas_limit = { + let gas_limit_string = tx + .gas_limit + .as_ref() + .expect("gas limit is mandatory for transaction") + .to_string(); + + U256::from_dec_str(&gas_limit_string) + .unwrap_or_else(|_| panic!("Incorrect gas limit value in DB {}", gas_limit_string)) + }; + let nonce = Nonce(tx.nonce.expect("no nonce in L2 tx in DB") as u32); + let max_fee_per_gas = { + let max_fee_per_gas_string = tx + .max_fee_per_gas + .as_ref() + .expect("max price per gas is mandatory for transaction") + .to_string(); + + U256::from_dec_str(&max_fee_per_gas_string).unwrap_or_else(|_| { + panic!( + "Incorrect max price per gas value in DB {}", + max_fee_per_gas_string + ) + }) + }; + + let max_priority_fee_per_gas = { + let max_priority_fee_per_gas_string = tx + .max_priority_fee_per_gas + .as_ref() + .expect("max priority fee per gas is mandatory for transaction") + .to_string(); + + U256::from_dec_str(&max_priority_fee_per_gas_string).unwrap_or_else(|_| { + panic!( + "Incorrect max priority fee per gas value in DB {}", + max_priority_fee_per_gas_string + ) + }) + }; + + let gas_per_pubdata_limit = { + let gas_per_pubdata_limit_string = tx + .gas_per_pubdata_limit + .as_ref() + .expect("gas price per pubdata limit is mandatory for transaction") + .to_string(); + U256::from_dec_str(&gas_per_pubdata_limit_string).unwrap_or_else(|_| { + panic!( + "Incorrect gas price per pubdata limit value in DB {}", + gas_per_pubdata_limit_string + ) + }) + }; + + let fee = Fee { + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + gas_per_pubdata_limit, + }; + + let tx_format = match tx.tx_format.map(|a| a as u8) { + Some(EIP_712_TX_TYPE) => TransactionType::EIP712Transaction, + Some(EIP_2930_TX_TYPE) => TransactionType::EIP2930Transaction, + Some(EIP_1559_TX_TYPE) => TransactionType::EIP1559Transaction, + Some(0) | None => TransactionType::LegacyTransaction, + Some(_) => unreachable!("Unsupported tx type"), + }; + + let StorageTransaction { + paymaster, + paymaster_input, + initiator_address, + signature, + hash, + input, + .. + } = tx; + + let paymaster_params = PaymasterParams { + paymaster: Address::from_slice(&paymaster), + paymaster_input, + }; + + L2TxCommonData::new( + nonce, + fee, + Address::from_slice(&initiator_address), + signature.unwrap_or_else(|| { + panic!("Signature is mandatory for transactions. 
Tx {:#?}", hash) + }), + tx_format, + input.expect("input data is mandatory for l2 transactions"), + H256::from_slice(&hash), + paymaster_params, + ) + } +} + +impl From for ProtocolUpgradeTxCommonData { + fn from(tx: StorageTransaction) -> Self { + let gas_limit = { + let gas_limit_string = tx + .gas_limit + .as_ref() + .expect("gas limit is mandatory for transaction") + .to_string(); + + U256::from_dec_str(&gas_limit_string) + .unwrap_or_else(|_| panic!("Incorrect gas limit value in DB {}", gas_limit_string)) + }; + + let to_mint = tx.l1_tx_mint.map(bigdecimal_to_u256).unwrap_or_default(); + let refund_recipient = tx + .l1_tx_refund_recipient + .map(|recipient| Address::from_slice(&recipient)) + .unwrap_or_default(); + let canonical_tx_hash = H256::from_slice(&tx.hash); + + ProtocolUpgradeTxCommonData { + sender: Address::from_slice(&tx.initiator_address), + upgrade_id: (tx.upgrade_id.unwrap() as u16).try_into().unwrap(), + gas_limit, + max_fee_per_gas: tx + .max_fee_per_gas + .map(bigdecimal_to_u256) + .unwrap_or_default(), + to_mint, + refund_recipient, + // Using 1 for old transactions that did not have the necessary field stored + gas_per_pubdata_limit: tx + .gas_per_pubdata_limit + .map(bigdecimal_to_u256) + .expect("gas_per_pubdata_limit field is missing for protocol upgrade tx"), + eth_hash: Default::default(), + eth_block: tx.l1_block_number.unwrap_or_default() as u64, + canonical_tx_hash, + } + } +} + +impl From for Transaction { + fn from(tx: StorageTransaction) -> Self { + let hash = H256::from_slice(&tx.hash); + let execute = serde_json::from_value::(tx.data.clone()) + .unwrap_or_else(|_| panic!("invalid json in database for tx {:?}", hash)); + let received_timestamp_ms = tx.received_at.timestamp_millis() as u64; + match tx.tx_format { + Some(t) if t == PRIORITY_OPERATION_L2_TX_TYPE as i32 => Transaction { + common_data: ExecuteTransactionCommon::L1(tx.into()), + execute, + received_timestamp_ms, + }, + Some(t) if t == PROTOCOL_UPGRADE_TX_TYPE as i32 => Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(tx.into()), + execute, + received_timestamp_ms, + }, + _ => Transaction { + common_data: ExecuteTransactionCommon::L2(tx.into()), + execute, + received_timestamp_ms, + }, + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct StorageApiTransaction { + #[serde(flatten)] + pub inner_api_transaction: api::Transaction, +} + +impl From for api::Transaction { + fn from(tx: StorageApiTransaction) -> Self { + tx.inner_api_transaction + } +} + +impl<'r> FromRow<'r, PgRow> for StorageApiTransaction { + fn from_row(db_row: &'r PgRow) -> Result { + let row_signature: Option> = db_row.get("signature"); + let signature = row_signature + .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + + Ok(StorageApiTransaction { + inner_api_transaction: api::Transaction { + hash: H256::from_slice(db_row.get("tx_hash")), + nonce: U256::from(db_row.try_get::("nonce").ok().unwrap_or(0)), + block_hash: db_row.try_get("block_hash").ok().map(H256::from_slice), + block_number: db_row + .try_get::("block_number") + .ok() + .map(U64::from), + transaction_index: db_row + .try_get::("index_in_block") + .ok() + .map(U64::from), + from: Some(H160::from_slice(db_row.get("initiator_address"))), + to: Some( + serde_json::from_value::
(db_row.get("execute_contract_address")) + .expect("incorrect address value in the database"), + ), + value: bigdecimal_to_u256(db_row.get::("value")), + // `gas_price`, `max_fee_per_gas`, `max_priority_fee_per_gas` will be zero for the priority transactions. + // For common L2 transactions `gas_price` is equal to `effective_gas_price` if the transaction is included + // in some block, or `max_fee_per_gas` otherwise. + gas_price: Some(bigdecimal_to_u256( + db_row + .try_get::("effective_gas_price") + .or_else(|_| db_row.try_get::("max_fee_per_gas")) + .unwrap_or_else(|_| BigDecimal::zero()), + )), + max_fee_per_gas: Some(bigdecimal_to_u256( + db_row + .try_get::("max_fee_per_gas") + .unwrap_or_else(|_| BigDecimal::zero()), + )), + max_priority_fee_per_gas: Some(bigdecimal_to_u256( + db_row + .try_get::("max_priority_fee_per_gas") + .unwrap_or_else(|_| BigDecimal::zero()), + )), + gas: bigdecimal_to_u256(db_row.get::("gas_limit")), + input: serde_json::from_value(db_row.get::("calldata")) + .expect("Incorrect calldata value in the database"), + raw: None, + v: signature.as_ref().map(|s| U64::from(s.v())), + r: signature.as_ref().map(|s| U256::from(s.r())), + s: signature.as_ref().map(|s| U256::from(s.s())), + transaction_type: db_row + .try_get::, &str>("tx_format") + .unwrap_or_default() + .map(U64::from), + access_list: None, + chain_id: U256::from(0), + l1_batch_number: db_row + .try_get::("l1_batch_number_tx") + .ok() + .map(U64::from), + l1_batch_tx_index: db_row + .try_get::("l1_batch_tx_index") + .ok() + .map(U64::from), + }, + }) + } +} + #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageTransactionDetails { - pub priority_op_id: Option, - pub hash: Vec, pub is_priority: bool, - pub full_fee: Option, - pub layer_2_tip_fee: Option, pub initiator_address: Vec, - pub nonce: Option, - pub signature: Option>, pub gas_limit: Option, - pub max_fee_per_gas: Option, - pub max_priority_fee_per_gas: Option, - pub gas_per_storage_limit: Option, pub gas_per_pubdata_limit: Option, - pub input: Option>, - pub tx_format: Option, - pub data: serde_json::Value, pub received_at: NaiveDateTime, - pub in_mempool: bool, - - pub l1_block_number: Option, - pub l1_batch_tx_index: Option, - pub l1_batch_number: Option, pub miniblock_number: Option, - pub miniblock_timestamp: Option, - pub block_hash: Option>, - pub index_in_block: Option, pub error: Option, pub effective_gas_price: Option, - pub contract_address: Option>, - pub value: BigDecimal, - pub paymaster: Vec, - pub paymaster_input: Vec, - - pub l1_tx_mint: Option, - pub l1_tx_refund_recipient: Option>, - pub refunded_gas: i64, - - pub execution_info: serde_json::Value, - - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub eth_commit_tx_hash: Option, pub eth_prove_tx_hash: Option, pub eth_execute_tx_hash: Option, @@ -131,17 +430,16 @@ impl StorageTransactionDetails { } } -impl From for api::TransactionDetails { +impl From for TransactionDetails { fn from(tx_details: StorageTransactionDetails) -> Self { let status = tx_details.get_transaction_status(); let effective_gas_price = - bigdecimal_to_u256(tx_details.effective_gas_price.clone().unwrap_or_default()); + bigdecimal_to_u256(tx_details.effective_gas_price.unwrap_or_default()); let gas_limit = bigdecimal_to_u256( tx_details .gas_limit - .clone() .expect("gas limit is mandatory for transaction"), ); let gas_refunded = U256::from(tx_details.refunded_gas as u32); @@ -163,7 +461,7 @@ impl From for api::TransactionDetails { .eth_execute_tx_hash .map(|hash| 
H256::from_str(&hash).unwrap()); - api::TransactionDetails { + TransactionDetails { is_l1_originated: tx_details.is_priority, status, fee, @@ -200,452 +498,9 @@ pub fn web3_transaction_select_sql() -> &'static str { } pub fn extract_web3_transaction(db_row: PgRow, chain_id: L2ChainId) -> api::Transaction { - let row_signature: Option> = db_row.get("signature"); - let signature = - row_signature.and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); - api::Transaction { - hash: H256::from_slice(db_row.get("tx_hash")), - nonce: U256::from(db_row.try_get::("nonce").ok().unwrap_or(0)), - block_hash: db_row.try_get("block_hash").ok().map(H256::from_slice), - block_number: db_row - .try_get::("block_number") - .ok() - .map(U64::from), - transaction_index: db_row - .try_get::("index_in_block") - .ok() - .map(U64::from), - from: Some(H160::from_slice(db_row.get("initiator_address"))), - to: Some( - serde_json::from_value::
(db_row.get("execute_contract_address")) - .expect("incorrect address value in the database"), - ), - value: bigdecimal_to_u256(db_row.get::("value")), - // `gas_price`, `max_fee_per_gas`, `max_priority_fee_per_gas` will be zero for the priority transactions. - // For common L2 transactions `gas_price` is equal to `effective_gas_price` if the transaction is included - // in some block, or `max_fee_per_gas` otherwise. - gas_price: Some(bigdecimal_to_u256( - db_row - .try_get::("effective_gas_price") - .or_else(|_| db_row.try_get::("max_fee_per_gas")) - .unwrap_or_else(|_| BigDecimal::zero()), - )), - max_fee_per_gas: Some(bigdecimal_to_u256( - db_row - .try_get::("max_fee_per_gas") - .unwrap_or_else(|_| BigDecimal::zero()), - )), - max_priority_fee_per_gas: Some(bigdecimal_to_u256( - db_row - .try_get::("max_priority_fee_per_gas") - .unwrap_or_else(|_| BigDecimal::zero()), - )), - gas: bigdecimal_to_u256(db_row.get::("gas_limit")), - input: serde_json::from_value(db_row.get::("calldata")) - .expect("Incorrect calldata value in the database"), - raw: None, - v: signature.as_ref().map(|s| U64::from(s.v())), - r: signature.as_ref().map(|s| U256::from(s.r())), - s: signature.as_ref().map(|s| U256::from(s.s())), - transaction_type: db_row - .try_get::, &str>("tx_format") - .unwrap_or_default() - .map(U64::from), - access_list: None, - chain_id: U256::from(chain_id.0), - l1_batch_number: db_row - .try_get::("l1_batch_number_tx") - .ok() - .map(U64::from), - l1_batch_tx_index: db_row - .try_get::("l1_batch_tx_index") - .ok() - .map(U64::from), - } -} - -impl From for Transaction { - fn from(tx: StorageTransaction) -> Self { - let gas_limit = { - let gas_limit_string = tx - .gas_limit - .as_ref() - .expect("gas limit is mandatory for transaction") - .to_string(); - - U256::from_dec_str(&gas_limit_string) - .unwrap_or_else(|_| panic!("Incorrect gas limit value in DB {}", gas_limit_string)) - }; - - if tx.is_priority { - let full_fee = { - let full_fee_string = tx - .full_fee - .expect("full fee is mandatory for priority operation") - .to_string(); - - U256::from_dec_str(&full_fee_string).unwrap_or_else(|_| { - panic!("Incorrect full fee value in DB {}", full_fee_string) - }) - }; - - let layer_2_tip_fee = { - let layer_2_tip_fee_string = tx - .layer_2_tip_fee - .expect("layer 2 tip fee is mandatory for priority operation") - .to_string(); - - U256::from_dec_str(&layer_2_tip_fee_string).unwrap_or_else(|_| { - panic!( - "Incorrect layer 2 tip fee value in DB {}", - layer_2_tip_fee_string - ) - }) - }; - - // Supporting None for compatibility with the old transactions - let to_mint = tx.l1_tx_mint.map(bigdecimal_to_u256).unwrap_or_default(); - // Supporting None for compatibility with the old transactions - let refund_recipient = tx - .l1_tx_refund_recipient - .map(|recipient| Address::from_slice(&recipient)) - .unwrap_or_default(); - - // `tx.hash` represents the transaction hash obtained from the execution results, - // and it should be exactly the same as the canonical tx hash calculated from the - // transaction data, so we don't store it as a separate "canonical_tx_hash" field. 
- let canonical_tx_hash = H256::from_slice(&tx.hash); - - let tx_common_data = L1TxCommonData { - full_fee, - layer_2_tip_fee, - priority_queue_type: PriorityQueueType::Deque, - op_processing_type: OpProcessingType::Common, - sender: Address::from_slice(&tx.initiator_address), - serial_id: PriorityOpId(tx.priority_op_id.unwrap() as u64), - gas_limit, - max_fee_per_gas: tx - .max_fee_per_gas - .map(bigdecimal_to_u256) - .unwrap_or_default(), - to_mint, - refund_recipient, - // Using 1 for old transactions that did not have the necessary field stored - gas_per_pubdata_limit: tx - .gas_per_pubdata_limit - .map(bigdecimal_to_u256) - .unwrap_or_else(|| U256::from(1u32)), - deadline_block: 0, - eth_hash: Default::default(), - eth_block: tx.l1_block_number.unwrap_or_default() as u64, - canonical_tx_hash, - }; - - let hash = H256::from_slice(&tx.hash); - let inner = serde_json::from_value::(tx.data) - .unwrap_or_else(|_| panic!("invalid json in database for tx {:?}", hash)); - Transaction { - common_data: ExecuteTransactionCommon::L1(tx_common_data), - execute: inner, - received_timestamp_ms: tx.received_at.timestamp_millis() as u64, - } - } else { - let nonce = Nonce(tx.nonce.expect("no nonce in L2 tx in DB") as u32); - let max_fee_per_gas = { - let max_fee_per_gas_string = tx - .max_fee_per_gas - .as_ref() - .expect("max price per gas is mandatory for transaction") - .to_string(); - - U256::from_dec_str(&max_fee_per_gas_string).unwrap_or_else(|_| { - panic!( - "Incorrect max price per gas value in DB {}", - max_fee_per_gas_string - ) - }) - }; - - let max_priority_fee_per_gas = { - let max_priority_fee_per_gas_string = tx - .max_priority_fee_per_gas - .as_ref() - .expect("max priority fee per gas is mandatory for transaction") - .to_string(); - - U256::from_dec_str(&max_priority_fee_per_gas_string).unwrap_or_else(|_| { - panic!( - "Incorrect max priority fee per gas value in DB {}", - max_priority_fee_per_gas_string - ) - }) - }; - - let gas_per_pubdata_limit = { - let gas_per_pubdata_limit_string = tx - .gas_per_pubdata_limit - .as_ref() - .expect("gas price per pubdata limit is mandatory for transaction") - .to_string(); - U256::from_dec_str(&gas_per_pubdata_limit_string).unwrap_or_else(|_| { - panic!( - "Incorrect gas price per pubdata limit value in DB {}", - gas_per_pubdata_limit_string - ) - }) - }; - - let fee = Fee { - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - gas_per_pubdata_limit, - }; - - let tx_format = match tx.tx_format.map(|a| a as u8) { - Some(EIP_712_TX_TYPE) => TransactionType::EIP712Transaction, - Some(EIP_2930_TX_TYPE) => TransactionType::EIP2930Transaction, - Some(EIP_1559_TX_TYPE) => TransactionType::EIP1559Transaction, - Some(0) | None => TransactionType::LegacyTransaction, - Some(_) => unreachable!("Unsupported tx type"), - }; - - let StorageTransaction { - paymaster, - paymaster_input, - initiator_address, - signature, - hash, - input, - data, - received_at, - .. - } = tx; - - let paymaster_params = PaymasterParams { - paymaster: Address::from_slice(&paymaster), - paymaster_input, - }; - - let tx_common_data = L2TxCommonData::new( - nonce, - fee, - Address::from_slice(&initiator_address), - signature.unwrap_or_else(|| { - panic!("Signature is mandatory for transactions. 
Tx {:#?}", hash) - }), - tx_format, - input.expect("input data is mandatory for l2 transactions"), - H256::from_slice(&hash), - paymaster_params, - ); - - let inner = serde_json::from_value::(data) - .unwrap_or_else(|_| panic!("invalid json in database for tx {:?}", hash)); - Transaction { - common_data: ExecuteTransactionCommon::L2(tx_common_data), - execute: inner, - received_timestamp_ms: received_at.timestamp_millis() as u64, - } - } - } -} - -pub fn transaction_details_from_storage( - tx_details: StorageTransactionDetails, - mut erc20_transfers: Vec, - mut withdrawals: Vec, - transfer: Option, - mut deposits: Vec, -) -> explorer_api::TransactionDetails { - let status = tx_details.get_transaction_status(); - - // Dirty fix to avoid inconsistency. - // Info about the transactions is built using several DB requests. - // So, it is possible that the transaction will be included in a block between these requests. - // That will result in inconsistency with transaction's events. - // Note: `transfer` field is built based only on the calldata, so it shouldn't be touched here. - if matches!(status, TransactionStatus::Pending) { - erc20_transfers = Vec::new(); - withdrawals = Vec::new(); - deposits = Vec::new(); - } - - let block_number = tx_details - .miniblock_number - .map(|number| MiniblockNumber(number as u32)); - let miniblock_timestamp = tx_details.miniblock_timestamp.map(|number| number as u64); - let l1_batch_number = tx_details - .l1_batch_number - .map(|number| L1BatchNumber(number as u32)); - let block_hash = tx_details.block_hash.map(|hash| H256::from_slice(&hash)); - let index_in_block = tx_details.index_in_block.map(|i| i as u32); - - let eth_commit_tx_hash = tx_details - .eth_commit_tx_hash - .map(|hash| H256::from_str(&hash).unwrap()); - let eth_prove_tx_hash = tx_details - .eth_prove_tx_hash - .map(|hash| H256::from_str(&hash).unwrap()); - let eth_execute_tx_hash = tx_details - .eth_execute_tx_hash - .map(|hash| H256::from_str(&hash).unwrap()); - - let received_at = DateTime::::from_utc(tx_details.received_at, Utc); - let paymaster_address = Address::from_slice(&tx_details.paymaster); - - let storage_tx = StorageTransaction { - priority_op_id: tx_details.priority_op_id, - hash: tx_details.hash, - is_priority: tx_details.is_priority, - full_fee: tx_details.full_fee, - layer_2_tip_fee: tx_details.layer_2_tip_fee, - initiator_address: tx_details.initiator_address, - nonce: tx_details.nonce, - signature: tx_details.signature, - gas_limit: tx_details.gas_limit, - max_fee_per_gas: tx_details.max_fee_per_gas, - max_priority_fee_per_gas: tx_details.max_priority_fee_per_gas, - gas_per_storage_limit: tx_details.gas_per_storage_limit, - gas_per_pubdata_limit: tx_details.gas_per_pubdata_limit, - input: tx_details.input, - tx_format: tx_details.tx_format, - data: tx_details.data, - received_at: tx_details.received_at, - in_mempool: tx_details.in_mempool, - l1_block_number: tx_details.l1_block_number, - l1_batch_number: tx_details.l1_batch_number, - l1_batch_tx_index: tx_details.l1_batch_tx_index, - miniblock_number: tx_details.miniblock_number, - index_in_block: tx_details.index_in_block, - error: tx_details.error, - effective_gas_price: tx_details.effective_gas_price, - contract_address: tx_details.contract_address, - value: tx_details.value, - paymaster: tx_details.paymaster, - paymaster_input: tx_details.paymaster_input, - l1_tx_mint: tx_details.l1_tx_mint, - l1_tx_refund_recipient: tx_details.l1_tx_refund_recipient, - refunded_gas: tx_details.refunded_gas, - execution_info: 
tx_details.execution_info, - created_at: tx_details.created_at, - updated_at: tx_details.updated_at, - }; - let effective_gas_price = - bigdecimal_to_u256(storage_tx.effective_gas_price.clone().unwrap_or_default()); - let tx: Transaction = storage_tx.into(); - let fee = (tx.gas_limit() - tx_details.refunded_gas) * effective_gas_price; - - let tx_type = tx.tx_format(); - - let transaction_hash = tx.hash(); - let nonce = tx.nonce(); - let initiator_address = tx.initiator_account(); - let is_l1_originated = tx.is_l1(); - let data = tx.execute; - - let mut transfer_changes = erc20_transfers.clone(); - for withdraw in withdrawals.iter() { - // Ether is being sent to `L2_ETH_TOKEN_ADDRESS` when burning - // but other tokens are being sent to the zero address. - let to = if withdraw.token_info.l1_address == Address::zero() { - L2_ETH_TOKEN_ADDRESS - } else { - Address::zero() - }; - let burn_event_to_remove = Erc20TransferInfo { - token_info: withdraw.token_info.clone(), - from: withdraw.from, - to, - amount: withdraw.amount, - }; - let elem_to_remove = transfer_changes.iter().find_position(|event| { - event.token_info.l2_address == burn_event_to_remove.token_info.l2_address - && event.from == burn_event_to_remove.from - && event.to == burn_event_to_remove.to - && event.amount == burn_event_to_remove.amount - }); - if let Some(idx_to_remove) = elem_to_remove { - transfer_changes.remove(idx_to_remove.0); - } else { - vlog::warn!( - "Burn event for withdrawal must be present, tx hash: {:?}", - transaction_hash - ); - } - } - for deposit in deposits.iter() { - // Ether doesn't emit `Transfer` event when minting unlike other tokens. - if deposit.token_info.l1_address != Address::zero() { - let mint_event_to_remove = Erc20TransferInfo { - token_info: deposit.token_info.clone(), - from: Address::zero(), - to: deposit.to, - amount: deposit.amount, - }; - let elem_to_remove = transfer_changes.iter().find_position(|event| { - event.token_info.l2_address == mint_event_to_remove.token_info.l2_address - && event.from == mint_event_to_remove.from - && event.to == mint_event_to_remove.to - && event.amount == mint_event_to_remove.amount - }); - if let Some(idx_to_remove) = elem_to_remove { - transfer_changes.remove(idx_to_remove.0); - } else { - vlog::warn!( - "Mint event for deposit must be present, tx hash: {:?}", - transaction_hash - ); - } - } - } - let fee_receiver_address = if paymaster_address == Address::zero() { - BOOTLOADER_ADDRESS - } else { - paymaster_address - }; - let balance_changes = transfer_changes - .into_iter() - .map(|transfer_info| { - let balance_change_type = if transfer_info.to == fee_receiver_address { - BalanceChangeType::Fee - } else { - BalanceChangeType::Transfer - }; - BalanceChangeInfo { - token_info: transfer_info.token_info, - from: transfer_info.from, - to: transfer_info.to, - amount: transfer_info.amount, - r#type: balance_change_type, - } - }) - .chain(withdrawals) - .chain(deposits) - .collect(); - - explorer_api::TransactionDetails { - transaction_hash, - data, - is_l1_originated, - status, - fee, - nonce, - block_number, - l1_batch_number, - block_hash, - index_in_block, - initiator_address, - received_at, - miniblock_timestamp, - eth_commit_tx_hash, - eth_prove_tx_hash, - eth_execute_tx_hash, - erc20_transfers, - transfer, - balance_changes, - r#type: tx_type as u32, - } + let mut storage_api_tx = StorageApiTransaction::from_row(&db_row).unwrap(); + storage_api_tx.inner_api_transaction.chain_id = U256::from(chain_id.0); + storage_api_tx.into() } #[derive(Debug, Clone, 
sqlx::FromRow)]
diff --git a/core/lib/dal/src/models/storage_verification_request.rs b/core/lib/dal/src/models/storage_verification_request.rs
index 3c0864685380..47e9abd11db3 100644
--- a/core/lib/dal/src/models/storage_verification_request.rs
+++ b/core/lib/dal/src/models/storage_verification_request.rs
@@ -1,4 +1,4 @@
-use zksync_types::explorer_api::{
+use zksync_types::contract_verification_api::{
     CompilerType, CompilerVersions, SourceCodeData, VerificationIncomingRequest,
     VerificationRequest,
 };
diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs
new file mode 100644
index 000000000000..66d9c532a5d0
--- /dev/null
+++ b/core/lib/dal/src/proof_generation_dal.rs
@@ -0,0 +1,62 @@
+use std::time::Duration;
+
+use zksync_types::L1BatchNumber;
+
+use crate::time_utils::pg_interval_from_duration;
+use crate::{SqlxError, StorageProcessor};
+
+#[derive(Debug)]
+pub struct ProofGenerationDal<'a, 'c> {
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
+}
+
+impl ProofGenerationDal<'_, '_> {
+    pub async fn get_next_block_to_be_proven(
+        &mut self,
+        processing_timeout: Duration,
+    ) -> Option<L1BatchNumber> {
+        let processing_timeout = pg_interval_from_duration(processing_timeout);
+        let result: Option<L1BatchNumber> = sqlx::query!(
+            "UPDATE proof_generation_details \
+             SET status = 'picked_by_prover', updated_at = now(), prover_taken_at = now() \
+             WHERE l1_batch_number = ( \
+                 SELECT l1_batch_number \
+                 FROM proof_generation_details \
+                 WHERE status = 'ready_to_be_proven' \
+                 OR (status = 'picked_by_prover' AND prover_taken_at < now() - $1::interval) \
+                 ORDER BY l1_batch_number ASC \
+                 LIMIT 1 \
+                 FOR UPDATE \
+                 SKIP LOCKED \
+             ) \
+             RETURNING proof_generation_details.l1_batch_number",
+            &processing_timeout,
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| L1BatchNumber(row.l1_batch_number as u32));
+
+        result
+    }
+
+    pub async fn save_proof_artifacts_metadata(
+        &mut self,
+        block_number: L1BatchNumber,
+        proof_blob_url: &str,
+    ) -> Result<(), SqlxError> {
+        sqlx::query!(
+            "UPDATE proof_generation_details \
+             SET status='generated', proof_blob_url = $1, updated_at = now() \
+             WHERE l1_batch_number = $2",
+            proof_blob_url,
+            block_number.0 as i64,
+        )
+        .execute(self.storage.conn())
+        .await?
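+        // A Postgres UPDATE that matches no rows is not an error by itself: it just reports
+        // `rows_affected() == 0`. The check below turns that case into `sqlx::Error::RowNotFound`.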
+ .rows_affected() + .eq(&1) + .then_some(()) + .ok_or(sqlx::Error::RowNotFound) + } +} diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs new file mode 100644 index 000000000000..65994f82c3b2 --- /dev/null +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -0,0 +1,264 @@ +use std::convert::{TryFrom, TryInto}; +use zksync_contracts::BaseSystemContracts; +use zksync_types::{ + protocol_version::{L1VerifierConfig, ProtocolUpgradeTx, ProtocolVersion, VerifierParams}, + ProtocolVersionId, H256, +}; + +use crate::models::storage_protocol_version::{ + protocol_version_from_storage, StorageProtocolVersion, +}; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct ProtocolVersionsDal<'a, 'c> { + pub storage: &'a mut StorageProcessor<'c>, +} + +impl ProtocolVersionsDal<'_, '_> { + pub async fn save_protocol_version(&mut self, version: ProtocolVersion) { + let tx_hash = version + .tx + .as_ref() + .map(|tx| tx.common_data.hash().0.to_vec()); + + let mut db_transaction = self.storage.start_transaction().await; + if let Some(tx) = version.tx { + db_transaction + .transactions_dal() + .insert_system_transaction(tx) + .await; + } + + sqlx::query!( + "INSERT INTO protocol_versions + (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, + recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash, + default_account_code_hash, verifier_address, upgrade_tx_hash, created_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now()) + ", + version.id as i32, + version.timestamp as i64, + version.l1_verifier_config.recursion_scheduler_level_vk_hash.as_bytes(), + version.l1_verifier_config.params.recursion_node_level_vk_hash.as_bytes(), + version.l1_verifier_config.params.recursion_leaf_level_vk_hash.as_bytes(), + version.l1_verifier_config.params.recursion_circuits_set_vks_hash.as_bytes(), + version.base_system_contracts_hashes.bootloader.as_bytes(), + version.base_system_contracts_hashes.default_aa.as_bytes(), + version.verifier_address.as_bytes(), + tx_hash + ) + .execute(db_transaction.conn()) + .await + .unwrap(); + + db_transaction.commit().await; + } + + pub async fn base_system_contracts_by_timestamp( + &mut self, + current_timestamp: u64, + ) -> (BaseSystemContracts, ProtocolVersionId) { + let row = sqlx::query!( + "SELECT bootloader_code_hash, default_account_code_hash, id FROM protocol_versions + WHERE timestamp <= $1 + ORDER BY id DESC + LIMIT 1 + ", + current_timestamp as i64 + ) + .fetch_one(self.storage.conn()) + .await + .unwrap(); + let contracts = self + .storage + .storage_dal() + .get_base_system_contracts( + H256::from_slice(&row.bootloader_code_hash), + H256::from_slice(&row.default_account_code_hash), + ) + .await; + (contracts, (row.id as u16).try_into().unwrap()) + } + + pub async fn load_base_system_contracts_by_version_id( + &mut self, + version_id: u16, + ) -> Option { + let row = sqlx::query!( + "SELECT bootloader_code_hash, default_account_code_hash FROM protocol_versions + WHERE id = $1 + ", + version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + if let Some(row) = row { + Some( + self.storage + .storage_dal() + .get_base_system_contracts( + H256::from_slice(&row.bootloader_code_hash), + H256::from_slice(&row.default_account_code_hash), + ) + .await, + ) + } else { + None + } + } + + pub async fn load_previous_version( + &mut self, + version_id: ProtocolVersionId, + ) -> Option { + let storage_protocol_version: StorageProtocolVersion = 
sqlx::query_as!( + StorageProtocolVersion, + "SELECT * FROM protocol_versions + WHERE id < $1 + ORDER BY id DESC + LIMIT 1 + ", + version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + let tx = match storage_protocol_version.upgrade_tx_hash.as_ref() { + Some(hash) => Some( + self.storage + .transactions_dal() + .get_tx_by_hash(H256::from_slice(hash.as_slice())) + .await + .unwrap_or_else(|| { + panic!( + "Missing upgrade tx for protocol version {}", + version_id as u16 + ); + }) + .try_into() + .unwrap(), + ), + None => None, + }; + + Some(protocol_version_from_storage(storage_protocol_version, tx)) + } + + pub async fn l1_verifier_config_for_version( + &mut self, + version_id: ProtocolVersionId, + ) -> Option<L1VerifierConfig> { + let row = sqlx::query!( + "SELECT recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash + FROM protocol_versions + WHERE id = $1 + ", + version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + Some(L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: H256::from_slice(&row.recursion_node_level_vk_hash), + recursion_leaf_level_vk_hash: H256::from_slice(&row.recursion_leaf_level_vk_hash), + recursion_circuits_set_vks_hash: H256::from_slice( + &row.recursion_circuits_set_vks_hash, + ), + }, + recursion_scheduler_level_vk_hash: H256::from_slice( + &row.recursion_scheduler_level_vk_hash, + ), + }) + } + + pub async fn last_version_id(&mut self) -> Option<ProtocolVersionId> { + let id = sqlx::query!(r#"SELECT MAX(id) as "max?" FROM protocol_versions"#) + .fetch_optional(self.storage.conn()) + .await + .unwrap()? + .max?; + Some((id as u16).try_into().unwrap()) + } + + pub async fn all_version_ids(&mut self) -> Vec<ProtocolVersionId> { + let rows = sqlx::query!("SELECT id FROM protocol_versions") + .fetch_all(self.storage.conn()) + .await + .unwrap(); + rows.into_iter() + .map(|row| (row.id as u16).try_into().unwrap()) + .collect() + } + + pub async fn get_protocol_upgrade_tx( + &mut self, + protocol_version_id: ProtocolVersionId, + ) -> Option<ProtocolUpgradeTx> { + let row = sqlx::query!( + " + SELECT upgrade_tx_hash FROM protocol_versions + WHERE id = $1 + ", + protocol_version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + if let Some(hash) = row.upgrade_tx_hash { + Some( + self.storage + .transactions_dal() + .get_tx_by_hash(H256::from_slice(&hash)) + .await + .unwrap_or_else(|| { + panic!( + "Missing upgrade tx for protocol version {}", + protocol_version_id as u16 + ); + }) + .try_into() + .unwrap(), + ) + } else { + None + } + } + + pub async fn protocol_version_for( + &mut self, + vk_commitments: &L1VerifierConfig, + ) -> Vec<ProtocolVersionId> { + sqlx::query!( + r#" + SELECT id + FROM protocol_versions + WHERE recursion_circuits_set_vks_hash = $1 + AND recursion_leaf_level_vk_hash = $2 + AND recursion_node_level_vk_hash = $3 + AND recursion_scheduler_level_vk_hash = $4 + "#, + vk_commitments + .params + .recursion_circuits_set_vks_hash + .as_bytes(), + vk_commitments + .params + .recursion_leaf_level_vk_hash + .as_bytes(), + vk_commitments + .params + .recursion_node_level_vk_hash + .as_bytes(), + vk_commitments.recursion_scheduler_level_vk_hash.as_bytes(), + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| ProtocolVersionId::try_from(row.id as u16).unwrap()) + .collect() + } +} diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs new file mode 100644 index
000000000000..dc43dadbd22a --- /dev/null +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -0,0 +1,38 @@ +use zksync_types::api::ProtocolVersion; + +use crate::models::storage_protocol_version::StorageProtocolVersion; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct ProtocolVersionsWeb3Dal<'a, 'c> { + pub storage: &'a mut StorageProcessor<'c>, +} + +impl ProtocolVersionsWeb3Dal<'_, '_> { + pub async fn get_protocol_version_by_id(&mut self, version_id: u16) -> Option<ProtocolVersion> { + let storage_protocol_version: Option<StorageProtocolVersion> = sqlx::query_as!( + StorageProtocolVersion, + "SELECT * FROM protocol_versions + WHERE id = $1 + ", + version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + + storage_protocol_version.map(ProtocolVersion::from) + } + + pub async fn get_latest_protocol_version(&mut self) -> ProtocolVersion { + let storage_protocol_version: StorageProtocolVersion = sqlx::query_as!( + StorageProtocolVersion, + "SELECT * FROM protocol_versions ORDER BY id DESC LIMIT 1", + ) + .fetch_one(self.storage.conn()) + .await + .unwrap(); + + ProtocolVersion::from(storage_protocol_version) + } +} diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs index 71618db0881c..29bef1db6ae9 100644 --- a/core/lib/dal/src/prover_dal.rs +++ b/core/lib/dal/src/prover_dal.rs @@ -1,19 +1,30 @@ -use std::collections::HashMap; -use std::convert::{TryFrom, TryInto}; -use std::ops::Range; -use std::time::{Duration, Instant}; - -use zksync_types::aggregated_operations::BlockProofForL1; -use zksync_types::proofs::{ - AggregationRound, JobCountStatistics, JobExtendedStatistics, ProverJobInfo, ProverJobMetadata, +use sqlx::Error; + +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + ops::Range, + time::Duration, +}; + +use zksync_types::{ + aggregated_operations::L1BatchProofForL1, + proofs::{ + AggregationRound, JobCountStatistics, JobExtendedStatistics, ProverJobInfo, + ProverJobMetadata, + }, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncProof, bellman::bn256::Bn256, + }, + L1BatchNumber, ProtocolVersionId, }; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::L1BatchNumber; -use crate::models::storage_prover_job_info::StorageProverJobInfo; -use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; -use crate::StorageProcessor; +use crate::{ + instrument::InstrumentExt, + models::storage_prover_job_info::StorageProverJobInfo, + time_utils::{duration_to_naive_time, pg_interval_from_duration}, + StorageProcessor, +}; #[derive(Debug)] pub struct ProverDal<'a, 'c> { @@ -21,37 +32,41 @@ pub struct ProverDal<'a, 'c> { } impl ProverDal<'_, '_> { - pub async fn get_next_prover_job(&mut self) -> Option<ProverJobMetadata> { - { - let result: Option<ProverJobMetadata> = sqlx::query!( - " + pub async fn get_next_prover_job( + &mut self, + protocol_versions: &[ProtocolVersionId], + ) -> Option<ProverJobMetadata> { + let protocol_versions: Vec<i32> = protocol_versions.iter().map(|&id| id as i32).collect(); + let result: Option<ProverJobMetadata> = sqlx::query!( + " UPDATE prover_jobs SET status = 'in_progress', attempts = attempts + 1, updated_at = now(), processing_started_at = now() WHERE id = ( - SELECT id - FROM prover_jobs - WHERE status = 'queued' - ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC - LIMIT 1 - FOR UPDATE - SKIP LOCKED + SELECT id + FROM prover_jobs + WHERE status = 'queued' + AND protocol_version = ANY($1) + ORDER BY aggregation_round
DESC, l1_batch_number ASC, id ASC + LIMIT 1 + FOR UPDATE + SKIP LOCKED ) RETURNING prover_jobs.* ", - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| ProverJobMetadata { - id: row.id as u32, - block_number: L1BatchNumber(row.l1_batch_number as u32), - circuit_type: row.circuit_type.clone(), - aggregation_round: AggregationRound::try_from(row.aggregation_round).unwrap(), - sequence_number: row.sequence_number as usize, - }); - result - } + &protocol_versions[..] + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| ProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_type: row.circuit_type, + aggregation_round: AggregationRound::try_from(row.aggregation_round).unwrap(), + sequence_number: row.sequence_number as usize, + }); + result } pub async fn get_proven_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> { @@ -79,26 +94,31 @@ impl ProverDal<'_, '_> { pub async fn get_next_prover_job_by_circuit_types( &mut self, circuit_types: Vec<String>, + protocol_versions: &[ProtocolVersionId], ) -> Option<ProverJobMetadata> { { + let protocol_versions: Vec<i32> = + protocol_versions.iter().map(|&id| id as i32).collect(); let result: Option<ProverJobMetadata> = sqlx::query!( " UPDATE prover_jobs SET status = 'in_progress', attempts = attempts + 1, updated_at = now(), processing_started_at = now() WHERE id = ( - SELECT id - FROM prover_jobs - WHERE circuit_type = ANY($1) - AND status = 'queued' - ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC - LIMIT 1 - FOR UPDATE - SKIP LOCKED - ) + SELECT id + FROM prover_jobs + WHERE circuit_type = ANY($1) + AND status = 'queued' + AND protocol_version = ANY($2) + ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC + LIMIT 1 + FOR UPDATE + SKIP LOCKED + ) RETURNING prover_jobs.* ", &circuit_types[..], + &protocol_versions[..]
) .fetch_optional(self.storage.conn()) .await @@ -121,29 +141,33 @@ impl ProverDal<'_, '_> { l1_batch_number: L1BatchNumber, circuit_types_and_urls: Vec<(&'static str, String)>, aggregation_round: AggregationRound, + protocol_version: i32, ) { { - let started_at = Instant::now(); let it = circuit_types_and_urls.into_iter().enumerate(); for (sequence_number, (circuit, circuit_input_blob_url)) in it { sqlx::query!( " - INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now()) + INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now()) ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING ", l1_batch_number.0 as i64, circuit, sequence_number as i64, - vec![], + &[] as &[u8], aggregation_round as i64, - circuit_input_blob_url + circuit_input_blob_url, + protocol_version ) + .instrument("save_witness") + .report_latency() + .with_arg("l1_batch_number", &l1_batch_number) + .with_arg("circuit", &circuit) + .with_arg("circuit_input_blob_url", &circuit_input_blob_url) .execute(self.storage.conn()) .await .unwrap(); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_witness"); } } } @@ -154,9 +178,8 @@ impl ProverDal<'_, '_> { time_taken: Duration, proof: Vec<u8>, proccesed_by: &str, - ) { + ) -> Result<(), Error> { { - let started_at = Instant::now(); sqlx::query!( " UPDATE prover_jobs @@ -164,19 +187,26 @@ WHERE id = $4 ", duration_to_naive_time(time_taken), - proof, + &proof, proccesed_by, id as i64, ) - .execute(self.storage.conn()) - .await - .unwrap(); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_proof"); + .instrument("save_proof") + .report_latency() + .with_arg("id", &id) + .with_arg("proof.len", &proof.len()) + .execute(self.storage.conn()) + .await?; } + Ok(()) } - pub async fn save_proof_error(&mut self, id: u32, error: String, max_attempts: u32) { + pub async fn save_proof_error( + &mut self, + id: u32, + error: String, + max_attempts: u32, + ) -> Result<(), Error> { { let mut transaction = self.storage.start_transaction().await; @@ -191,8 +221,7 @@ id as i64, ) .fetch_one(transaction.conn()) - .await - .unwrap(); + .await?; if row.attempts as u32 >= max_attempts { transaction @@ -202,6 +231,7 @@ } transaction.commit().await; + Ok(()) } } @@ -239,7 +269,7 @@ impl ProverDal<'_, '_> { &mut self, from_block: L1BatchNumber, to_block: L1BatchNumber, - ) -> Vec<BlockProofForL1> { + ) -> Vec<L1BatchProofForL1> { { sqlx::query!( "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords @@ -266,7 +296,7 @@ &row.aggregation_result_coords .expect("scheduler_witness_job with `successful` status has no aggregation_result_coords"), ).expect("cannot deserialize proof"); - BlockProofForL1 { + L1BatchProofForL1 { aggregation_result_coords: deserialized_aggregation_result_coords, scheduler_proof: ZkSyncProof::into_proof(deserialized_proof), } @@ -505,22 +535,22 @@ .collect::<Vec<_>>()) } - pub async fn get_prover_job_by_id(&mut self, job_id: u32) -> Option<ProverJobMetadata> { + pub async fn get_prover_job_by_id( + &mut self, + job_id: u32, + ) -> Result<Option<ProverJobMetadata>, Error> { { - let result: Option<ProverJobMetadata> = -
sqlx::query!("SELECT * from prover_jobs where id=$1", job_id as i64) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| ProverJobMetadata { - id: row.id as u32, - block_number: L1BatchNumber(row.l1_batch_number as u32), - circuit_type: row.circuit_type.clone(), - aggregation_round: AggregationRound::try_from(row.aggregation_round) - .unwrap(), - sequence_number: row.sequence_number as usize, - }); - result + let row = sqlx::query!("SELECT * from prover_jobs where id=$1", job_id as i64) + .fetch_optional(self.storage.conn()) + .await?; + + Ok(row.map(|row| ProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_type: row.circuit_type, + aggregation_round: AggregationRound::try_from(row.aggregation_round).unwrap(), + sequence_number: row.sequence_number as usize, + })) } } diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 581f070661ca..7a02c7ed164c 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -1,15 +1,13 @@ use itertools::Itertools; -use std::{ - collections::{HashMap, HashSet}, - time::Instant, -}; +use std::collections::{HashMap, HashSet}; -use crate::StorageProcessor; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_types::{MiniblockNumber, StorageKey, StorageLog, StorageValue, H256, U256}; use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; +use crate::{instrument::InstrumentExt, StorageProcessor}; + #[derive(Debug)] pub struct StorageDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, @@ -183,19 +181,19 @@ impl StorageDal<'_, '_> { /// Gets the current storage value at the specified `key`. pub async fn get_by_key(&mut self, key: &StorageKey) -> Option { - let started_at = Instant::now(); let hashed_key = key.hashed_key(); - let result = sqlx::query!( + + sqlx::query!( "SELECT value FROM storage WHERE hashed_key = $1", hashed_key.as_bytes() ) + .instrument("get_by_key") + .report_latency() + .with_arg("key", &hashed_key) .fetch_optional(self.storage.conn()) .await .unwrap() - .map(|row| H256::from_slice(&row.value)); - - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_by_key"); - result + .map(|row| H256::from_slice(&row.value)) } /// Removes all factory deps with a miniblock number strictly greater than the specified `block_number`. diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index c8d684ea88b0..90e05106b794 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -312,12 +312,15 @@ impl StorageLogsDal<'_, '_> { output } - async fn get_l1_batches_for_initial_writes( + pub async fn get_l1_batches_for_initial_writes( &mut self, hashed_keys: &[H256], ) -> HashMap { - let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); + if hashed_keys.is_empty() { + return HashMap::new(); // Shortcut to save time on communication with DB in the common case + } + let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); let rows = sqlx::query!( "SELECT hashed_key, l1_batch_number FROM initial_writes \ WHERE hashed_key = ANY($1::bytea[])", @@ -342,6 +345,11 @@ impl StorageLogsDal<'_, '_> { /// # Return value /// /// The returned map is guaranteed to contain all unique keys from `hashed_keys`. + /// + /// # Performance + /// + /// This DB query is slow, especially when used with large `hashed_keys` slices. Prefer using alternatives + /// wherever possible. 
pub async fn get_previous_storage_values( &mut self, hashed_keys: &[H256], @@ -391,6 +399,33 @@ impl StorageLogsDal<'_, '_> { }) .collect() } + + /// Resolves hashed keys into storage keys ((address, key) tuples). + /// Panics if there is an unknown hashed key in the input. + pub async fn resolve_hashed_keys(&mut self, hashed_keys: &[H256]) -> Vec { + let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); + sqlx::query!( + "SELECT \ + (SELECT ARRAY[address,key] FROM storage_logs \ + WHERE hashed_key = u.hashed_key \ + ORDER BY miniblock_number, operation_number \ + LIMIT 1) as \"address_and_key?\" \ + FROM UNNEST($1::bytea[]) AS u(hashed_key)", + &hashed_keys as &[&[u8]], + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let address_and_key = row.address_and_key.unwrap(); + StorageKey::new( + AccountTreeId::new(Address::from_slice(&address_and_key[0])), + H256::from_slice(&address_and_key[1]), + ) + }) + .collect() + } } #[cfg(test)] @@ -401,8 +436,7 @@ mod tests { use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ block::{BlockGasCount, L1BatchHeader}, - zk_evm::aux_structures::{LogQuery, Timestamp}, - U256, + ProtocolVersion, ProtocolVersionId, }; async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec) { @@ -411,10 +445,11 @@ mod tests { 0, Address::default(), BaseSystemContractsHashes::default(), + ProtocolVersionId::default(), ); header.is_finished = true; conn.blocks_dal() - .insert_l1_batch(&header, BlockGasCount::default()) + .insert_l1_batch(&header, &[], BlockGasCount::default()) .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(number)) @@ -438,6 +473,9 @@ mod tests { .delete_miniblocks(MiniblockNumber(0)) .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; let account = AccountTreeId::new(Address::repeat_byte(1)); let first_key = StorageKey::new(account, H256::zero()); @@ -523,6 +561,9 @@ mod tests { .delete_miniblocks(MiniblockNumber(0)) .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; let account = AccountTreeId::new(Address::repeat_byte(1)); let logs: Vec<_> = (0_u8..10) @@ -532,9 +573,9 @@ mod tests { }) .collect(); insert_miniblock(&mut conn, 1, logs.clone()).await; - let queries: Vec<_> = logs.iter().map(write_log_to_query).collect(); + let written_keys: Vec<_> = logs.iter().map(|log| log.key).collect(); conn.storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(1), &queries) + .insert_initial_writes(L1BatchNumber(1), &written_keys) .await; let new_logs: Vec<_> = (5_u64..20) @@ -544,9 +585,9 @@ mod tests { }) .collect(); insert_miniblock(&mut conn, 2, new_logs.clone()).await; - let new_queries: Vec<_> = new_logs[5..].iter().map(write_log_to_query).collect(); + let new_written_keys: Vec<_> = new_logs[5..].iter().map(|log| log.key).collect(); conn.storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(2), &new_queries) + .insert_initial_writes(L1BatchNumber(2), &new_written_keys) .await; let logs_for_revert = conn @@ -563,22 +604,6 @@ mod tests { } } - fn write_log_to_query(log: &StorageLog) -> LogQuery { - LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: 0, - aux_byte: 0, - shard_id: 0, - address: *log.key.address(), - key: U256::from_big_endian(log.key.key().as_bytes()), - read_value: U256::zero(), 
- written_value: U256::from_big_endian(log.value.as_bytes()), - rw_flag: true, - rollback: false, - is_service: false, - } - } - #[db_test(dal_crate)] async fn reverting_keys_without_initial_write(pool: ConnectionPool) { let mut conn = pool.access_storage().await; @@ -587,6 +612,9 @@ .delete_miniblocks(MiniblockNumber(0)) .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + conn.protocol_versions_dal() + .save_protocol_version(ProtocolVersion::default()) + .await; let account = AccountTreeId::new(Address::repeat_byte(1)); let mut logs: Vec<_> = [0_u8, 1, 2, 3] @@ -605,14 +633,23 @@ } insert_miniblock(&mut conn, l1_batch, logs.clone()).await; + let all_keys: Vec<_> = logs.iter().map(|log| log.key.hashed_key()).collect(); + let non_initial = conn + .storage_logs_dedup_dal() + .filter_written_slots(&all_keys) + .await; // Pretend that dedup logic eliminates all writes with zero values. - let queries: Vec<_> = logs + let initial_keys: Vec<_> = logs .iter() - .filter_map(|log| (!log.value.is_zero()).then(|| write_log_to_query(log))) + .filter_map(|log| { + (!log.value.is_zero() && !non_initial.contains(&log.key.hashed_key())) + .then_some(log.key) + }) .collect(); - assert!(queries.len() < logs.len()); + + assert!(initial_keys.len() < logs.len()); conn.storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(l1_batch), &queries) + .insert_initial_writes(L1BatchNumber(l1_batch), &initial_keys) .await; } diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 751f81d135ba..88cf9308cdf1 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -17,84 +17,171 @@ impl StorageLogsDedupDal<'_, '_> { l1_batch_number: L1BatchNumber, read_logs: &[LogQuery], ) { - { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY protective_reads (l1_batch_number, address, key, created_at, updated_at) - FROM STDIN WITH (DELIMITER '|')", - ) - .await - .unwrap(); + let mut copy = self + .storage + .conn() + .copy_in_raw( + "COPY protective_reads (l1_batch_number, address, key, created_at, updated_at) \ + FROM STDIN WITH (DELIMITER '|')", + ) + .await + .unwrap(); - let mut bytes: Vec<u8> = Vec::new(); - let now = Utc::now().naive_utc().to_string(); - for log in read_logs.iter() { - let address_str = format!("\\\\x{}", hex::encode(log.address.0)); - let key_str = format!("\\\\x{}", hex::encode(u256_to_h256(log.key).0)); - let row = format!( - "{}|{}|{}|{}|{}\n", - l1_batch_number, address_str, key_str, now, now - ); - bytes.extend_from_slice(row.as_bytes()); - } - copy.send(bytes).await.unwrap(); - copy.finish().await.unwrap(); + let mut bytes: Vec<u8> = Vec::new(); + let now = Utc::now().naive_utc().to_string(); + for log in read_logs.iter() { + let address_str = format!("\\\\x{}", hex::encode(log.address.0)); + let key_str = format!("\\\\x{}", hex::encode(u256_to_h256(log.key).0)); + let row = format!( + "{}|{}|{}|{}|{}\n", + l1_batch_number, address_str, key_str, now, now + ); + bytes.extend_from_slice(row.as_bytes()); } + copy.send(bytes).await.unwrap(); + copy.finish().await.unwrap(); } + /// Inserts initial writes and assigns indices to them. + /// Assumes indices are already assigned for all saved initial_writes, so must be called only after the migration.
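// A usage sketch (illustrative only; the `deduplicated_logs` name is assumed): after a batch is
// sealed, the deduplicated written keys receive contiguous enumeration indices continuing from
// the current maximum:
//
//     let written_keys: Vec<StorageKey> = deduplicated_logs.iter().map(|log| log.key).collect();
//     storage
//         .storage_logs_dedup_dal()
//         .insert_initial_writes(L1BatchNumber(42), &written_keys)
//         .await;
//
// If `max_set_enumeration_index` previously returned N, these keys get indices
// N+1..=N+written_keys.len(), which is exactly what the body below computes.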
pub async fn insert_initial_writes( &mut self, l1_batch_number: L1BatchNumber, - write_logs: &[LogQuery], + written_storage_keys: &[StorageKey], ) { - { - let hashed_keys: Vec<_> = write_logs - .iter() - .map(|log| { - StorageKey::raw_hashed_key(&log.address, &u256_to_h256(log.key)).to_vec() - }) - .collect(); + let hashed_keys: Vec<_> = written_storage_keys + .iter() + .map(|key| StorageKey::raw_hashed_key(key.address(), key.key()).to_vec()) + .collect(); - sqlx::query!( - "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at) - SELECT u.hashed_key, $2, now(), now() - FROM UNNEST($1::bytea[]) AS u(hashed_key) - ON CONFLICT (hashed_key) DO NOTHING - ", - &hashed_keys, - l1_batch_number.0 as i64, - ) - .execute(self.storage.conn()) + let last_index = self + .max_set_enumeration_index() .await - .unwrap(); - } + .map(|(last_index, _)| last_index) + .unwrap_or(0); + let indices: Vec<_> = ((last_index + 1)..=(last_index + hashed_keys.len() as u64)) + .map(|x| x as i64) + .collect(); + + sqlx::query!( + "INSERT INTO initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) \ + SELECT u.hashed_key, u.index, $3, now(), now() \ + FROM UNNEST($1::bytea[], $2::bigint[]) AS u(hashed_key, index)", + &hashed_keys, + &indices, + l1_batch_number.0 as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); } pub async fn get_protective_reads_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, ) -> HashSet<StorageKey> { - { - sqlx::query!( - " - SELECT address, key FROM protective_reads - WHERE l1_batch_number = $1 - ", - l1_batch_number.0 as i64 + sqlx::query!( + "SELECT address, key FROM protective_reads WHERE l1_batch_number = $1", + l1_batch_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + StorageKey::new( + AccountTreeId::new(Address::from_slice(&row.address)), + H256::from_slice(&row.key), ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| { - StorageKey::new( - AccountTreeId::new(Address::from_slice(&row.address)), - H256::from_slice(&row.key), - ) - }) - .collect() - } + }) + .collect() + } + + pub async fn max_set_enumeration_index(&mut self) -> Option<(u64, L1BatchNumber)> { + sqlx::query!( + "SELECT index, l1_batch_number FROM initial_writes \ + WHERE index IS NOT NULL \ + ORDER BY index DESC LIMIT 1", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| { + ( + row.index.unwrap() as u64, + L1BatchNumber(row.l1_batch_number as u32), + ) + }) + } + + pub async fn initial_writes_for_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Vec<(H256, Option<u64>)> { + sqlx::query!( + "SELECT hashed_key, index FROM initial_writes \ + WHERE l1_batch_number = $1 \ + ORDER BY index", + l1_batch_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + ( + H256::from_slice(&row.hashed_key), + row.index.map(|i| i as u64), + ) + }) + .collect() + } + + pub async fn set_indices_for_initial_writes(&mut self, indexed_keys: &[(H256, u64)]) { + let (hashed_keys, indices): (Vec<_>, Vec<_>) = indexed_keys + .iter() + .map(|(hashed_key, index)| (hashed_key.as_bytes(), *index as i64)) + .unzip(); + sqlx::query!( + "UPDATE initial_writes \ + SET index = data_table.index \ + FROM ( \ + SELECT UNNEST($1::bytea[]) as hashed_key, \ + UNNEST($2::bigint[]) as index \ + ) as data_table \ + WHERE initial_writes.hashed_key = data_table.hashed_key", + &hashed_keys as &[&[u8]], + &indices, + )
.execute(self.storage.conn()) + .await + .unwrap(); + } + + /// Returns `hashed_keys` that are both present in the input and in `initial_writes` table. + pub async fn filter_written_slots(&mut self, hashed_keys: &[H256]) -> HashSet<H256> { + let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); + sqlx::query!( + "SELECT hashed_key FROM initial_writes \ + WHERE hashed_key = ANY($1)", + &hashed_keys as &[&[u8]], + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| H256::from_slice(&row.hashed_key)) + .collect() + } + + // Used only for tests. + pub async fn reset_indices(&mut self) { + sqlx::query!( + "UPDATE initial_writes \ + SET index = NULL", + ) + .execute(self.storage.conn()) + .await + .unwrap(); } } diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 63e7cae24265..5ba7510b7e0e 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -1,4 +1,4 @@ -use std::time::Instant; +use std::ops; use zksync_types::{ get_code_key, get_nonce_key, @@ -8,7 +8,10 @@ }; use zksync_utils::h256_to_u256; -use crate::{SqlxError, StorageProcessor}; +use crate::{ + instrument::InstrumentExt, models::storage_block::ResolvedL1BatchForMiniblock, SqlxError, + StorageProcessor, +}; #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { @@ -50,11 +53,12 @@ impl StorageWeb3Dal<'_, '_> { block_number: MiniblockNumber, ) -> Result<H256, SqlxError> { { - let started_at = Instant::now(); // We need to proper distinguish if the value is zero or None // for the VM to correctly determine initial writes. // So, we accept that the value is None if it's zero and it wasn't initially written at the moment. - let result = sqlx::query!( + let hashed_key = key.hashed_key(); + + sqlx::query!( r#" SELECT value FROM storage_logs @@ -62,29 +66,29 @@ ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC LIMIT 1 "#, - key.hashed_key().0.to_vec(), + hashed_key.as_bytes(), block_number.0 as i64 ) + .instrument("get_historical_value_unchecked") + .report_latency() + .with_arg("key", &hashed_key) .fetch_optional(self.storage.conn()) .await .map(|option_row| { option_row .map(|row| H256::from_slice(&row.value)) .unwrap_or_else(H256::zero) - }); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_historical_value_unchecked"); - - result + }) } } - /// Gets the L1 batch number that the miniblock has now or will have in the future (provided - /// that the node will operate correctly). Assumes that the miniblock is present in the DB; - /// this is not checked, and if this is false, the returned value will be meaningless. - pub async fn get_provisional_l1_batch_number_of_miniblock_unchecked( + /// Provides information about the L1 batch that the specified miniblock is a part of. + /// Assumes that the miniblock is present in the DB; this is not checked, and if this is false, + /// the returned value will be meaningless.
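// Illustrative caller-side sketch (not part of the patch), assuming the two fields of
// `ResolvedL1BatchForMiniblock` populated below: a miniblock either already belongs to a sealed
// L1 batch, or it will be included in the currently pending one.
//
//     let resolved = storage
//         .storage_web3_dal()
//         .resolve_l1_batch_number_of_miniblock(MiniblockNumber(100))
//         .await?;
//     let expected_batch = resolved
//         .miniblock_l1_batch
//         .unwrap_or(resolved.pending_l1_batch);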
+ pub async fn resolve_l1_batch_number_of_miniblock( &mut self, miniblock_number: MiniblockNumber, - ) -> Result<L1BatchNumber, SqlxError> { + ) -> Result<ResolvedL1BatchForMiniblock, SqlxError> { let row = sqlx::query!( "SELECT \ (SELECT l1_batch_number FROM miniblocks WHERE number = $1) as \"block_batch?\", \ @@ -94,32 +98,49 @@ .fetch_one(self.storage.conn()) .await?; - let batch_number = row.block_batch.or(row.max_batch).unwrap_or(0); - Ok(L1BatchNumber(batch_number as u32)) + Ok(ResolvedL1BatchForMiniblock { + miniblock_l1_batch: row.block_batch.map(|n| L1BatchNumber(n as u32)), + pending_l1_batch: L1BatchNumber(row.max_batch.unwrap_or(0) as u32), + }) } pub async fn get_l1_batch_number_for_initial_write( &mut self, key: &StorageKey, ) -> Result<Option<L1BatchNumber>, SqlxError> { - let started_at = Instant::now(); let hashed_key = key.hashed_key(); let row = sqlx::query!( "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1", hashed_key.as_bytes(), ) + .instrument("get_l1_batch_number_for_initial_write") + .report_latency() + .with_arg("key", &hashed_key) .fetch_optional(self.storage.conn()) .await?; let l1_batch_number = row.map(|record| L1BatchNumber(record.l1_batch_number as u32)); - metrics::histogram!( - "dal.request", - started_at.elapsed(), - "method" => "get_l1_batch_number_for_initial_write" - ); Ok(l1_batch_number) } + /// Returns distinct hashed storage keys that were modified in the specified miniblock range. + pub async fn modified_keys_in_miniblocks( + &mut self, + miniblock_numbers: ops::RangeInclusive<MiniblockNumber>, + ) -> Vec<H256> { + sqlx::query!( + "SELECT DISTINCT hashed_key FROM storage_logs WHERE miniblock_number BETWEEN $1 and $2", + miniblock_numbers.start().0 as i64, + miniblock_numbers.end().0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| H256::from_slice(&row.hashed_key)) + .collect() + } + /// This method doesn't check if block with number equals to `block_number` /// is present in the database. For such blocks `None` will be returned. pub async fn get_contract_code_unchecked( @@ -163,7 +184,7 @@ { sqlx::query!( "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2", - &hash.0.to_vec(), + hash.as_bytes(), block_number.0 as i64 ) .fetch_optional(self.storage.conn()) diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index b34ca121a584..09dcf7d853cb 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,12 +1,10 @@ -use std::time::Instant; +use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber, Transaction}; -use crate::models::storage_sync::StorageSyncBlock; -use crate::models::storage_transaction::StorageTransaction; -use crate::SqlxError; -use crate::StorageProcessor; -use zksync_types::api::en::SyncBlock; -use zksync_types::MiniblockNumber; -use zksync_types::{Address, Transaction}; +use crate::{ + instrument::{InstrumentExt, MethodLatency}, + models::{storage_sync::StorageSyncBlock, storage_transaction::StorageTransaction}, + SqlxError, StorageProcessor, +}; /// DAL subset dedicated to the EN synchronization.
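// A usage sketch (illustrative): the main node's EN-facing API can fetch one miniblock,
// optionally with its transactions. The `sync_dal()` accessor and the `transactions` field are
// assumptions, mirroring the other DAL accessors touched by this patch:
//
//     if let Some(block) = storage
//         .sync_dal()
//         .sync_block(MiniblockNumber(42), operator_address, /* include_transactions */ true)
//         .await?
//     {
//         // `block.transactions` is `Some(_)` only when `include_transactions` was `true`.
//     }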
#[derive(Debug)] @@ -21,8 +19,8 @@ impl SyncDal<'_, '_> { current_operator_address: Address, include_transactions: bool, ) -> Result<Option<SyncBlock>, SqlxError> { - let started_at = Instant::now(); - let storage_block_details: Option<StorageSyncBlock> = sqlx::query_as!( + let latency = MethodLatency::new("sync_dal_sync_block"); + let storage_block_details = sqlx::query_as!( StorageSyncBlock, r#" SELECT miniblocks.number, @@ -50,6 +48,8 @@ "#, block_number.0 as i64 ) + .instrument("sync_dal_sync_block.block") + .with_arg("block_number", &block_number) .fetch_optional(self.storage.conn()) .await?; @@ -60,6 +60,8 @@ r#"SELECT * FROM transactions WHERE miniblock_number = $1 ORDER BY index_in_block"#, block_number.0 as i64 ) + .instrument("sync_dal_sync_block.transactions") + .with_arg("block_number", &block_number) .fetch_all(self.storage.conn()) .await? .into_iter() @@ -74,7 +76,7 @@ None }; - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "sync_dal_sync_block"); + drop(latency); Ok(res) } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index d57ddc7cb12e..69843c93382a 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -12,12 +12,13 @@ use zksync_types::{ proofs::AggregationRound, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, - PriorityOpId, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, + PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, }; use zksync_utils::miniblock_hash; use crate::blocks_dal::BlocksDal; use crate::connection::ConnectionPool; +use crate::protocol_versions_dal::ProtocolVersionsDal; use crate::prover_dal::{GetProverJobsParams, ProverDal}; use crate::transactions_dal::L2TxSubmissionResult; use crate::transactions_dal::TransactionsDal; @@ -41,6 +42,7 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader { l1_gas_price: 100, l2_fair_gas_price: 100, base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: Some(ProtocolVersionId::default()), } } @@ -164,6 +166,12 @@ #[db_test(dal_crate)] async fn remove_stuck_txs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + let mut protocol_versions_dal = ProtocolVersionsDal { storage }; + protocol_versions_dal + .save_protocol_version(Default::default()) + .await; + + let storage = protocol_versions_dal.storage; let mut transactions_dal = TransactionsDal { storage }; // Stuck tx @@ -263,16 +271,21 @@ fn create_circuits() -> Vec<(&'static str, String)> { #[db_test(dal_crate)] async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + storage + .protocol_versions_dal() + .save_protocol_version(Default::default()) + .await; let block_number = 1; let header = L1BatchHeader::new( L1BatchNumber(block_number), 0, Default::default(), Default::default(), + Default::default(), ); storage .blocks_dal() - .insert_l1_batch(&header, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; let mut prover_dal = ProverDal { storage }; @@ -283,6 +296,7 @@ l1_batch_number, circuits.clone(),
AggregationRound::BasicCircuits, + ProtocolVersionId::latest() as i32, ) .await; @@ -292,6 +306,7 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { l1_batch_number, circuits.clone(), AggregationRound::BasicCircuits, + ProtocolVersionId::latest() as i32, ) .await; @@ -312,31 +327,46 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { #[db_test(dal_crate)] async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + let protocol_version = ProtocolVersion::default(); + storage + .protocol_versions_dal() + .save_protocol_version(protocol_version) + .await; let block_number = 1; let header = L1BatchHeader::new( L1BatchNumber(block_number), 0, Default::default(), Default::default(), + ProtocolVersionId::latest(), ); storage .blocks_dal() - .insert_l1_batch(&header, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; let mut prover_dal = ProverDal { storage }; let circuits = create_circuits(); let l1_batch_number = L1BatchNumber(block_number); prover_dal - .insert_prover_jobs(l1_batch_number, circuits, AggregationRound::BasicCircuits) + .insert_prover_jobs( + l1_batch_number, + circuits, + AggregationRound::BasicCircuits, + ProtocolVersionId::latest() as i32, + ) .await; // take all jobs from prover_job table for _ in 1..=4 { - let job = prover_dal.get_next_prover_job().await; + let job = prover_dal + .get_next_prover_job(&[ProtocolVersionId::latest()]) + .await; assert!(job.is_some()); } - let job = prover_dal.get_next_prover_job().await; + let job = prover_dal + .get_next_prover_job(&[ProtocolVersionId::latest()]) + .await; assert!(job.is_none()); // re-queue jobs let stuck_jobs = prover_dal @@ -345,7 +375,9 @@ async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) { assert_eq!(4, stuck_jobs.len()); // re-check that all jobs can be taken again for _ in 1..=4 { - let job = prover_dal.get_next_prover_job().await; + let job = prover_dal + .get_next_prover_job(&[ProtocolVersionId::latest()]) + .await; assert!(job.is_some()); } } @@ -353,16 +385,22 @@ async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) { #[db_test(dal_crate)] async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + let protocol_version = ProtocolVersion::default(); + storage + .protocol_versions_dal() + .save_protocol_version(protocol_version) + .await; let block_number = 1; let header = L1BatchHeader::new( L1BatchNumber(block_number), 0, Default::default(), Default::default(), + ProtocolVersionId::latest(), ); storage .blocks_dal() - .insert_l1_batch(&header, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; let mut prover_dal = ProverDal { storage }; @@ -373,6 +411,7 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: l1_batch_number, circuits.clone(), AggregationRound::BasicCircuits, + ProtocolVersionId::latest() as i32, ) .await; let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); @@ -385,7 +424,8 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: for id in job_ids.iter() { prover_dal .save_proof(*id, Duration::from_secs(0), proof.clone(), "unit-test") - .await; + .await + .unwrap(); } let mut witness_generator_dal = WitnessGeneratorDal { storage }; @@ -396,6 +436,7 @@ async fn 
test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: "basic_circuits_inputs_1.bin", circuits.len(), "scheduler_witness_1.bin", + ProtocolVersionId::latest() as i32, ) .await; @@ -406,7 +447,12 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: // Ensure get-next job gives the leaf aggregation witness job let job = witness_generator_dal - .get_next_leaf_aggregation_witness_job(Duration::from_secs(0), 10, u32::MAX) + .get_next_leaf_aggregation_witness_job( + Duration::from_secs(0), + 10, + u32::MAX, + &[ProtocolVersionId::latest()], + ) .await; assert_eq!(l1_batch_number, job.unwrap().block_number); } @@ -414,16 +460,22 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: #[db_test(dal_crate)] async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + let protocol_version = ProtocolVersion::default(); + storage + .protocol_versions_dal() + .save_protocol_version(protocol_version) + .await; let block_number = 1; let header = L1BatchHeader::new( L1BatchNumber(block_number), 0, Default::default(), Default::default(), + ProtocolVersionId::latest(), ); storage .blocks_dal() - .insert_l1_batch(&header, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; let mut prover_dal = ProverDal { storage }; @@ -434,6 +486,7 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: l1_batch_number, circuits.clone(), AggregationRound::LeafAggregation, + ProtocolVersionId::latest() as i32, ) .await; let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); @@ -445,7 +498,8 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: for id in job_ids { prover_dal .save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test") - .await; + .await + .unwrap(); } let mut witness_generator_dal = WitnessGeneratorDal { storage }; @@ -456,6 +510,7 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: "basic_circuits_inputs_1.bin", circuits.len(), "scheduler_witness_1.bin", + ProtocolVersionId::latest() as i32, ) .await; witness_generator_dal @@ -474,7 +529,12 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: // Ensure get-next job gives the node aggregation witness job let job = witness_generator_dal - .get_next_node_aggregation_witness_job(Duration::from_secs(0), 10, u32::MAX) + .get_next_node_aggregation_witness_job( + Duration::from_secs(0), + 10, + u32::MAX, + &[ProtocolVersionId::latest()], + ) .await; assert_eq!(l1_batch_number, job.unwrap().block_number); } @@ -482,16 +542,22 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: #[db_test(dal_crate)] async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; + let protocol_version = ProtocolVersion::default(); + storage + .protocol_versions_dal() + .save_protocol_version(protocol_version) + .await; let block_number = 1; let header = L1BatchHeader::new( L1BatchNumber(block_number), 0, Default::default(), Default::default(), + ProtocolVersionId::latest(), ); storage .blocks_dal() - .insert_l1_batch(&header, Default::default()) + .insert_l1_batch(&header, &[], Default::default()) .await; let mut prover_dal = ProverDal { storage }; @@ -505,6 +571,7 @@ async fn 
test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec l1_batch_number, circuits.clone(), AggregationRound::NodeAggregation, + ProtocolVersionId::latest() as i32, ) .await; let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); @@ -516,7 +583,8 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec for id in &job_ids { prover_dal .save_proof(*id, Duration::from_secs(0), proof.clone(), "unit-test") - .await; + .await + .unwrap(); } let mut witness_generator_dal = WitnessGeneratorDal { storage }; @@ -527,6 +595,7 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec "basic_circuits_inputs_1.bin", circuits.len(), "scheduler_witness_1.bin", + ProtocolVersionId::latest() as i32, ) .await; witness_generator_dal @@ -540,7 +609,12 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec // Ensure get-next job gives the scheduler witness job let job = witness_generator_dal - .get_next_scheduler_witness_job(Duration::from_secs(0), 10, u32::MAX) + .get_next_scheduler_witness_job( + Duration::from_secs(0), + 10, + u32::MAX, + &[ProtocolVersionId::latest()], + ) .await; assert_eq!(l1_batch_number, job.unwrap().block_number); } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index d729c0682b4e..5760c7b84f2c 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -3,7 +3,6 @@ use std::collections::HashMap; use std::fmt::{self, Debug}; use std::iter::FromIterator; use std::time::{Duration, Instant}; -use zksync_types::fee::TransactionExecutionMetrics; use itertools::Itertools; use sqlx::error; @@ -11,11 +10,11 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_types::tx::tx_execution_info::TxExecutionStatus; use zksync_types::vm_trace::Call; -use zksync_types::{get_nonce_key, U256}; use zksync_types::{ - l1::L1Tx, l2::L2Tx, tx::TransactionExecutionResult, vm_trace::VmExecutionTrace, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, MiniblockNumber, Nonce, PriorityOpId, - Transaction, H256, + block::MiniblockReexecuteData, fee::TransactionExecutionMetrics, get_nonce_key, l1::L1Tx, + l2::L2Tx, protocol_version::ProtocolUpgradeTx, tx::TransactionExecutionResult, + vm_trace::VmExecutionTrace, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, + MiniblockNumber, Nonce, PriorityOpId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::{h256_to_u32, u256_to_big_decimal}; @@ -48,22 +47,24 @@ type TxLocations = Vec<(MiniblockNumber, Vec<(H256, u32, u16)>)>; impl TransactionsDal<'_, '_> { pub async fn insert_transaction_l1(&mut self, tx: L1Tx, l1_block_number: L1BlockNumber) { { - let contract_address = tx.execute.contract_address.as_bytes().to_vec(); - let tx_hash = tx.hash().0.to_vec(); + let contract_address = tx.execute.contract_address.as_bytes(); + let tx_hash = tx.hash(); + let tx_hash_bytes = tx_hash.as_bytes(); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())); let gas_limit = u256_to_big_decimal(tx.common_data.gas_limit); let max_fee_per_gas = u256_to_big_decimal(tx.common_data.max_fee_per_gas); let full_fee = u256_to_big_decimal(tx.common_data.full_fee); let layer_2_tip_fee = u256_to_big_decimal(tx.common_data.layer_2_tip_fee); - let sender = tx.common_data.sender.0.to_vec(); + let sender = tx.common_data.sender.as_bytes(); let serial_id = tx.serial_id().0 as i64; let 
gas_per_pubdata_limit = u256_to_big_decimal(tx.common_data.gas_per_pubdata_limit); let value = u256_to_big_decimal(tx.execute.value); let tx_format = tx.common_data.tx_format() as i32; + let empty_address = Address::default(); let to_mint = u256_to_big_decimal(tx.common_data.to_mint); - let refund_recipient = tx.common_data.refund_recipient.as_bytes().to_vec(); + let refund_recipient = tx.common_data.refund_recipient.as_bytes(); let secs = (tx.received_timestamp_ms / 1000) as i64; let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; @@ -107,7 +108,7 @@ impl TransactionsDal<'_, '_> { ) ON CONFLICT (hash) DO NOTHING ", - tx_hash, + tx_hash_bytes, sender, gas_limit, max_fee_per_gas, @@ -119,6 +120,88 @@ impl TransactionsDal<'_, '_> { contract_address, l1_block_number.0 as i32, value, + empty_address.as_bytes(), + &[] as &[u8], + tx_format, + to_mint, + refund_recipient, + received_at, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + } + } + + pub async fn insert_system_transaction(&mut self, tx: ProtocolUpgradeTx) { + { + let contract_address = tx.execute.contract_address.as_bytes().to_vec(); + let tx_hash = tx.common_data.hash().0.to_vec(); + let json_data = serde_json::to_value(&tx.execute).unwrap_or_else(|_| { + panic!("cannot serialize tx {:?} to json", tx.common_data.hash()) + }); + let upgrade_id = tx.common_data.upgrade_id as i32; + let gas_limit = u256_to_big_decimal(tx.common_data.gas_limit); + let max_fee_per_gas = u256_to_big_decimal(tx.common_data.max_fee_per_gas); + let sender = tx.common_data.sender.0.to_vec(); + let gas_per_pubdata_limit = u256_to_big_decimal(tx.common_data.gas_per_pubdata_limit); + let value = u256_to_big_decimal(tx.execute.value); + let tx_format = tx.common_data.tx_format() as i32; + let l1_block_number = tx.common_data.eth_block as i32; + + let to_mint = u256_to_big_decimal(tx.common_data.to_mint); + let refund_recipient = tx.common_data.refund_recipient.as_bytes().to_vec(); + + let secs = (tx.received_timestamp_ms / 1000) as i64; + let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; + let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); + + sqlx::query!( + " + INSERT INTO transactions + ( + hash, + is_priority, + initiator_address, + + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + + data, + upgrade_id, + contract_address, + l1_block_number, + value, + + paymaster, + paymaster_input, + tx_format, + + l1_tx_mint, + l1_tx_refund_recipient, + + received_at, + created_at, + updated_at + ) + VALUES + ( + $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, + $13, $14, $15, $16, now(), now() + ) + ON CONFLICT (hash) DO NOTHING + ", + tx_hash, + sender, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + json_data, + upgrade_id, + contract_address, + l1_block_number, + value, &Address::default().0.to_vec(), &vec![], tx_format, @@ -138,8 +221,9 @@ impl TransactionsDal<'_, '_> { exec_info: TransactionExecutionMetrics, ) -> L2TxSubmissionResult { { - let contract_address = tx.execute.contract_address.as_bytes().to_vec(); - let tx_hash = tx.hash().0.to_vec(); + let tx_hash = tx.hash(); + let initiator_address = tx.initiator_account(); + let contract_address = tx.execute.contract_address.as_bytes(); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())); let gas_limit = u256_to_big_decimal(tx.common_data.fee.gas_limit); @@ -149,18 +233,12 @@ impl TransactionsDal<'_, '_> { let gas_per_pubdata_limit = 
u256_to_big_decimal(tx.common_data.fee.gas_per_pubdata_limit); let tx_format = tx.common_data.transaction_type as i32; - let initiator = tx.initiator_account().0.to_vec(); - let signature = tx.common_data.signature.clone(); + let signature = tx.common_data.signature; let nonce = tx.common_data.nonce.0 as i64; - let input_data = tx - .common_data - .input - .clone() - .expect("Data is mandatory") - .data; + let input_data = tx.common_data.input.expect("Data is mandatory").data; let value = u256_to_big_decimal(tx.execute.value); - let paymaster = tx.common_data.paymaster_params.paymaster.0.to_vec(); - let paymaster_input = tx.common_data.paymaster_params.paymaster_input.clone(); + let paymaster = tx.common_data.paymaster_params.paymaster.0.as_ref(); + let paymaster_input = tx.common_data.paymaster_params.paymaster_input; let secs = (tx.received_timestamp_ms / 1000) as i64; let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); @@ -230,8 +308,8 @@ WHERE transactions.is_priority = FALSE AND transactions.miniblock_number IS NULL RETURNING (SELECT hash FROM transactions WHERE transactions.initiator_address = $2 AND transactions.nonce = $3) IS NOT NULL as "is_replaced!" "#, - &tx_hash, - &initiator, + tx_hash.as_bytes(), + initiator_address.as_bytes(), nonce, &signature, gas_limit, @@ -281,9 +359,9 @@ vlog::debug!( "{:?} l2 transaction {:?} to DB. init_acc {:?} nonce {:?} returned option {:?}", l2_tx_insertion_result, - tx.hash(), - tx.initiator_account(), - tx.nonce(), + tx_hash, + initiator_address, + nonce, l2_tx_insertion_result ); @@ -341,6 +419,13 @@ let mut l1_refunded_gas = Vec::with_capacity(transactions.len()); let mut l1_effective_gas_prices = Vec::with_capacity(transactions.len()); + let mut upgrade_hashes = Vec::new(); + let mut upgrade_indices_in_block = Vec::new(); + let mut upgrade_errors = Vec::new(); + let mut upgrade_execution_infos = Vec::new(); + let mut upgrade_refunded_gas = Vec::new(); + let mut upgrade_effective_gas_prices = Vec::new(); + let mut l2_hashes = Vec::with_capacity(transactions.len()); let mut l2_values = Vec::with_capacity(transactions.len()); let mut l2_contract_addresses = Vec::with_capacity(transactions.len()); @@ -441,6 +526,16 @@ .push(u256_to_big_decimal(common_data.fee.gas_per_pubdata_limit)); l2_refunded_gas.push(*refunded_gas as i64); } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + upgrade_hashes.push(hash.0.to_vec()); + upgrade_indices_in_block.push(index_in_block as i32); + upgrade_errors.push(error.unwrap_or_default()); + upgrade_execution_infos + .push(serde_json::to_value(execution_info).unwrap()); + upgrade_refunded_gas.push(*refunded_gas as i64); + upgrade_effective_gas_prices + .push(u256_to_big_decimal(common_data.max_fee_per_gas)); + } } }); @@ -450,6 +545,7 @@ // Due to the current tx replacement model, it's possible that tx has been replaced, // but the original was executed in memory, // so we have to update all fields for tx from fields stored in memory. + // Note that transactions are updated in order of their hashes to avoid deadlocks with other UPDATE queries.
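// Why hash ordering helps (illustrative reasoning, not patch code): Postgres acquires row locks
// as the UPDATE walks the joined rows. If two concurrent writers lock the same rows in
// different orders, the waits can form a cycle and one transaction is aborted with a deadlock:
//
//     writer A: lock(X) -> wait(Y)
//     writer B: lock(Y) -> wait(X)   => cycle => deadlock
//
// Sorting the joined set by a canonical key (`transactions.hash` in the query below) makes every
// concurrent UPDATE take row locks in the same global order, so waits form a chain, not a cycle.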
sqlx::query!( r#" UPDATE transactions @@ -477,29 +573,34 @@ impl TransactionsDal<'_, '_> { updated_at = now() FROM ( - SELECT - UNNEST($1::bytea[]) AS initiator_address, - UNNEST($2::int[]) AS nonce, - UNNEST($3::bytea[]) AS hash, - UNNEST($4::bytea[]) AS signature, - UNNEST($5::numeric[]) AS gas_limit, - UNNEST($6::numeric[]) AS max_fee_per_gas, - UNNEST($7::numeric[]) AS max_priority_fee_per_gas, - UNNEST($8::numeric[]) AS gas_per_pubdata_limit, - UNNEST($9::int[]) AS tx_format, - UNNEST($10::integer[]) AS index_in_block, - UNNEST($11::varchar[]) AS error, - UNNEST($12::numeric[]) AS effective_gas_price, - UNNEST($13::jsonb[]) AS new_execution_info, - UNNEST($14::bytea[]) AS input, - UNNEST($15::jsonb[]) AS data, - UNNEST($16::bigint[]) as refunded_gas, - UNNEST($17::numeric[]) as value, - UNNEST($18::bytea[]) as contract_address, - UNNEST($19::bytea[]) as paymaster, - UNNEST($20::bytea[]) as paymaster_input + SELECT data_table_temp.* FROM ( + SELECT + UNNEST($1::bytea[]) AS initiator_address, + UNNEST($2::int[]) AS nonce, + UNNEST($3::bytea[]) AS hash, + UNNEST($4::bytea[]) AS signature, + UNNEST($5::numeric[]) AS gas_limit, + UNNEST($6::numeric[]) AS max_fee_per_gas, + UNNEST($7::numeric[]) AS max_priority_fee_per_gas, + UNNEST($8::numeric[]) AS gas_per_pubdata_limit, + UNNEST($9::int[]) AS tx_format, + UNNEST($10::integer[]) AS index_in_block, + UNNEST($11::varchar[]) AS error, + UNNEST($12::numeric[]) AS effective_gas_price, + UNNEST($13::jsonb[]) AS new_execution_info, + UNNEST($14::bytea[]) AS input, + UNNEST($15::jsonb[]) AS data, + UNNEST($16::bigint[]) as refunded_gas, + UNNEST($17::numeric[]) as value, + UNNEST($18::bytea[]) as contract_address, + UNNEST($19::bytea[]) as paymaster, + UNNEST($20::bytea[]) as paymaster_input + ) AS data_table_temp + JOIN transactions ON transactions.initiator_address = data_table_temp.initiator_address + AND transactions.nonce = data_table_temp.nonce + ORDER BY transactions.hash ) AS data_table - WHERE transactions.initiator_address=data_table.initiator_address + WHERE transactions.initiator_address=data_table.initiator_address AND transactions.nonce=data_table.nonce "#, &l2_initiators, @@ -568,6 +669,44 @@ impl TransactionsDal<'_, '_> { .unwrap(); } + if !upgrade_hashes.is_empty() { + sqlx::query!( + r#" + UPDATE transactions + SET + miniblock_number = $1, + index_in_block = data_table.index_in_block, + error = NULLIF(data_table.error, ''), + in_mempool=FALSE, + execution_info = execution_info || data_table.new_execution_info, + refunded_gas = data_table.refunded_gas, + effective_gas_price = data_table.effective_gas_price, + updated_at = now() + FROM + ( + SELECT + UNNEST($2::bytea[]) AS hash, + UNNEST($3::integer[]) AS index_in_block, + UNNEST($4::varchar[]) AS error, + UNNEST($5::jsonb[]) AS new_execution_info, + UNNEST($6::bigint[]) as refunded_gas, + UNNEST($7::numeric[]) as effective_gas_price + ) AS data_table + WHERE transactions.hash = data_table.hash + "#, + miniblock_number.0 as i32, + &upgrade_hashes, + &upgrade_indices_in_block, + &upgrade_errors, + &upgrade_execution_infos, + &upgrade_refunded_gas, + &upgrade_effective_gas_prices, + ) + .execute(transaction.conn()) + .await + .unwrap(); + } + if !bytea_call_traces.is_empty() { let started_at = Instant::now(); sqlx::query!( @@ -686,24 +825,29 @@ impl TransactionsDal<'_, '_> { .await .unwrap(); + // Note, that transactions are updated in order of their hashes to avoid deadlocks with other UPDATE queries. 
+        // Note that transactions are updated in order of their hashes to avoid deadlocks with other UPDATE queries.
         let transactions = sqlx::query_as!(
             StorageTransaction,
             "UPDATE transactions
             SET in_mempool = TRUE
             FROM (
-                SELECT hash
-                FROM transactions
-                WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL
-                AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))
-                ORDER BY is_priority DESC, priority_op_id, received_at
-                LIMIT $1
-                FOR UPDATE
-            ) as subquery
-            WHERE transactions.hash = subquery.hash
+                SELECT hash FROM (
+                    SELECT hash
+                    FROM transactions
+                    WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL
+                    AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))
+                    AND tx_format != $4
+                    ORDER BY is_priority DESC, priority_op_id, received_at
+                    LIMIT $1
+                ) as subquery1
+                ORDER BY hash
+            ) as subquery2
+            WHERE transactions.hash = subquery2.hash
             RETURNING transactions.*",
             limit as i32,
             BigDecimal::from(fee_per_gas),
             BigDecimal::from(gas_per_pubdata),
+            PROTOCOL_UPGRADE_TX_TYPE as i32,
         )
         .fetch_all(self.storage.conn())
         .await
@@ -823,37 +967,54 @@ impl TransactionsDal<'_, '_> {
         }
     }

-    // Returns transactions that state_keeper needs to reexecute on restart.
-    // That is the transactions that are included to some miniblock,
-    // but not included to L1 batch. The order of the transactions is the same as it was
-    // during the previous execution.
-    pub async fn get_transactions_to_reexecute(
-        &mut self,
-    ) -> Vec<(MiniblockNumber, Vec<Transaction>)> {
-        {
-            sqlx::query_as!(
-                StorageTransaction,
-                "
                SELECT * FROM transactions
                WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL
                ORDER BY miniblock_number, index_in_block
-                ",
+    /// Returns miniblocks with their transactions that state_keeper needs to reexecute on restart.
+    /// These are the transactions that are included in some miniblock
+    /// but not yet in any L1 batch. The order of the transactions is the same as it was
+    /// during the previous execution.
+    pub async fn get_miniblocks_to_reexecute(&mut self) -> Vec<MiniblockReexecuteData> {
+        let transactions_by_miniblock: Vec<(MiniblockNumber, Vec<Transaction>)> = sqlx::query_as!(
+            StorageTransaction,
+            "SELECT * FROM transactions \
+            WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL \
+            ORDER BY miniblock_number, index_in_block",
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .group_by(|tx| tx.miniblock_number.unwrap())
+        .into_iter()
+        .map(|(miniblock_number, txs)| {
+            (
+                MiniblockNumber(miniblock_number as u32),
+                txs.map(Transaction::from).collect::<Vec<_>>(),
             )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
-            .group_by(|tx| tx.miniblock_number)
+        })
+        .collect();
+        if transactions_by_miniblock.is_empty() {
+            return Vec::new();
+        }
+
+        let from_miniblock = transactions_by_miniblock.first().unwrap().0;
+        let to_miniblock = transactions_by_miniblock.last().unwrap().0;
+        let timestamps = sqlx::query!(
+            "SELECT timestamp FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number",
+            from_miniblock.0 as i64,
+            to_miniblock.0 as i64,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
+
+        transactions_by_miniblock
             .into_iter()
-            .map(|(miniblock_number, txs)| {
-                (
-                    MiniblockNumber(miniblock_number.unwrap() as u32),
-                    txs.map(Into::<Transaction>::into)
-                        .collect::<Vec<Transaction>>(),
-                )
+            .zip(timestamps)
+            .map(|((number, txs), row)| MiniblockReexecuteData {
+                number,
+                timestamp: row.timestamp as u64,
+                txs,
             })
             .collect()
-        }
     }

     pub async fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations {
@@ -902,43 +1063,18 @@ impl TransactionsDal<'_, '_> {
         }
     }

-    pub async fn migrate_l1_txs_effective_gas_price_pre_m6(
-        &mut self,
-        from_block: u32,
-        to_block: u32,
-    ) {
-        sqlx::query!(
-            "
-            UPDATE transactions
-            SET effective_gas_price = 0
-            WHERE miniblock_number BETWEEN $1 AND $2
-            AND is_priority = TRUE
-            ",
-            from_block as i32,
-            to_block as i32,
-        )
-        .execute(self.storage.conn())
-        .await
-        .unwrap();
-    }
-
-    pub async fn migrate_l1_txs_effective_gas_price_post_m6(
-        &mut self,
-        from_block: u32,
-        to_block: u32,
-    ) {
-        sqlx::query!(
-            "
-            UPDATE transactions
-            SET effective_gas_price = max_fee_per_gas
-            WHERE miniblock_number BETWEEN $1 AND $2
-            AND is_priority = TRUE
-            ",
-            from_block as i32,
-            to_block as i32,
+    pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> Option<Transaction> {
+        sqlx::query_as!(
+            StorageTransaction,
+            r#"
+            SELECT * FROM transactions
+            WHERE hash = $1
+            "#,
+            hash.as_bytes()
         )
-        .execute(self.storage.conn())
+        .fetch_optional(self.storage.conn())
         .await
-        .unwrap();
+        .unwrap()
+        .map(|tx| tx.into())
     }
 }
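`get_miniblocks_to_reexecute` above leans on `itertools::group_by`, which only groups adjacent elements; the `ORDER BY miniblock_number` in the SQL is therefore essential. A self-contained sketch of the same pattern with a hypothetical row type (assuming an itertools version where the adapter is still called `group_by`):

    use itertools::Itertools;

    struct Row {
        miniblock: u32,
        tx_id: u64,
    }

    /// Groups rows into (miniblock, tx ids) pairs; correct only because the rows
    /// arrive sorted by miniblock, exactly as the SQL above guarantees.
    fn group_rows(rows: Vec<Row>) -> Vec<(u32, Vec<u64>)> {
        rows.into_iter()
            .group_by(|row| row.miniblock)
            .into_iter()
            .map(|(miniblock, group)| (miniblock, group.map(|row| row.tx_id).collect()))
            .collect()
    }

    fn main() {
        let rows = vec![
            Row { miniblock: 1, tx_id: 10 },
            Row { miniblock: 1, tx_id: 11 },
            Row { miniblock: 2, tx_id: 12 },
        ];
        assert_eq!(group_rows(rows), vec![(1, vec![10, 11]), (2, vec![12])]);
    }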
diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs
index 8d214f2d4bb8..a56f93d93653 100644
--- a/core/lib/dal/src/transactions_web3_dal.rs
+++ b/core/lib/dal/src/transactions_web3_dal.rs
@@ -14,7 +14,7 @@ use crate::models::{
         StorageTransactionDetails,
     },
 };
-use crate::{SqlxError, StorageProcessor};
+use crate::{instrument::InstrumentExt, SqlxError, StorageProcessor};

 #[derive(Debug)]
 pub struct TransactionsWeb3Dal<'a, 'c> {
@@ -59,9 +59,11 @@ impl TransactionsWeb3Dal<'_, '_> {
                 WHERE transactions.hash = $2
                 "#,
             ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(),
-            hash.0.to_vec(),
+            hash.as_bytes(),
             FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes()
         )
+        .instrument("get_transaction_receipt")
+        .with_arg("hash", &hash)
         .fetch_optional(self.storage.conn())
         .await?
         .map(|db_row| {
@@ -74,13 +76,11 @@ impl TransactionsWeb3Dal<'_, '_> {
             let tx_type = db_row.tx_format.map(U64::from).unwrap_or_default();
             let transaction_index = db_row.index_in_block.map(U64::from).unwrap_or_default();

+            let block_hash = db_row.block_hash.map(|bytes| H256::from_slice(&bytes));
             api::TransactionReceipt {
                 transaction_hash: H256::from_slice(&db_row.tx_hash),
                 transaction_index,
-                block_hash: db_row
-                    .block_hash
-                    .clone()
-                    .map(|bytes| H256::from_slice(&bytes)),
+                block_hash,
                 block_number: db_row.block_number.map(U64::from),
                 l1_batch_tx_index: db_row.l1_batch_tx_index.map(U64::from),
                 l1_batch_number: db_row.l1_batch_number.map(U64::from),
@@ -114,7 +114,7 @@ impl TransactionsWeb3Dal<'_, '_> {
                 logs: vec![],
                 l2_to_l1_logs: vec![],
                 status,
-                root: db_row.block_hash.map(|bytes| H256::from_slice(&bytes)),
+                root: block_hash,
                 logs_bloom: Default::default(),
                 // Even though the Rust SDK recommends us to supply "None" for legacy transactions
                 // we always supply some number anyway to have the same behaviour as most popular RPCs
@@ -137,6 +137,8 @@ impl TransactionsWeb3Dal<'_, '_> {
                 "#,
             hash.as_bytes()
         )
+        .instrument("get_transaction_receipt_events")
+        .with_arg("hash", &hash)
         .fetch_all(self.storage.conn())
         .await?
         .into_iter()
@@ -219,9 +221,15 @@ impl TransactionsWeb3Dal<'_, '_> {
         let storage_tx_details: Option<StorageTransactionDetails> = sqlx::query_as!(
             StorageTransactionDetails,
             r#"
-                SELECT transactions.*,
-                    miniblocks.timestamp as "miniblock_timestamp?",
-                    miniblocks.hash as "block_hash?",
+                SELECT transactions.is_priority,
+                    transactions.initiator_address,
+                    transactions.gas_limit,
+                    transactions.gas_per_pubdata_limit,
+                    transactions.received_at,
+                    transactions.miniblock_number,
+                    transactions.error,
+                    transactions.effective_gas_price,
+                    transactions.refunded_gas,
                     commit_tx.tx_hash as "eth_commit_tx_hash?",
                     prove_tx.tx_hash as "eth_prove_tx_hash?",
                     execute_tx.tx_hash as "eth_execute_tx_hash?"
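The `.instrument(...)`/`.with_arg(...)` chains being added throughout this file come from the DAL-internal `InstrumentExt` trait. The general shape of such an adapter can be sketched with a plain free function (the `println!` reporting is a stand-in; the real trait reports latency metrics):

    use std::{future::Future, time::Instant};

    /// Awaits a query future and reports how long it took, tagged with a name.
    async fn instrumented<F: Future>(name: &'static str, query: F) -> F::Output {
        let started_at = Instant::now();
        let output = query.await;
        println!("query `{name}` took {:?}", started_at.elapsed());
        output
    }

    fn main() {
        let answer = futures::executor::block_on(instrumented("answer", async { 42 }));
        assert_eq!(answer, 42);
    }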
@@ -235,6 +243,8 @@ impl TransactionsWeb3Dal<'_, '_> {
                 "#,
             hash.as_bytes()
         )
+        .instrument("get_transaction_details")
+        .with_arg("hash", &hash)
         .fetch_optional(self.storage.conn())
         .await?;
@@ -344,7 +354,7 @@ impl TransactionsWeb3Dal<'_, '_> {
 #[cfg(test)]
 mod tests {
     use db_test_macro::db_test;
-    use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx};
+    use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx, ProtocolVersion};
     use zksync_utils::miniblock_hash;

     use super::*;
@@ -376,6 +386,9 @@ mod tests {
     #[db_test(dal_crate)]
     async fn getting_transaction(connection_pool: ConnectionPool) {
         let mut conn = connection_pool.access_test_storage().await;
+        conn.protocol_versions_dal()
+            .save_protocol_version(ProtocolVersion::default())
+            .await;
         let tx = mock_l2_transaction();
         let tx_hash = tx.hash();
         prepare_transaction(&mut conn, tx).await;
@@ -433,6 +446,9 @@ mod tests {
     #[db_test(dal_crate)]
     async fn getting_miniblock_transactions(connection_pool: ConnectionPool) {
         let mut conn = connection_pool.access_test_storage().await;
+        conn.protocol_versions_dal()
+            .save_protocol_version(ProtocolVersion::default())
+            .await;
         let tx = mock_l2_transaction();
         let tx_hash = tx.hash();
         prepare_transaction(&mut conn, tx).await;
diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs
index 408bb7b4cf7b..a672f8495511 100644
--- a/core/lib/dal/src/witness_generator_dal.rs
+++ b/core/lib/dal/src/witness_generator_dal.rs
@@ -1,10 +1,8 @@
-use std::collections::HashMap;
-use std::ops::Range;
-use std::time::{Duration, Instant};
-
 use itertools::Itertools;
 use sqlx::Row;

+use std::{collections::HashMap, ops::Range, time::Duration};
+
 use zksync_types::proofs::{
     AggregationRound, JobCountStatistics, WitnessGeneratorJobMetadata, WitnessJobInfo,
 };
@@ -13,11 +11,14 @@ use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits
 use zksync_types::zkevm_test_harness::bellman::bn256::Bn256;
 use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof;
 use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle;
-use zksync_types::L1BatchNumber;
+use zksync_types::{L1BatchNumber, ProtocolVersionId};

-use crate::models::storage_witness_job_info::StorageWitnessJobInfo;
-use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration};
-use crate::StorageProcessor;
+use crate::{
+    instrument::{InstrumentExt, MethodLatency},
+    models::storage_witness_job_info::StorageWitnessJobInfo,
+    time_utils::{duration_to_naive_time, pg_interval_from_duration},
+    StorageProcessor,
+};

 #[derive(Debug)]
 pub struct WitnessGeneratorDal<'a, 'c> {
@@ -30,7 +31,9 @@ impl WitnessGeneratorDal<'_, '_> {
         processing_timeout: Duration,
         max_attempts: u32,
         last_l1_batch_to_process: u32,
+        protocol_versions: &[ProtocolVersionId],
     ) -> Option<WitnessGeneratorJobMetadata> {
+        let protocol_versions: Vec<i32> = protocol_versions.iter().map(|&id| id as i32).collect();
         let processing_timeout = pg_interval_from_duration(processing_timeout);
         let result: Option<WitnessGeneratorJobMetadata> = sqlx::query!(
             "
             UPDATE witness_inputs
             SET status = 'in_progress', attempts = attempts + 1,
                 updated_at = now(), processing_started_at = now()
             WHERE l1_batch_number = (
-                SELECT l1_batch_number
-                FROM witness_inputs
-                WHERE l1_batch_number <= $3
-                AND
-                (   status = 'queued'
-                    OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
-                    OR (status = 'failed' AND attempts < $2)
-                )
-                ORDER BY l1_batch_number ASC
-                LIMIT 1
-                FOR UPDATE
-                SKIP LOCKED
+                SELECT l1_batch_number
+                FROM witness_inputs
+                WHERE l1_batch_number <= $3
+                AND
+                (   status = 'queued'
+                    OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
+                    OR (status = 'failed' AND attempts < $2)
+                )
+                AND protocol_version = ANY($4)
+                ORDER BY l1_batch_number ASC
+                LIMIT 1
+                FOR UPDATE
+                SKIP LOCKED
             )
             RETURNING witness_inputs.*
             ",
-            &processing_timeout,
-            max_attempts as i32,
-            last_l1_batch_to_process as i64
-        )
-        .fetch_optional(self.storage.conn())
-        .await
-        .unwrap()
-        .map(|row| WitnessGeneratorJobMetadata {
-            block_number: L1BatchNumber(row.l1_batch_number as u32),
-            proofs: vec![],
-        });
-
+            &processing_timeout,
+            max_attempts as i32,
+            last_l1_batch_to_process as i64,
+            &protocol_versions[..],
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| WitnessGeneratorJobMetadata {
+            block_number: L1BatchNumber(row.l1_batch_number as u32),
+            proofs: vec![],
+        });
         result
     }
@@ -109,7 +113,9 @@ impl WitnessGeneratorDal<'_, '_> {
         processing_timeout: Duration,
         max_attempts: u32,
         last_l1_batch_to_process: u32,
+        protocol_versions: &[ProtocolVersionId],
     ) -> Option<WitnessGeneratorJobMetadata> {
+        let protocol_versions: Vec<i32> = protocol_versions.iter().map(|&id| id as i32).collect();
         let processing_timeout = pg_interval_from_duration(processing_timeout);
         let record = sqlx::query!(
             "
@@ -125,6 +131,7 @@ impl WitnessGeneratorDal<'_, '_> {
                     OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
                     OR (status = 'failed' AND attempts < $2)
                 )
+                AND protocol_version = ANY($4)
                 ORDER BY l1_batch_number ASC
                 LIMIT 1
                 FOR UPDATE
@@ -134,7 +141,8 @@ impl WitnessGeneratorDal<'_, '_> {
             ",
             &processing_timeout,
             max_attempts as i32,
-            last_l1_batch_to_process as i64
+            last_l1_batch_to_process as i64,
+            &protocol_versions[..],
         )
         .fetch_optional(self.storage.conn())
         .await
@@ -154,14 +162,13 @@ impl WitnessGeneratorDal<'_, '_> {
                 .await;

             assert_eq!(
-                    basic_circuits_proofs.len(),
-                    number_of_basic_circuits as usize,
-                    "leaf_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed basic proofs, which is different from expected {}",
-                    l1_batch_number,
-                    basic_circuits_proofs.len(),
-                    number_of_basic_circuits
-                );
-
+                basic_circuits_proofs.len(),
+                number_of_basic_circuits as usize,
+                "leaf_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed basic proofs, which is different from expected {}",
+                l1_batch_number,
+                basic_circuits_proofs.len(),
+                number_of_basic_circuits
+            );
             Some(WitnessGeneratorJobMetadata {
                 block_number: l1_batch_number,
                 proofs: basic_circuits_proofs,
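Each job picker in this file now takes `protocol_versions: &[ProtocolVersionId]` and narrows candidates with `protocol_version = ANY($4)`. Postgres cannot bind a Rust enum slice directly, so the IDs are first lowered to `i32`s; a standalone sketch (the enum variants here are invented):

    #[derive(Clone, Copy)]
    enum ProtocolVersionId {
        Version17 = 17,
        Version18 = 18,
    }

    /// `= ANY($n)` needs a concrete SQL array type, hence the `Vec<i32>` lowering.
    fn to_sql_array(versions: &[ProtocolVersionId]) -> Vec<i32> {
        versions.iter().map(|&id| id as i32).collect()
    }

    fn main() {
        let versions = [ProtocolVersionId::Version17, ProtocolVersionId::Version18];
        assert_eq!(to_sql_array(&versions), vec![17, 18]);
    }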
@@ -176,7 +183,9 @@ impl WitnessGeneratorDal<'_, '_> {
         processing_timeout: Duration,
         max_attempts: u32,
         last_l1_batch_to_process: u32,
+        protocol_versions: &[ProtocolVersionId],
     ) -> Option<WitnessGeneratorJobMetadata> {
+        let protocol_versions: Vec<i32> = protocol_versions.iter().map(|&id| id as i32).collect();
         {
             let processing_timeout = pg_interval_from_duration(processing_timeout);
             let record = sqlx::query!(
@@ -185,28 +194,30 @@ impl WitnessGeneratorDal<'_, '_> {
                 SET status = 'in_progress', attempts = attempts + 1,
                     updated_at = now(), processing_started_at = now()
                 WHERE l1_batch_number = (
-                    SELECT l1_batch_number
-                    FROM node_aggregation_witness_jobs
-                    WHERE l1_batch_number <= $3
-                    AND
-                    (   status = 'queued'
-                        OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
-                        OR (status = 'failed' AND attempts < $2)
-                    )
-                    ORDER BY l1_batch_number ASC
-                    LIMIT 1
-                    FOR UPDATE
-                    SKIP LOCKED
+                    SELECT l1_batch_number
+                    FROM node_aggregation_witness_jobs
+                    WHERE l1_batch_number <= $3
+                    AND
+                    (   status = 'queued'
+                        OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
+                        OR (status = 'failed' AND attempts < $2)
+                    )
+                    AND protocol_version = ANY($4)
+                    ORDER BY l1_batch_number ASC
+                    LIMIT 1
+                    FOR UPDATE
+                    SKIP LOCKED
                 )
                 RETURNING node_aggregation_witness_jobs.*
                 ",
                 &processing_timeout,
                 max_attempts as i32,
                 last_l1_batch_to_process as i64,
+                &protocol_versions[..],
             )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap();
+            .fetch_optional(self.storage.conn())
+            .await
+            .unwrap();
             if let Some(row) = record {
                 let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32);
                 let number_of_leaf_circuits = row.number_of_leaf_circuits.expect("number_of_leaf_circuits is not found in a `queued` `node_aggregation_witness_jobs` job");
@@ -220,13 +231,13 @@ impl WitnessGeneratorDal<'_, '_> {
                     .await;

                 assert_eq!(
-                        leaf_circuits_proofs.len(),
-                        number_of_leaf_circuits as usize,
-                        "node_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed leaf proofs, which is different from expected {}",
-                        l1_batch_number,
-                        leaf_circuits_proofs.len(),
-                        number_of_leaf_circuits
-                    );
+                    leaf_circuits_proofs.len(),
+                    number_of_leaf_circuits as usize,
+                    "node_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed leaf proofs, which is different from expected {}",
+                    l1_batch_number,
+                    leaf_circuits_proofs.len(),
+                    number_of_leaf_circuits
+                );
                 Some(WitnessGeneratorJobMetadata {
                     block_number: l1_batch_number,
                     proofs: leaf_circuits_proofs,
@@ -242,7 +253,9 @@ impl WitnessGeneratorDal<'_, '_> {
         processing_timeout: Duration,
         max_attempts: u32,
         last_l1_batch_to_process: u32,
+        protocol_versions: &[ProtocolVersionId],
     ) -> Option<WitnessGeneratorJobMetadata> {
+        let protocol_versions: Vec<i32> = protocol_versions.iter().map(|&id| id as i32).collect();
         {
             let processing_timeout = pg_interval_from_duration(processing_timeout);
             let record = sqlx::query!(
@@ -259,6 +272,7 @@ impl WitnessGeneratorDal<'_, '_> {
                     OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)
                     OR (status = 'failed' AND attempts < $2)
                 )
+                AND protocol_version = ANY($4)
                 ORDER BY l1_batch_number ASC
                 LIMIT 1
                 FOR UPDATE
@@ -268,7 +282,8 @@ impl WitnessGeneratorDal<'_, '_> {
             ",
             &processing_timeout,
             max_attempts as i32,
-            last_l1_batch_to_process as i64
+            last_l1_batch_to_process as i64,
+            &protocol_versions[..],
         )
         .fetch_optional(self.storage.conn())
         .await
@@ -284,13 +299,12 @@ impl WitnessGeneratorDal<'_, '_> {
                 .await;

             assert_eq!(
-                    leaf_circuits_proofs.len(),
-                    1usize,
-                    "scheduler_job for l1 batch {} is in status `queued`, but there is {} computed node proofs.
We expect exactly one node proof.", + l1_batch_number.0, + leaf_circuits_proofs.len() + ); Some(WitnessGeneratorJobMetadata { block_number: l1_batch_number, proofs: leaf_circuits_proofs, @@ -452,15 +466,16 @@ impl WitnessGeneratorDal<'_, '_> { basic_circuits_inputs_blob_url: &str, number_of_basic_circuits: usize, scheduler_witness_blob_url: &str, + protocol_version: i32, ) { { - let started_at = Instant::now(); + let latency = MethodLatency::new("create_aggregation_jobs"); sqlx::query!( " INSERT INTO leaf_aggregation_witness_jobs - (l1_batch_number, basic_circuits, basic_circuits_inputs, basic_circuits_blob_url, basic_circuits_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', now(), now()) + (l1_batch_number, basic_circuits, basic_circuits_inputs, basic_circuits_blob_url, basic_circuits_inputs_blob_url, number_of_basic_circuits, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, 'waiting_for_proofs', now(), now()) ", block_number.0 as i64, vec![], @@ -468,6 +483,7 @@ impl WitnessGeneratorDal<'_, '_> { basic_circuits_blob_url, basic_circuits_inputs_blob_url, number_of_basic_circuits as i64, + protocol_version, ) .execute(self.storage.conn()) .await @@ -476,10 +492,11 @@ impl WitnessGeneratorDal<'_, '_> { sqlx::query!( " INSERT INTO node_aggregation_witness_jobs - (l1_batch_number, status, created_at, updated_at) - VALUES ($1, 'waiting_for_artifacts', now(), now()) + (l1_batch_number, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, 'waiting_for_artifacts', now(), now()) ", block_number.0 as i64, + protocol_version, ) .execute(self.storage.conn()) .await @@ -488,18 +505,19 @@ impl WitnessGeneratorDal<'_, '_> { sqlx::query!( " INSERT INTO scheduler_witness_jobs - (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at) - VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), now()) + (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, 'waiting_for_artifacts', now(), now()) ", block_number.0 as i64, vec![], scheduler_witness_blob_url, + protocol_version, ) - .execute(self.storage.conn()) - .await - .unwrap(); + .execute(self.storage.conn()) + .await + .unwrap(); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "create_aggregation_jobs"); + drop(latency); } } @@ -510,13 +528,12 @@ impl WitnessGeneratorDal<'_, '_> { /// we keep the status as is to prevent data race. 
    pub async fn save_leaf_aggregation_artifacts(
         &mut self,
-        block_number: L1BatchNumber,
+        l1_batch_number: L1BatchNumber,
         number_of_leaf_circuits: usize,
         leaf_layer_subqueues_blob_url: &str,
         aggregation_outputs_blob_url: &str,
     ) {
         {
-            let started_at = Instant::now();
             sqlx::query!(
                 "
                 UPDATE node_aggregation_witness_jobs
@@ -528,19 +545,16 @@ impl WitnessGeneratorDal<'_, '_> {
                 WHERE l1_batch_number = $2 AND status != 'queued'
                 ",
                 number_of_leaf_circuits as i64,
-                block_number.0 as i64,
+                l1_batch_number.0 as i64,
                 leaf_layer_subqueues_blob_url,
                 aggregation_outputs_blob_url,
             )
+            .instrument("save_leaf_aggregation_artifacts")
+            .report_latency()
+            .with_arg("l1_batch_number", &l1_batch_number)
             .execute(self.storage.conn())
             .await
             .unwrap();
-
-            metrics::histogram!(
-                "dal.request",
-                started_at.elapsed(),
-                "method" => "save_leaf_aggregation_artifacts"
-            );
         }
     }
@@ -554,7 +568,6 @@ impl WitnessGeneratorDal<'_, '_> {
         node_aggregations_blob_url: &str,
     ) {
         {
-            let started_at = Instant::now();
             sqlx::query!(
                 "
                 UPDATE scheduler_witness_jobs
@@ -566,15 +579,11 @@ impl WitnessGeneratorDal<'_, '_> {
                 block_number.0 as i64,
                 node_aggregations_blob_url,
             )
+            .instrument("save_node_aggregation_artifacts")
+            .report_latency()
             .execute(self.storage.conn())
             .await
             .unwrap();
-
-            metrics::histogram!(
-                "dal.request",
-                started_at.elapsed(),
-                "method" => "save_node_aggregation_artifacts",
-            );
         }
     }
@@ -958,6 +967,24 @@ impl WitnessGeneratorDal<'_, '_> {
             .collect()
         }
     }
+
+    pub async fn protocol_version_for_l1_batch(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Option<i32> {
+        sqlx::query!(
+            r#"
+            SELECT protocol_version
+            FROM witness_inputs
+            WHERE l1_batch_number = $1
+            "#,
+            l1_batch_number.0 as i64,
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap()
+        .protocol_version
+    }
 }

 pub struct GetWitnessJobsParams {
diff --git a/core/lib/eth_client/Cargo.toml b/core/lib/eth_client/Cargo.toml
index 9c44c5a701c5..58af159aeda2 100644
--- a/core/lib/eth_client/Cargo.toml
+++ b/core/lib/eth_client/Cargo.toml
@@ -18,7 +18,6 @@ vlog = { path = "../../lib/vlog", version = "1.0" }
 jsonrpc-core = "18"
 serde = "1.0.90"
-parity-crypto = { version = "0.9", features = ["publickey"] }
 hex = "0.4"
 anyhow = "1.0"
diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs
index ca7b2d52dc6a..07297a3645fb 100644
--- a/core/lib/eth_client/src/clients/mock.rs
+++ b/core/lib/eth_client/src/clients/mock.rs
@@ -4,18 +4,18 @@ use async_trait::async_trait;
 use jsonrpc_core::types::error::Error as RpcError;
 use std::collections::{BTreeMap, HashMap};
 use std::sync::RwLock;
-use zksync_types::web3::contract::tokens::Detokenize;
-use zksync_types::web3::types::{Block, BlockId, Filter, Log, Transaction};
-use zksync_types::web3::{
-    contract::tokens::Tokenize,
-    contract::Options,
-    ethabi,
-    types::{BlockNumber, U64},
-    Error as Web3Error,
+use zksync_types::{
+    web3::{
+        contract::{
+            tokens::{Detokenize, Tokenize},
+            Options,
+        },
+        ethabi::{self, Token},
+        types::{Block, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, U64},
+        Error as Web3Error,
+    },
+    Address, L1ChainId, ProtocolVersionId, H160, H256, U256,
 };
-use zksync_types::{Address, L1ChainId};
-
-use zksync_types::{web3::types::TransactionReceipt, H160, H256, U256};

 use crate::{
     types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult},
@@ -67,6 +67,7 @@ pub struct MockEthereum {
     /// If true, the mock will not check the ordering nonces of the transactions.
     /// This is useful for testing the cases when the transactions are executed out of order.
     pub non_ordering_confirmations: bool,
+    pub multicall_address: Address,
 }

 impl Default for MockEthereum {
@@ -82,6 +83,7 @@ impl Default for MockEthereum {
             pending_nonce: Default::default(),
             nonces: RwLock::new([(0, 0)].into()),
             non_ordering_confirmations: false,
+            multicall_address: Address::default(),
         }
     }
 }
@@ -186,6 +188,13 @@ impl MockEthereum {
             ..self
         }
     }
+
+    pub fn with_multicall_address(self, address: Address) -> Self {
+        Self {
+            multicall_address: address,
+            ..self
+        }
+    }
 }

 #[async_trait]
@@ -266,14 +275,6 @@ impl EthInterface for MockEthereum {
         }))
     }

-    async fn get_tx(
-        &self,
-        _hash: H256,
-        _component: &'static str,
-    ) -> Result<Option<Transaction>, Error> {
-        unimplemented!("Not needed right now")
-    }
-
     #[allow(clippy::too_many_arguments)]
     async fn call_contract_function<R, A, B, P>(
         &self,
@@ -282,7 +283,7 @@ impl EthInterface for MockEthereum {
         _from: A,
         _options: Options,
         _block: B,
-        _contract_address: Address,
+        contract_address: Address,
         _contract_abi: ethabi::Contract,
     ) -> Result<R, Error>
     where
         R: Detokenize + Unpin,
         A: Into<Option<Address>> + Send,
         B: Into<Option<BlockId>> + Send,
         P: Tokenize + Send,
     {
+        if contract_address == self.multicall_address {
+            let token = Token::Array(vec![
+                Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]),
+                Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]),
+                Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]),
+                Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]),
+                Token::Tuple(vec![
+                    Token::Bool(true),
+                    Token::Bytes(
+                        H256::from_low_u64_be(ProtocolVersionId::default() as u64)
+                            .0
+                            .to_vec(),
+                    ),
+                ]),
+            ]);
+            return Ok(R::from_tokens(vec![token]).unwrap());
+        }
         Ok(R::from_tokens(vec![]).unwrap())
     }
+
+    async fn get_tx(
+        &self,
+        _hash: H256,
+        _component: &'static str,
+    ) -> Result<Option<Transaction>, Error> {
         unimplemented!("Not needed right now")
     }
diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml
index 0a3bf67ca100..46722ed4ee7e 100644
--- a/core/lib/eth_signer/Cargo.toml
+++ b/core/lib/eth_signer/Cargo.toml
@@ -16,7 +16,7 @@ serde = "1.0.90"
 serde_derive = "1.0.90"
 serde_json = "1.0.0"
 hex = "0.4.2"
-secp256k1 = "0.21.3"
+secp256k1 = "0.27.0"
 parity-crypto = { version = "0.9", features = ["publickey"] }
 rlp = "0.5"
diff --git a/core/lib/eth_signer/src/json_rpc_signer.rs b/core/lib/eth_signer/src/json_rpc_signer.rs
index 95156b53cd35..da81ff51dba7 100644
--- a/core/lib/eth_signer/src/json_rpc_signer.rs
+++ b/core/lib/eth_signer/src/json_rpc_signer.rs
@@ -462,7 +462,8 @@ mod tests {
                 let data: String = serde_json::from_value(req.params[1].clone()).unwrap();
                 let data_bytes = hex::decode(&data[2..]).unwrap();
                 let signature =
-                    PackedEthSignature::sign(state.key_pairs[0].secret(), &data_bytes).unwrap();
+                    PackedEthSignature::sign(&state.key_pairs[0].secret().0.into(), &data_bytes)
+                        .unwrap();
                 create_success(json!(signature))
             }
             "eth_signTransaction" => {
diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml
index b723d36fbcc3..7c74a7d53bcf 100644
--- a/core/lib/health_check/Cargo.toml
+++ b/core/lib/health_check/Cargo.toml
@@ -11,3 +11,11 @@ categories = ["cryptography"]

 [dependencies]
 async-trait = "0.1"
+futures = "0.3"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+tokio = { version = "1", features = ["sync"] }
+
+[dev-dependencies]
+assert_matches = "1.5.0"
+tokio = { version = "1", features = ["macros", "rt"] }
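The `MockEthereum` change earlier in this diff answers any call routed to `multicall_address` with a fixed array of `(success, returndata)` tuples, which is the shape a multicall aggregator returns per bundled call. A minimal sketch of fabricating such a result with `ethabi` (the helper name is hypothetical, and Multicall3-style semantics are an assumption of this sketch):

    use ethabi::Token;

    /// Encodes one `(bool success, bytes returndata)` tuple per bundled call.
    fn mock_multicall_result(returndata: Vec<Vec<u8>>) -> Token {
        Token::Array(
            returndata
                .into_iter()
                .map(|data| Token::Tuple(vec![Token::Bool(true), Token::Bytes(data)]))
                .collect(),
        )
    }

    fn main() {
        let token = mock_multicall_result(vec![vec![1u8; 32], vec![2u8; 32]]);
        println!("{token:?}");
    }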
diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs
index 862297aaf454..d121b7a189b9 100644
--- a/core/lib/health_check/src/lib.rs
+++ b/core/lib/health_check/src/lib.rs
@@ -1,19 +1,229 @@
+use futures::{future, FutureExt};
+use serde::Serialize;
+use tokio::sync::watch;
+
+use std::{collections::HashMap, thread};
+
 /// Public re-export for other crates to be able to implement the interface.
 pub use async_trait::async_trait;

-/// Interface to be used for healthchecks
-/// There's a list of health checks that are looped in the /healthcheck endpoint to verify status
+/// Health status returned as a part of `Health`.
+#[derive(Debug, Clone, Copy, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+#[non_exhaustive]
+pub enum HealthStatus {
+    /// Component is initializing and is not ready yet.
+    NotReady,
+    /// Component is ready for operations.
+    Ready,
+    /// Component is shut down.
+    ShutDown,
+    /// Component has been abnormally interrupted by a panic.
+    Panicked,
+}
+
+impl HealthStatus {
+    /// Checks whether a component is ready according to this status.
+    pub fn is_ready(self) -> bool {
+        matches!(self, Self::Ready)
+    }
+
+    fn priority_for_aggregation(self) -> usize {
+        match self {
+            Self::Ready => 0,
+            Self::ShutDown => 1,
+            Self::NotReady => 2,
+            Self::Panicked => 3,
+        }
+    }
+}
+
+/// Health of a single component.
+#[derive(Debug, Clone, Serialize)]
+pub struct Health {
+    status: HealthStatus,
+    /// Component-specific details allowing to assess whether the component is healthy or not.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    details: Option<serde_json::Value>,
+}
+
+impl Health {
+    /// Sets health details.
+    #[must_use]
+    pub fn with_details<T: Serialize>(mut self, details: T) -> Self {
+        let details = serde_json::to_value(details).expect("Failed serializing `Health` details");
+        self.details = Some(details);
+        self
+    }
+
+    /// Returns the overall health status.
+    pub fn status(&self) -> HealthStatus {
+        self.status
+    }
+}
+
+impl From<HealthStatus> for Health {
+    fn from(status: HealthStatus) -> Self {
+        Self {
+            status,
+            details: None,
+        }
+    }
+}
+
+/// Health information for an application consisting of multiple components.
+#[derive(Debug, Serialize)]
+pub struct AppHealth {
+    #[serde(flatten)]
+    inner: Health,
+    components: HashMap<&'static str, Health>,
+}
+
+impl AppHealth {
+    /// Aggregates health info from the provided checks.
+    pub async fn new(health_checks: &[Box<dyn CheckHealth>]) -> Self {
+        let check_futures = health_checks.iter().map(|check| {
+            let check_name = check.name();
+            check.check_health().map(move |health| (check_name, health))
+        });
+        let components: HashMap<_, _> = future::join_all(check_futures).await.into_iter().collect();
+
+        let aggregated_status = components
+            .values()
+            .map(|health| health.status)
+            .max_by_key(|status| status.priority_for_aggregation())
+            .unwrap_or(HealthStatus::Ready);
+        let inner = aggregated_status.into();
+
+        Self { inner, components }
+    }
+
+    pub fn is_ready(&self) -> bool {
+        self.inner.status.is_ready()
+    }
+}
+
+/// Interface to be used for health checks.
 #[async_trait]
 pub trait CheckHealth: Send + Sync + 'static {
-    async fn check_health(&self) -> CheckHealthStatus;
+    /// Unique name of the component.
+    fn name(&self) -> &'static str;
+    /// Checks health of the component.
+    async fn check_health(&self) -> Health;
 }
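The `ReactiveHealthCheck`/`HealthUpdater` pair below is a thin layer over `tokio::sync::watch`, which fits health reporting well: the channel stores only the latest value, writers never block, and every reader observes the current status. The mechanics in isolation:

    use tokio::sync::watch;

    #[tokio::main(flavor = "current_thread")]
    async fn main() {
        let (health_sender, health_receiver) = watch::channel("not_ready");
        health_sender.send_replace("ready"); // replaces the stored value
        assert_eq!(*health_receiver.borrow(), "ready"); // readers see the latest value
    }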
-/// Used to return health status when checked.
-/// States:
-/// Ready => move forward
-/// NotReady => check fails with message String -- to be passed to /healthcheck caller
-#[derive(Debug, PartialEq)]
-pub enum CheckHealthStatus {
-    Ready,
-    NotReady(String),
+/// Basic implementation of [`CheckHealth`] trait that can be updated using a matching [`HealthUpdater`].
+#[derive(Debug)]
+pub struct ReactiveHealthCheck {
+    name: &'static str,
+    health_receiver: watch::Receiver<Health>,
+}
+
+impl ReactiveHealthCheck {
+    /// Creates a health check together with an updater that can be used to update it.
+    /// The check will return [`HealthStatus::NotReady`] initially.
+    pub fn new(name: &'static str) -> (Self, HealthUpdater) {
+        let (health_sender, health_receiver) = watch::channel(HealthStatus::NotReady.into());
+        let this = Self {
+            name,
+            health_receiver,
+        };
+        let updater = HealthUpdater {
+            name,
+            health_sender,
+        };
+        (this, updater)
+    }
+}
+
+#[async_trait]
+impl CheckHealth for ReactiveHealthCheck {
+    fn name(&self) -> &'static str {
+        self.name
+    }
+
+    async fn check_health(&self) -> Health {
+        self.health_receiver.borrow().clone()
+    }
+}
+
+/// Updater for [`ReactiveHealthCheck`]. Can be created using [`ReactiveHealthCheck::new()`].
+///
+/// On drop, will automatically update status to [`HealthStatus::ShutDown`], or to [`HealthStatus::Panicked`]
+/// if the dropping thread is panicking.
+#[derive(Debug)]
+pub struct HealthUpdater {
+    name: &'static str,
+    health_sender: watch::Sender<Health>,
+}
+
+impl HealthUpdater {
+    /// Updates the health check information.
+    pub fn update(&self, health: Health) {
+        self.health_sender.send_replace(health);
+    }
+
+    /// Creates a [`ReactiveHealthCheck`] attached to this updater. This allows not retaining the initial health check
+    /// returned by [`ReactiveHealthCheck::new()`].
+ pub fn subscribe(&self) -> ReactiveHealthCheck { + ReactiveHealthCheck { + name: self.name, + health_receiver: self.health_sender.subscribe(), + } + } +} + +impl Drop for HealthUpdater { + fn drop(&mut self) { + let terminal_health = if thread::panicking() { + HealthStatus::Panicked.into() + } else { + HealthStatus::ShutDown.into() + }; + self.health_sender.send_replace(terminal_health); + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + + #[tokio::test] + async fn updating_health_status() { + let (health_check, health_updater) = ReactiveHealthCheck::new("test"); + assert_eq!(health_check.name(), "test"); + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::NotReady + ); + + health_updater.update(HealthStatus::Ready.into()); + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::Ready + ); + + drop(health_updater); + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::ShutDown + ); + } + + #[tokio::test] + async fn updating_health_status_after_panic() { + let (health_check, health_updater) = ReactiveHealthCheck::new("test"); + let task = tokio::spawn(async move { + health_updater.update(HealthStatus::Ready.into()); + panic!("oops"); + }); + assert!(task.await.unwrap_err().is_panic()); + + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::Panicked + ); + } } diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index 7b5899df1b8e..b14e90c72eb2 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -79,6 +79,9 @@ impl MempoolStore { &initial_nonces, ); } + ExecuteTransactionCommon::ProtocolUpgrade(_) => { + panic!("Protocol upgrade tx is not supposed to be inserted into mempool"); + } } } self.collect_stats(); @@ -189,6 +192,9 @@ impl MempoolStore { self.l2_priority_queue.remove(&score); } } + ExecuteTransactionCommon::ProtocolUpgrade(_) => { + panic!("Protocol upgrade tx is not supposed to be in mempool"); + } } } diff --git a/core/lib/merkle_tree/examples/loadtest/batch.rs b/core/lib/merkle_tree/examples/loadtest/batch.rs new file mode 100644 index 000000000000..f5d36d231d4a --- /dev/null +++ b/core/lib/merkle_tree/examples/loadtest/batch.rs @@ -0,0 +1,52 @@ +//! `Database` implementation that flushes changes at the specified batches. 
+
+use zksync_merkle_tree::{
+    unstable::{DeserializeError, Manifest, Node, NodeKey, Root},
+    Database, PatchSet, Patched,
+};
+
+pub struct WithBatching<'a> {
+    inner: Patched<&'a mut dyn Database>,
+    batch_size: usize,
+    in_memory_batch_size: usize,
+}
+
+impl<'a> WithBatching<'a> {
+    pub fn new(db: &'a mut dyn Database, batch_size: usize) -> Self {
+        assert!(batch_size > 0, "Batch size must be positive");
+        Self {
+            inner: Patched::new(db),
+            batch_size,
+            in_memory_batch_size: 0,
+        }
+    }
+}
+
+impl Database for WithBatching<'_> {
+    fn try_manifest(&self) -> Result<Option<Manifest>, DeserializeError> {
+        self.inner.try_manifest()
+    }
+
+    fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError> {
+        self.inner.try_root(version)
+    }
+
+    fn try_tree_node(
+        &self,
+        key: &NodeKey,
+        is_leaf: bool,
+    ) -> Result<Option<Node>, DeserializeError> {
+        self.inner.try_tree_node(key, is_leaf)
+    }
+
+    fn apply_patch(&mut self, patch: PatchSet) {
+        self.inner.apply_patch(patch);
+
+        self.in_memory_batch_size += 1;
+        if self.in_memory_batch_size >= self.batch_size {
+            println!("Flushing changes to underlying DB");
+            self.inner.flush();
+            self.in_memory_batch_size = 0;
+        }
+    }
+}
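`WithBatching` above is a classic decorator over a trait object: it forwards reads, counts applied patches, and flushes the wrapped `Patched` database once per `batch_size` patches. The same shape in a self-contained toy, with a `Store` trait standing in for `Database`:

    trait Store {
        fn put(&mut self, value: u64);
        fn flush(&mut self) {}
    }

    struct VecStore(Vec<u64>);

    impl Store for VecStore {
        fn put(&mut self, value: u64) {
            self.0.push(value);
        }
        fn flush(&mut self) {
            println!("flushed {} values", self.0.len());
        }
    }

    /// Decorator that flushes the inner store after every `batch_size` writes.
    struct Batched<'a> {
        inner: &'a mut dyn Store,
        pending: usize,
        batch_size: usize,
    }

    impl Store for Batched<'_> {
        fn put(&mut self, value: u64) {
            self.inner.put(value);
            self.pending += 1;
            if self.pending >= self.batch_size {
                self.inner.flush();
                self.pending = 0;
            }
        }
    }

    fn main() {
        let mut inner = VecStore(Vec::new());
        let mut batched = Batched { inner: &mut inner, pending: 0, batch_size: 2 };
        for value in 0..5 {
            batched.put(value); // flushes after the 2nd and 4th put
        }
    }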
#[arg(long = "chunk-size", conflicts_with = "in_memory")] chunk_size: Option, @@ -66,7 +74,7 @@ impl Cli { let (mut mock_db, mut rocksdb); let mut _temp_dir = None; let mut pruner_handles = None; - let db: &mut dyn Database = if self.in_memory { + let mut db: &mut dyn Database = if self.in_memory { mock_db = PatchSet::default(); &mut mock_db } else { @@ -75,7 +83,12 @@ impl Cli { "Created temp dir for RocksDB: {}", dir.path().to_string_lossy() ); - rocksdb = RocksDBWrapper::new(&dir); + rocksdb = if let Some(block_cache_capacity) = self.block_cache { + let db = RocksDB::with_cache(&dir, true, Some(block_cache_capacity)); + RocksDBWrapper::from(db) + } else { + RocksDBWrapper::new(&dir) + }; if let Some(chunk_size) = self.chunk_size { rocksdb.set_multi_get_chunk_size(chunk_size); } @@ -90,6 +103,12 @@ impl Cli { &mut rocksdb }; + let mut batching_db; + if let Some(batch_size) = self.batch_size { + batching_db = WithBatching::new(db, batch_size); + db = &mut batching_db; + } + let hasher: &dyn HashTree = if self.no_hashing { &() } else { &Blake2Hasher }; let mut rng = StdRng::seed_from_u64(self.rng_seed); diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index 956cb5fe506d..5a7e19202062 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -6,8 +6,8 @@ use std::sync::atomic::{AtomicU64, Ordering}; use crate::{ errors::DeserializeError, - types::{LeafNode, Nibbles, Node, NodeKey}, - Database, Key, MerkleTree, Root, ValueHash, + types::{LeafNode, Nibbles, Node, NodeKey, Root}, + Database, Key, MerkleTree, ValueHash, }; #[derive(Debug, thiserror::Error)] @@ -332,7 +332,11 @@ mod tests { let mut db = prepare_database(); let root = db.roots_mut().get_mut(&0).unwrap(); - let Root::Filled { node: Node::Internal(node), .. } = root else { + let Root::Filled { + node: Node::Internal(node), + .. + } = root + else { panic!("unexpected root: {root:?}"); }; let child_ref = node.child_ref_mut(0xd).unwrap(); diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index 9e46c6142785..24e6ce4d57b5 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -2,11 +2,9 @@ use rayon::{ThreadPool, ThreadPoolBuilder}; -use std::{borrow::Borrow, num::NonZeroU32}; - use crate::{ storage::{MerkleTreeColumnFamily, PatchSet, Patched, RocksDBWrapper}, - types::{Key, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, + types::{Key, LeafData, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, BlockOutput, HashTree, MerkleTree, }; use zksync_crypto::hasher::blake2::Blake2Hasher; @@ -14,7 +12,7 @@ use zksync_storage::RocksDB; use zksync_types::{ proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, writes::{InitialStorageWrite, RepeatedStorageWrite}, - L1BatchNumber, StorageLogKind, WitnessStorageLog, + L1BatchNumber, StorageLog, StorageLogKind, }; /// Metadata for the current tree state. @@ -24,11 +22,12 @@ pub struct TreeMetadata { pub root_hash: ValueHash, /// 1-based index of the next leaf to be inserted in the tree. pub rollup_last_leaf_index: u64, - /// Initial writes performed in the processed block. + /// Initial writes performed in the processed L1 batch in the order of provided `StorageLog`s. pub initial_writes: Vec, - /// Repeated writes performed in the processed block. + /// Repeated writes performed in the processed L1 batch in the order of provided `StorageLog`s. 
+    /// No-op writes (i.e., writing the same value as previously) will be omitted.
     pub repeated_writes: Vec<RepeatedStorageWrite>,
-    /// Witness information.
+    /// Witness information. As with `repeated_writes`, no-op updates will be omitted from Merkle paths.
     pub witness: Option<PrepareBasicCircuitsJob>,
 }

@@ -40,8 +39,8 @@ enum TreeMode {
 /// Domain-specific wrapper of the Merkle tree.
 ///
-/// This wrapper will accumulate changes introduced by [`Self::process_block()`],
-/// [`Self::process_blocks()`] and [`Self::revert_logs()`] in RAM without saving them
+/// This wrapper will accumulate changes introduced by [`Self::process_l1_batch()`],
+/// [`Self::process_l1_batches()`] and [`Self::revert_logs()`] in RAM without saving them
 /// to RocksDB. The accumulated changes can be saved to RocksDB via [`Self::save()`]
 /// or discarded via [`Self::reset()`].
 #[derive(Debug)]
@@ -52,10 +51,6 @@ pub struct ZkSyncTree {
 }

 impl ZkSyncTree {
-    // A reasonable chunk size for RocksDB multi-get operations. Obtained as a result
-    // of local benchmarking.
-    const MULTI_GET_CHUNK_SIZE: usize = 500;
-
     fn create_thread_pool(thread_count: usize) -> ThreadPool {
         ThreadPoolBuilder::new()
             .thread_name(|idx| format!("new-merkle-tree-{idx}"))
@@ -66,7 +61,7 @@ impl ZkSyncTree {

     /// Returns metadata based on `storage_logs` generated by the genesis L1 batch. This does not
     /// create a persistent tree.
-    pub fn process_genesis_batch(storage_logs: &[WitnessStorageLog]) -> BlockOutput {
+    pub fn process_genesis_batch(storage_logs: &[StorageLog]) -> BlockOutput {
         let kvs = Self::filter_write_logs(storage_logs);
         vlog::info!(
             "Creating Merkle tree for genesis batch with {instr_count} writes",
@@ -95,8 +90,7 @@ impl ZkSyncTree {
     }

     fn new_with_mode(db: RocksDB, mode: TreeMode) -> Self {
-        let mut wrapper = RocksDBWrapper::from(db);
-        wrapper.set_multi_get_chunk_size(Self::MULTI_GET_CHUNK_SIZE);
+        let wrapper = RocksDBWrapper::from(db);
         Self {
             tree: MerkleTree::new(Patched::new(wrapper)),
             thread_pool: None,
@@ -104,6 +98,21 @@ impl ZkSyncTree {
         }
     }

+    /// Sets the chunk size for multi-get operations. The requested keys will be split
+    /// into chunks of this size and requested in parallel using `rayon`. Setting chunk size
+    /// to a large value (e.g., `usize::MAX`) will effectively disable parallelism.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `chunk_size` is zero.
+    pub fn set_multi_get_chunk_size(&mut self, chunk_size: usize) {
+        assert!(chunk_size > 0, "Multi-get chunk size must be positive");
+        self.tree
+            .db
+            .inner_mut()
+            .set_multi_get_chunk_size(chunk_size);
+    }
+
     /// Signals that the tree should use a dedicated `rayon` thread pool for parallel operations
     /// (for now, hash computations).
     ///
@@ -128,42 +137,54 @@ impl ZkSyncTree {
             .map_or(true, |root| matches!(root, Root::Empty))
     }

-    /// Returns the current block number.
-    pub fn block_number(&self) -> u32 {
-        self.tree.latest_version().map_or(0, |version| {
-            u32::try_from(version + 1).expect("integer overflow for block number")
-        })
+    /// Returns the next L1 batch number that should be processed by the tree.
+    pub fn next_l1_batch_number(&self) -> L1BatchNumber {
+        let number = self.tree.latest_version().map_or(0, |version| {
+            u32::try_from(version + 1).expect("integer overflow for L1 batch number")
+        });
+        L1BatchNumber(number)
     }
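The rename from `block_number()` to `next_l1_batch_number()` encodes the tree's versioning contract: version N of the tree is the state right after applying L1 batch N, so the next batch to process is `latest_version + 1`, or 0 for an empty tree. The arithmetic in isolation:

    fn next_l1_batch_number(latest_version: Option<u64>) -> u32 {
        latest_version.map_or(0, |version| {
            u32::try_from(version + 1).expect("integer overflow for L1 batch number")
        })
    }

    fn main() {
        assert_eq!(next_l1_batch_number(None), 0); // empty tree: start from batch #0
        assert_eq!(next_l1_batch_number(Some(41)), 42); // version N holds batch #N
    }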
-    /// Verifies tree consistency. `block_number`, if provided, specifies the version of the tree
-    /// to be checked, expressed as the number of blocks applied to the tree. By default,
-    /// the latest tree version is checked.
+    /// Verifies tree consistency. `l1_batch_number` specifies the version of the tree
+    /// to be checked, expressed as the number of the latest L1 batch applied to the tree.
     ///
     /// # Panics
     ///
     /// Panics if an inconsistency is detected.
-    pub fn verify_consistency(&self, block_number: NonZeroU32) {
-        let version = u64::from(block_number.get() - 1);
+    pub fn verify_consistency(&self, l1_batch_number: L1BatchNumber) {
+        let version = u64::from(l1_batch_number.0);
         self.tree.verify_consistency(version).unwrap_or_else(|err| {
             panic!("Tree at version {version} is inconsistent: {err}");
         });
     }

-    /// Processes an iterator of block logs comprising a single block.
-    pub fn process_block(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata {
+    /// Reads leaf nodes with the specified keys from the tree storage. The nodes
+    /// are returned in a `Vec` in the same order as requested.
+    pub fn read_leaves(
+        &self,
+        l1_batch_number: L1BatchNumber,
+        leaf_keys: &[Key],
+    ) -> Vec<Option<LeafData>> {
+        let version = u64::from(l1_batch_number.0);
+        self.tree.read_leaves(version, leaf_keys)
+    }
+
+    /// Processes an iterator of storage logs comprising a single L1 batch.
+    pub fn process_l1_batch(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata {
         match self.mode {
-            TreeMode::Full => self.process_block_full(storage_logs),
-            TreeMode::Lightweight => self.process_block_lightweight(storage_logs),
+            TreeMode::Full => self.process_l1_batch_full(storage_logs),
+            TreeMode::Lightweight => self.process_l1_batch_lightweight(storage_logs),
         }
     }

-    fn process_block_full(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata {
-        let block_number = self.block_number();
+    fn process_l1_batch_full(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata {
+        let l1_batch_number = self.next_l1_batch_number();
         let instructions = Self::transform_logs(storage_logs);
         let starting_leaf_count = self.tree.latest_root().leaf_count();
+        let starting_root_hash = self.tree.latest_root_hash();

         vlog::info!(
-            "Extending Merkle tree with batch #{block_number} with {instr_count} ops in full mode",
+            "Extending Merkle tree with batch #{l1_batch_number} with {instr_count} ops in full mode",
             instr_count = instructions.len()
         );
@@ -185,6 +206,10 @@ impl ZkSyncTree {
                 .map(|hash| hash.0)
                 .collect();

+            let value_written = match instruction {
+                TreeInstruction::Write(value) => value.0,
+                TreeInstruction::Read => [0_u8; 32],
+            };
             let log = StorageLogMetadata {
                 root_hash: log.root_hash.0,
                 is_write: !log.base.is_read(),
@@ -197,12 +222,15 @@ impl ZkSyncTree {
                     | TreeLogEntry::Read { leaf_index, .. } => leaf_index,
                     TreeLogEntry::ReadMissingKey => 0,
                 },
-                value_written: match instruction {
-                    TreeInstruction::Write(value) => value.0,
-                    TreeInstruction::Read => [0_u8; 32],
-                },
+                value_written,
                 value_read: match log.base {
-                    TreeLogEntry::Updated { previous_value, .. } => previous_value.0,
+                    TreeLogEntry::Updated { previous_value, .. } => {
+                        if previous_value.0 == value_written {
+                            // A no-op update that must be omitted from the produced `witness`.
+                            continue;
+                        }
+                        previous_value.0
+                    }
                     TreeLogEntry::Read { value, .. } => value.0,
                     TreeLogEntry::Inserted { ..
} | TreeLogEntry::ReadMissingKey => [0_u8; 32], }, @@ -210,7 +238,7 @@ impl ZkSyncTree { witness.push_merkle_path(log); } - let root_hash = output.root_hash().unwrap(); + let root_hash = output.root_hash().unwrap_or(starting_root_hash); let logs = output .logs .into_iter() @@ -224,7 +252,7 @@ impl ZkSyncTree { let (initial_writes, repeated_writes) = Self::extract_writes(logs, kvs); vlog::info!( - "Processed batch #{block_number}; root hash is {root_hash}, \ + "Processed batch #{l1_batch_number}; root hash is {root_hash}, \ {leaf_count} leaves in total, \ {initial_writes} initial writes, {repeated_writes} repeated writes", leaf_count = output.leaf_count, @@ -241,9 +269,8 @@ impl ZkSyncTree { } } - fn transform_logs(storage_logs: &[WitnessStorageLog]) -> Vec<(Key, TreeInstruction)> { + fn transform_logs(storage_logs: &[StorageLog]) -> Vec<(Key, TreeInstruction)> { let instructions = storage_logs.iter().map(|log| { - let log = &log.storage_log; let key = log.key.hashed_key_u256(); let instruction = match log.kind { StorageLogKind::Write => TreeInstruction::Write(log.value), @@ -262,26 +289,36 @@ impl ZkSyncTree { let mut repeated_writes = vec![]; for (log_entry, (key, value)) in logs.zip(kvs) { match log_entry { - TreeLogEntry::Inserted { .. } => { - initial_writes.push(InitialStorageWrite { key, value }); - } - TreeLogEntry::Updated { leaf_index, .. } => { - repeated_writes.push(RepeatedStorageWrite { + TreeLogEntry::Inserted { leaf_index } => { + initial_writes.push(InitialStorageWrite { index: leaf_index, + key, value, }); } + TreeLogEntry::Updated { + leaf_index, + previous_value, + } => { + if previous_value != value { + repeated_writes.push(RepeatedStorageWrite { + index: leaf_index, + value, + }); + } + // Else we have a no-op update that must be omitted from `repeated_writes`. + } TreeLogEntry::Read { .. 
} | TreeLogEntry::ReadMissingKey => {}
             }
         }
         (initial_writes, repeated_writes)
     }

-    fn process_block_lightweight(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata {
+    fn process_l1_batch_lightweight(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata {
         let kvs = Self::filter_write_logs(storage_logs);
-        let block_number = self.block_number();
+        let l1_batch_number = self.next_l1_batch_number();
         vlog::info!(
-            "Extending Merkle tree with batch #{block_number} with {kv_count} writes \
+            "Extending Merkle tree with batch #{l1_batch_number} with {kv_count} writes \
              in lightweight mode",
             kv_count = kvs.len()
         );
@@ -295,7 +332,7 @@ impl ZkSyncTree {
             Self::extract_writes(output.logs.into_iter(), kvs.into_iter());

         vlog::info!(
-            "Processed batch #{block_number}; root hash is {root_hash}, \
+            "Processed batch #{l1_batch_number}; root hash is {root_hash}, \
              {leaf_count} leaves in total, \
              {initial_writes} initial writes, {repeated_writes} repeated writes",
             root_hash = output.root_hash,
@@ -313,16 +350,13 @@ impl ZkSyncTree {
         }
     }

-    fn filter_write_logs(storage_logs: &[WitnessStorageLog]) -> Vec<(Key, ValueHash)> {
-        let kvs = storage_logs.iter().filter_map(|log| {
-            let log = &log.borrow().storage_log;
-            match log.kind {
-                StorageLogKind::Write => {
-                    let key = log.key.hashed_key_u256();
-                    Some((key, log.value))
-                }
-                StorageLogKind::Read => None,
+    fn filter_write_logs(storage_logs: &[StorageLog]) -> Vec<(Key, ValueHash)> {
+        let kvs = storage_logs.iter().filter_map(|log| match log.kind {
+            StorageLogKind::Write => {
+                let key = log.key.hashed_key_u256();
+                Some((key, log.value))
             }
+            StorageLogKind::Read => None,
         });
         kvs.collect()
     }
@@ -330,10 +364,10 @@ impl ZkSyncTree {
     /// Reverts the tree to a previous state.
     ///
     /// This method will overwrite all unsaved changes in the tree.
-    pub fn revert_logs(&mut self, block_number: L1BatchNumber) {
+    pub fn revert_logs(&mut self, last_l1_batch_to_keep: L1BatchNumber) {
         self.tree.db.reset();
-        let block_number = u64::from(block_number.0 + 1);
-        self.tree.truncate_recent_versions(block_number);
+        let retained_version_count = u64::from(last_l1_batch_to_keep.0 + 1);
+        self.tree.truncate_recent_versions(retained_version_count);
     }

     /// Saves the accumulated changes in the tree to RocksDB.
diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs
index 92e6fba45931..b2aba5c12843 100644
--- a/core/lib/merkle_tree/src/errors.rs
+++ b/core/lib/merkle_tree/src/errors.rs
@@ -109,6 +109,7 @@

 impl DeserializeError {
     /// Appends a context to this error.
+    #[must_use]
     pub fn with_context(mut self, context: ErrorContext) -> Self {
         self.contexts.push(context);
         self
diff --git a/core/lib/merkle_tree/src/getters.rs b/core/lib/merkle_tree/src/getters.rs
new file mode 100644
index 000000000000..3c32b64b8d8f
--- /dev/null
+++ b/core/lib/merkle_tree/src/getters.rs
@@ -0,0 +1,37 @@
+//! Getters for the Merkle tree.
+
+use crate::{
+    storage::{LoadAncestorsResult, SortedKeys, WorkingPatchSet},
+    types::{LeafData, Node},
+    Database, Key, MerkleTree,
+};
+
+impl<DB> MerkleTree<'_, DB>
+where
+    DB: Database,
+{
+    /// Reads leaf nodes with the specified keys from the tree storage. The nodes
+    /// are returned in a `Vec` in the same order as requested.
+    pub fn read_leaves(&self, version: u64, leaf_keys: &[Key]) -> Vec<Option<LeafData>> {
+        let Some(root) = self.db.root(version) else {
+            return vec![None; leaf_keys.len()];
+        };
+        let sorted_keys = SortedKeys::new(leaf_keys.iter().copied());
+        let mut patch_set = WorkingPatchSet::new(version, root);
+        let LoadAncestorsResult {
+            longest_prefixes, ..
+        } = patch_set.load_ancestors(&sorted_keys, &self.db);
+
+        leaf_keys
+            .iter()
+            .zip(&longest_prefixes)
+            .map(|(leaf_key, longest_prefix)| {
+                let node = patch_set.get(longest_prefix);
+                match node {
+                    Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => Some((*leaf).into()),
+                    _ => None,
+                }
+            })
+            .collect()
+    }
+}
diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs
index f675b8fb3eb0..4a2a16b841c4 100644
--- a/core/lib/merkle_tree/src/lib.rs
+++ b/core/lib/merkle_tree/src/lib.rs
@@ -44,6 +44,7 @@

 mod consistency;
 pub mod domain;
 mod errors;
+mod getters;
 mod hasher;
 mod metrics;
 mod pruning;
@@ -51,6 +52,17 @@ mod storage;
 mod types;
 mod utils;

+/// Unstable types that should not be used unless you know what you're doing (e.g., implementing
+/// `Database` trait for a custom type). There are no guarantees whatsoever that APIs / structure of
+/// these types will remain stable.
+#[doc(hidden)]
+pub mod unstable {
+    pub use crate::{
+        errors::DeserializeError,
+        types::{Manifest, Node, NodeKey, Root},
+    };
+}
+
 pub use crate::{
     hasher::HashTree,
     pruning::{MerkleTreePruner, MerkleTreePrunerHandle},
@@ -59,12 +71,12 @@ pub use crate::{
         RocksDBWrapper,
     },
     types::{
-        BlockOutput, BlockOutputWithProofs, Key, Root, TreeInstruction, TreeLogEntry,
+        BlockOutput, BlockOutputWithProofs, Key, TreeInstruction, TreeLogEntry,
         TreeLogEntryWithProof, ValueHash,
     },
 };

-use crate::{metrics::describe_metrics, storage::Storage};
+use crate::{metrics::describe_metrics, storage::Storage, types::Root};
 use zksync_crypto::hasher::blake2::Blake2Hasher;

 /// Binary Merkle tree implemented using AR16MT from Diem [Jellyfish Merkle tree] white paper.
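The new `pub mod unstable` above is the escape hatch that lets the loadtest example implement `Database` without promising API stability for the node types. The pattern in miniature (compiles as a library crate root; the types are placeholders):

    mod types {
        pub struct NodeKey;
    }

    /// Re-exported for advanced users, but hidden from rustdoc so it does not
    /// become part of the documented, semver-guarded API surface.
    #[doc(hidden)]
    pub mod unstable {
        pub use crate::types::NodeKey;
    }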
diff --git a/core/lib/merkle_tree/src/storage/database.rs b/core/lib/merkle_tree/src/storage/database.rs
index 9924867a12be..368b1cb84747 100644
--- a/core/lib/merkle_tree/src/storage/database.rs
+++ b/core/lib/merkle_tree/src/storage/database.rs
@@ -175,6 +175,11 @@ impl Patched {
             .map_or_else(Vec::new, |patch| patch.roots.keys().copied().collect())
     }

+    /// Provides access to the wrapped DB. Should not be used to mutate DB data.
+    pub(crate) fn inner_mut(&mut self) -> &mut DB {
+        &mut self.inner
+    }
+
     /// Flushes changes from RAM to the wrapped database.
     pub fn flush(&mut self) {
         if let Some(patch) = self.patch.take() {
diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs
index 7a02cac8bfc5..9da0d30f3f9c 100644
--- a/core/lib/merkle_tree/src/storage/mod.rs
+++ b/core/lib/merkle_tree/src/storage/mod.rs
@@ -8,19 +8,19 @@ mod serialization;
 #[cfg(test)]
 mod tests;

+pub(crate) use self::patch::{LoadAncestorsResult, WorkingPatchSet};
 pub use self::{
     database::{Database, NodeKeys, Patched, PruneDatabase, PrunePatchSet},
     patch::PatchSet,
     rocksdb::{MerkleTreeColumnFamily, RocksDBWrapper},
 };

-use self::patch::WorkingPatchSet;
 use crate::{
     hasher::HashTree,
     metrics::{BlockTimings, LeafCountMetric, Timing, TreeUpdaterMetrics},
     types::{
         BlockOutput, ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, Node, Root,
-        TreeLogEntry, TreeTags, ValueHash, KEY_SIZE,
+        TreeLogEntry, TreeTags, ValueHash,
     },
     utils::increment_counter,
 };
@@ -40,10 +40,6 @@ impl TreeUpdater {
         }
     }

-    fn root_node_mut(&mut self) -> Option<&mut Node> {
-        self.patch_set.get_mut(&Nibbles::EMPTY)
-    }
-
     fn set_root_node(&mut self, node: Node) {
         self.patch_set.insert(Nibbles::EMPTY, node);
     }
@@ -78,9 +74,6 @@ impl TreeUpdater {
     ///
     /// # Implementation notes
     ///
-    /// This method works by traversing the tree level by level. It uses [`Database::tree_nodes()`]
-    /// (translating to multi-get in RocksDB) for each level to expedite node loading.
-    ///
     /// It may seem that the loaded leaf nodes may just increase the patch size. However,
     /// each leaf node will actually be modified by [`Self::insert()`], either by changing
     /// its `value_hash` (on full key match), or by moving the leaf node down the tree
@@ -90,71 +83,13 @@ impl TreeUpdater {
         sorted_keys: &SortedKeys,
         db: &DB,
     ) -> Vec<Nibbles> {
-        let Some(Node::Internal(_)) = self.root_node_mut() else {
-            return vec![Nibbles::EMPTY; sorted_keys.0.len()];
-        };
-        let patch_set = &mut self.patch_set;
-
-        // Longest prefix for each key in `key_value_pairs` (i.e., what we'll return from
-        // this method). `None` indicates that the longest prefix for a key is not determined yet.
-        let mut longest_prefixes = vec![None; sorted_keys.0.len()];
-        // Previous encountered when iterating by `sorted_keys` below.
-        let mut prev_nibbles = None;
-        for nibble_count in 1.. {
-            // Extract `nibble_count` nibbles from each key for which we haven't found the parent
-            // yet. Note that nibbles in `requested_keys` are sorted.
-            let requested_keys = sorted_keys.0.iter().filter_map(|(idx, key)| {
-                if longest_prefixes[*idx].is_some() {
-                    return None;
-                }
-                if nibble_count > 2 * KEY_SIZE {
-                    // We have traversed to the final tree level. There's nothing to load;
-                    // we just need to record the longest prefix as the full key.
-                    longest_prefixes[*idx] = Some(Nibbles::new(key, 2 * KEY_SIZE));
-                    return None;
-                }
-
-                let nibbles = Nibbles::new(key, nibble_count);
-                let (this_parent_nibbles, last_nibble) = nibbles.split_last().unwrap();
-                // ^ `unwrap()` is safe by construction; `nibble_count` is positive
-                let this_ref = patch_set.child_ref(&this_parent_nibbles, last_nibble);
-                let Some(this_ref) = this_ref else {
-                    longest_prefixes[*idx] = Some(this_parent_nibbles);
-                    return None;
-                };
-
-                // Deduplicate by `nibbles`. We do it at the end to properly
-                // assign `parent_nibbles` for all keys, and before the version is updated
-                // for `ChildRef`s, in order to update it only once.
-                    if prev_nibbles == Some(nibbles) {
-                        return None;
-                    }
-                    prev_nibbles = Some(nibbles);
-
-                    Some((nibbles.with_version(this_ref.version), this_ref.is_leaf))
-                });
-            let requested_keys: Vec<_> = requested_keys.collect();
-
-            if requested_keys.is_empty() {
-                break;
-            }
-            let new_nodes = db.tree_nodes(&requested_keys);
-            self.metrics.db_reads += new_nodes.len() as u64;
-
-            // Since we load nodes level by level, we can update `patch_set` more efficiently
-            // by pushing entire `HashMap`s into `changes_by_nibble_count`.
-            let level = requested_keys
-                .iter()
-                .zip(new_nodes)
-                .map(|((key, _), node)| {
-                    (key, node.unwrap())
-                    // ^ `unwrap()` is safe: all requested nodes are referenced by their parents
-                });
-            patch_set.push_level_from_db(level);
-        }
+        let LoadAncestorsResult {
+            longest_prefixes,
+            db_reads,
+        } = self.patch_set.load_ancestors(sorted_keys, db);
 
-        // All parents must be set at this point.
-        longest_prefixes.into_iter().map(Option::unwrap).collect()
+        self.metrics.db_reads += db_reads;
+        longest_prefixes
     }
 
     fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome {
@@ -376,10 +311,10 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> {
 
 /// Sorted [`Key`]s together with their indices in the block.
 #[derive(Debug)]
-struct SortedKeys(Vec<(usize, Key)>);
+pub(crate) struct SortedKeys(Vec<(usize, Key)>);
 
 impl SortedKeys {
-    fn new(keys: impl Iterator<Item = Key>) -> Self {
+    pub fn new(keys: impl Iterator<Item = Key>) -> Self {
         let mut keys: Vec<_> = keys.enumerate().collect();
         keys.sort_unstable_by_key(|(_, key)| *key);
         Self(keys)
diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs
index 7e71d8a0647b..f8eb9a3747c5 100644
--- a/core/lib/merkle_tree/src/storage/patch.rs
+++ b/core/lib/merkle_tree/src/storage/patch.rs
@@ -7,10 +7,12 @@ use std::collections::{hash_map::Entry, HashMap};
 use crate::{
     hasher::HashTree,
     metrics::HashingMetrics,
-    storage::proofs::SUBTREE_COUNT,
+    storage::{proofs::SUBTREE_COUNT, SortedKeys},
     types::{
         ChildRef, InternalNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, Root, ValueHash,
+        KEY_SIZE,
     },
+    Database,
 };
 
 /// Raw set of database changes.
@@ -130,10 +132,19 @@ impl WorkingNode {
     }
 }
 
+/// Result of loading ancestors.
+#[derive(Debug)]
+pub(crate) struct LoadAncestorsResult {
+    /// The longest prefixes currently present in the tree for each requested key.
+    pub longest_prefixes: Vec<Nibbles>,
+    /// Number of DB reads used.
+    pub db_reads: u64,
+}
+
 /// Mutable version of [`PatchSet`] where we insert all changed nodes when updating
 /// a Merkle tree.
 #[derive(Debug)]
-pub(super) struct WorkingPatchSet {
+pub(crate) struct WorkingPatchSet {
     version: u64,
     // Group changes by `nibble_count` (which is linearly tied to the tree depth:
     // `depth == nibble_count * 4`) so that we can compute hashes for all changed nodes
@@ -413,6 +424,91 @@ impl WorkingPatchSet {
         });
         PatchSet::new(manifest, self.version, root, nodes.collect(), stale_keys)
     }
+
+    /// Loads ancestor nodes for all keys in `sorted_keys`.
+    ///
+    /// This method works by traversing the tree level by level. It uses [`Database::tree_nodes()`]
+    /// (translating to multi-get in RocksDB) for each level to expedite node loading.
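As an aside, the index-retaining sort behind `SortedKeys::new` can be shown in isolation; a self-contained sketch using plain `std` (names are hypothetical):

```rust
/// Mirrors the `SortedKeys::new` logic: sort keys while remembering their
/// original positions, so per-key results can later be emitted in input order.
fn sorted_with_indices(keys: &[u64]) -> Vec<(usize, u64)> {
    let mut indexed: Vec<_> = keys.iter().copied().enumerate().collect();
    indexed.sort_unstable_by_key(|(_, key)| *key);
    indexed
}

fn main() {
    // Original positions survive the sort: key 1 was at index 1, key 3 at index 0.
    assert_eq!(sorted_with_indices(&[3, 1, 2]), vec![(1, 1), (2, 2), (0, 3)]);
}
```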
+    pub fn load_ancestors<DB: Database + ?Sized>(
+        &mut self,
+        sorted_keys: &SortedKeys,
+        db: &DB,
+    ) -> LoadAncestorsResult {
+        let Some(Node::Internal(_)) = self.get(&Nibbles::EMPTY) else {
+            return LoadAncestorsResult {
+                longest_prefixes: vec![Nibbles::EMPTY; sorted_keys.0.len()],
+                db_reads: 0,
+            };
+        };
+
+        // Longest prefix for each key in `sorted_keys` (i.e., what we'll return from
+        // this method). `None` indicates that the longest prefix for a key is not determined yet.
+        let mut longest_prefixes = vec![None; sorted_keys.0.len()];
+        // Previous nibbles encountered when iterating over `sorted_keys` below.
+        let mut prev_nibbles = None;
+        // Cumulative number of DB reads.
+        let mut db_reads = 0;
+        for nibble_count in 1.. {
+            // Extract `nibble_count` nibbles from each key for which we haven't found the parent
+            // yet. Note that nibbles in `requested_keys` are sorted.
+            let requested_keys = sorted_keys.0.iter().filter_map(|(idx, key)| {
+                if longest_prefixes[*idx].is_some() {
+                    return None;
+                }
+                if nibble_count > 2 * KEY_SIZE {
+                    // We have traversed to the final tree level. There's nothing to load;
+                    // we just need to record the longest prefix as the full key.
+                    longest_prefixes[*idx] = Some(Nibbles::new(key, 2 * KEY_SIZE));
+                    return None;
+                }
+
+                let nibbles = Nibbles::new(key, nibble_count);
+                let (this_parent_nibbles, last_nibble) = nibbles.split_last().unwrap();
+                // ^ `unwrap()` is safe by construction; `nibble_count` is positive
+                let this_ref = self.child_ref(&this_parent_nibbles, last_nibble);
+                let Some(this_ref) = this_ref else {
+                    longest_prefixes[*idx] = Some(this_parent_nibbles);
+                    return None;
+                };
+
+                // Deduplicate by `nibbles`. We do it at the end to properly
+                // assign `parent_nibbles` for all keys, and before the version is updated
+                // for `ChildRef`s, in order to update it only once.
+                if prev_nibbles == Some(nibbles) {
+                    return None;
+                }
+                prev_nibbles = Some(nibbles);
+
+                Some((nibbles.with_version(this_ref.version), this_ref.is_leaf))
+            });
+            let requested_keys: Vec<_> = requested_keys.collect();
+
+            if requested_keys.is_empty() {
+                break;
+            }
+            let new_nodes = db.tree_nodes(&requested_keys);
+            db_reads += new_nodes.len() as u64;
+
+            // Since we load nodes level by level, we can update `patch_set` more efficiently
+            // by pushing entire `HashMap`s into `changes_by_nibble_count`.
+            let level = requested_keys
+                .iter()
+                .zip(new_nodes)
+                .map(|((key, _), node)| {
+                    (key, node.unwrap())
+                    // ^ `unwrap()` is safe: all requested nodes are referenced by their parents
+                });
+            self.push_level_from_db(level);
+        }
+
+        // All parents must be set at this point.
+ let longest_prefixes = longest_prefixes.into_iter().map(Option::unwrap).collect(); + + LoadAncestorsResult { + longest_prefixes, + db_reads, + } + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/src/storage/proofs.rs b/core/lib/merkle_tree/src/storage/proofs.rs index 735b762c51ed..230ca68f270e 100644 --- a/core/lib/merkle_tree/src/storage/proofs.rs +++ b/core/lib/merkle_tree/src/storage/proofs.rs @@ -159,7 +159,9 @@ impl TreeUpdater { } let parent = self.patch_set.get_mut(&parent_nibbles); - let Some(Node::Internal(parent)) = parent else { unreachable!() }; + let Some(Node::Internal(parent)) = parent else { + unreachable!() + }; let parent_level = parent_nibbles.nibble_count() * 4; let mut updater = parent.updater(hasher, parent_level, last_nibble); node_hash = updater.update_child_hash(node_hash); @@ -223,7 +225,9 @@ impl TreeUpdater { } let parent = self.patch_set.get_mut_without_updating(&parent_nibbles); - let Some(Node::Internal(parent)) = parent else { unreachable!() }; + let Some(Node::Internal(parent)) = parent else { + unreachable!() + }; let parent_level = parent_nibbles.nibble_count() * 4; parent .updater(hasher, parent_level, last_nibble) diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 14d5595c96a8..3aadf97f3739 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -13,7 +13,7 @@ use crate::{ }, types::{InternalNode, LeafNode, Manifest, Nibbles, Node, NodeKey, Root, StaleNodeKey}, }; -use zksync_storage::{db::NamedColumnFamily, RocksDB}; +use zksync_storage::{db::NamedColumnFamily, rocksdb::DBPinnableSlice, RocksDB}; /// RocksDB column families used by the tree. #[derive(Debug, Clone, Copy)] @@ -77,8 +77,6 @@ impl RocksDBWrapper { /// the `io-uring` feature of `rocksdb` crate and is only available on Linux. /// Thus, setting this value to around `100..1_000` can still lead to substantial /// performance boost (order of 2x) in some environments. - /// - /// [RocksDB docs]: https://github.com/facebook/rocksdb/wiki/MultiGet-Performance pub fn set_multi_get_chunk_size(&mut self, chunk_size: usize) { self.multi_get_chunk_size = chunk_size; } @@ -89,7 +87,7 @@ impl RocksDBWrapper { .expect("Failed reading from RocksDB") } - fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { + fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { // `par_chunks()` below uses `rayon` to speed up multi-get I/O; // see `Self::set_multi_get_chunk_size()` docs for an explanation why this makes sense. 
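The chunked multi-get pattern referenced above can be sketched generically; an editor's example in which `fetch_batch` is a hypothetical stand-in for a RocksDB multi-get call, with `rayon` assumed as a dependency:

```rust
use rayon::prelude::*;

/// Chunk the key set and issue one multi-get per chunk in parallel; collecting
/// an ordered parallel iterator preserves the original key order.
fn chunked_multi_get<K: Sync, V: Send>(
    keys: &[K],
    chunk_size: usize,
    fetch_batch: impl Fn(&[K]) -> Vec<V> + Sync + Send,
) -> Vec<V> {
    keys.par_chunks(chunk_size)
        .map(fetch_batch)
        .flatten_iter()
        .collect()
}
```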
keys.par_chunks(self.multi_get_chunk_size) diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index bc927d6f95a3..190066891745 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -10,7 +10,7 @@ use std::collections::{HashMap, HashSet}; use super::*; use crate::{ hasher::{HasherWithStats, MerklePath}, - types::{NodeKey, TreeInstruction}, + types::{NodeKey, TreeInstruction, KEY_SIZE}, }; use zksync_types::{H256, U256}; @@ -43,7 +43,7 @@ fn inserting_entries_in_empty_database() { let db = PatchSet::default(); let mut updater = TreeUpdater::new(0, Root::Empty); assert_eq!(updater.patch_set.version(), 0); - assert!(updater.root_node_mut().is_none()); + assert!(updater.patch_set.get(&Nibbles::EMPTY).is_none()); let sorted_keys = SortedKeys::new([FIRST_KEY, SECOND_KEY, THIRD_KEY].into_iter()); let parent_nibbles = updater.load_ancestors(&sorted_keys, &db); @@ -302,7 +302,11 @@ fn reading_keys_does_not_change_child_version() { ]; let (_, patch) = storage.extend_with_proofs(instructions); - let Root::Filled { leaf_count, node: Node::Internal(node) } = &patch.roots[&1] else { + let Root::Filled { + leaf_count, + node: Node::Internal(node), + } = &patch.roots[&1] + else { panic!("unexpected root"); }; assert_eq!(u64::from(*leaf_count), 3); diff --git a/core/lib/merkle_tree/src/types.rs b/core/lib/merkle_tree/src/types.rs index 8bd151e749c7..5ee36dc6ef94 100644 --- a/core/lib/merkle_tree/src/types.rs +++ b/core/lib/merkle_tree/src/types.rs @@ -271,7 +271,7 @@ impl NodeKey { Nibbles::from_parts(nibbles, nibble_count).with_version(version) } - pub fn is_empty(&self) -> bool { + pub(crate) fn is_empty(&self) -> bool { self.nibbles.nibble_count == 0 } @@ -319,6 +319,22 @@ impl LeafNode { } } +/// Data of a leaf node of the tree. +#[derive(Debug, Clone, Copy)] +pub struct LeafData { + pub value_hash: ValueHash, + pub leaf_index: u64, +} + +impl From for LeafData { + fn from(leaf: LeafNode) -> Self { + Self { + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + } + } +} + /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] @@ -436,10 +452,13 @@ impl InternalNode { } } +/// Tree node (either a leaf or an internal node). #[derive(Debug, Clone)] #[cfg_attr(test, derive(PartialEq))] pub enum Node { + /// Internal node. Internal(InternalNode), + /// Tree leaf. 
Leaf(LeafNode), } diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index e8b0d91c0f37..cb84794c7263 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -4,18 +4,17 @@ use serde::{Deserialize, Serialize}; use serde_with::{hex::Hex, serde_as}; use tempfile::TempDir; -use std::{num::NonZeroU32, slice}; +use std::slice; use zksync_config::constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{domain::ZkSyncTree, HashTree}; use zksync_storage::RocksDB; use zksync_types::{ - proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, - WitnessStorageLog, H256, + proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, H256, }; -fn gen_storage_logs() -> Vec { +fn gen_storage_logs() -> Vec { let addrs = vec![ "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", @@ -33,36 +32,21 @@ fn gen_storage_logs() -> Vec { proof_keys .zip(proof_values) - .map(|(proof_key, proof_value)| { - let storage_log = StorageLog::new_write_log(proof_key, proof_value); - WitnessStorageLog { - storage_log, - previous_value: H256::zero(), - } - }) + .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)) .collect() } -fn convert_logs(logs: impl Iterator) -> Vec { - logs.map(|storage_log| WitnessStorageLog { - storage_log, - previous_value: H256::zero(), - }) - .collect() -} - #[test] fn basic_workflow() { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let logs = gen_storage_logs(); - let block_number = NonZeroU32::new(1).unwrap(); let (metadata, expected_root_hash) = { let db = RocksDB::new(temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new_lightweight(db); - let metadata = tree.process_block(&logs); + let metadata = tree.process_l1_batch(&logs); tree.save(); - tree.verify_consistency(block_number); + tree.verify_consistency(L1BatchNumber(0)); (metadata, tree.root_hash()) }; @@ -70,7 +54,7 @@ fn basic_workflow() { assert_eq!(metadata.rollup_last_leaf_index, 101); assert_eq!(metadata.initial_writes.len(), logs.len()); for (write, log) in metadata.initial_writes.iter().zip(&logs) { - assert_eq!(write.value, log.storage_log.value); + assert_eq!(write.value, log.value); } assert!(metadata.repeated_writes.is_empty()); @@ -84,9 +68,9 @@ fn basic_workflow() { let db = RocksDB::new(temp_dir.as_ref(), false); let tree = ZkSyncTree::new_lightweight(db); - tree.verify_consistency(block_number); + tree.verify_consistency(L1BatchNumber(0)); assert_eq!(tree.root_hash(), expected_root_hash); - assert_eq!(tree.block_number(), block_number.get()); + assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); } #[test] @@ -100,7 +84,7 @@ fn basic_workflow_multiblock() { let mut tree = ZkSyncTree::new_lightweight(db); tree.use_dedicated_thread_pool(2); for block in blocks { - tree.process_block(block); + tree.process_l1_batch(block); } tree.save(); tree.root_hash() @@ -117,7 +101,42 @@ fn basic_workflow_multiblock() { let db = RocksDB::new(temp_dir.as_ref(), false); let tree = ZkSyncTree::new_lightweight(db); assert_eq!(tree.root_hash(), expected_root_hash); - assert_eq!(tree.block_number(), 12); + assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(12)); +} + +#[test] +fn filtering_out_no_op_writes() { + let temp_dir = TempDir::new().expect("failed get temporary directory for 
RocksDB"); + let db = RocksDB::new(temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new(db); + let mut logs = gen_storage_logs(); + let root_hash = tree.process_l1_batch(&logs).root_hash; + tree.save(); + + // All writes are no-op updates and thus must be filtered out. + let new_metadata = tree.process_l1_batch(&logs); + assert_eq!(new_metadata.root_hash, root_hash); + assert!(new_metadata.initial_writes.is_empty()); + assert!(new_metadata.repeated_writes.is_empty()); + let merkle_paths = new_metadata.witness.unwrap().into_merkle_paths(); + assert_eq!(merkle_paths.len(), 0); + + // Add some actual repeated writes. + let mut expected_writes_count = 0; + for log in logs.iter_mut().step_by(3) { + log.value = H256::repeat_byte(0xff); + expected_writes_count += 1; + } + let new_metadata = tree.process_l1_batch(&logs); + assert_ne!(new_metadata.root_hash, root_hash); + assert!(new_metadata.initial_writes.is_empty()); + assert_eq!(new_metadata.repeated_writes.len(), expected_writes_count); + let merkle_paths = new_metadata.witness.unwrap().into_merkle_paths(); + assert_eq!(merkle_paths.len(), expected_writes_count); + for merkle_path in merkle_paths { + assert!(!merkle_path.first_write); + assert_eq!(merkle_path.value_written, [0xff; 32]); + } } #[test] @@ -134,25 +153,24 @@ fn revert_blocks() { let proof_values = (0..100).map(H256::from_low_u64_be); // Add couple of blocks of distinct keys/values - let mut logs: Vec<_> = convert_logs( - proof_keys - .zip(proof_values) - .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)), - ); + let mut logs: Vec<_> = proof_keys + .zip(proof_values) + .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)) + .collect(); // Add a block with repeated keys - let mut extra_logs = convert_logs((0..block_size).map(move |i| { + let extra_logs = (0..block_size).map(move |i| { StorageLog::new_write_log( StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i as u64)), H256::from_low_u64_be((i + 1) as u64), ) - })); - logs.append(&mut extra_logs); + }); + logs.extend(extra_logs); let mirror_logs = logs.clone(); let tree_metadata: Vec<_> = { let mut tree = ZkSyncTree::new_lightweight(storage); let metadata = logs.chunks(block_size).map(|chunk| { - let metadata = tree.process_block(chunk); + let metadata = tree.process_l1_batch(chunk); tree.save(); metadata }); @@ -212,14 +230,14 @@ fn revert_blocks() { { let storage_log = mirror_logs.get(3 * block_size).unwrap(); let mut tree = ZkSyncTree::new_lightweight(storage); - tree.process_block(slice::from_ref(storage_log)); + tree.process_l1_batch(slice::from_ref(storage_log)); tree.save(); } // check saved block number let storage = RocksDB::new(temp_dir.as_ref(), false); let tree = ZkSyncTree::new_lightweight(storage); - assert_eq!(tree.block_number(), 3); + assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(3)); } #[test] @@ -233,11 +251,11 @@ fn reset_tree() { logs.chunks(5) .into_iter() .fold(empty_root_hash, |hash, chunk| { - tree.process_block(chunk); + tree.process_l1_batch(chunk); tree.reset(); assert_eq!(tree.root_hash(), hash); - tree.process_block(chunk); + tree.process_l1_batch(chunk); tree.save(); tree.root_hash() }); @@ -252,17 +270,18 @@ fn read_logs() { let write_metadata = { let db = RocksDB::new(temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new_lightweight(db); - let metadata = tree.process_block(&logs); + let metadata = tree.process_l1_batch(&logs); tree.save(); metadata }; let db = RocksDB::new(temp_dir.as_ref(), false); 
let mut tree = ZkSyncTree::new_lightweight(db); - let read_logs = logs + let read_logs: Vec<_> = logs .into_iter() - .map(|log| StorageLog::new_read_log(log.storage_log.key, log.storage_log.value)); - let read_metadata = tree.process_block(&convert_logs(read_logs)); + .map(|log| StorageLog::new_read_log(log.key, log.value)) + .collect(); + let read_metadata = tree.process_l1_batch(&read_logs); assert_eq!(read_metadata.root_hash, write_metadata.root_hash); } @@ -271,14 +290,11 @@ fn create_write_log( address: Address, address_storage_key: [u8; 32], value: [u8; 32], -) -> WitnessStorageLog { - WitnessStorageLog { - storage_log: StorageLog::new_write_log( - StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)), - H256(value), - ), - previous_value: H256::zero(), - } +) -> StorageLog { + StorageLog::new_write_log( + StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)), + H256(value), + ) } fn subtract_from_max_value(diff: u8) -> [u8; 32] { @@ -329,7 +345,7 @@ fn root_hash_compatibility() { ), ]; - let metadata = tree.process_block(&storage_logs); + let metadata = tree.process_l1_batch(&storage_logs); assert_eq!( metadata.root_hash, H256([ @@ -345,11 +361,11 @@ fn process_block_idempotency_check() { let rocks_db = RocksDB::new(temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new_lightweight(rocks_db); let logs = gen_storage_logs(); - let tree_metadata = tree.process_block(&logs); + let tree_metadata = tree.process_l1_batch(&logs); // Simulate server restart by calling `process_block` again on the same tree tree.reset(); - let repeated_tree_metadata = tree.process_block(&logs); + let repeated_tree_metadata = tree.process_l1_batch(&logs); assert_eq!(repeated_tree_metadata.root_hash, tree_metadata.root_hash); assert_eq!( repeated_tree_metadata.initial_writes, @@ -407,7 +423,7 @@ fn witness_workflow() { let db = RocksDB::new(temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new(db); - let metadata = tree.process_block(first_chunk); + let metadata = tree.process_l1_batch(first_chunk); let job = metadata.witness.unwrap(); assert_eq!(job.next_enumeration_index(), 1); let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); @@ -442,7 +458,7 @@ fn witnesses_with_multiple_blocks() { .collect(); let non_empty_levels_by_block = logs.chunks(10).map(|block| { - let metadata = tree.process_block(block); + let metadata = tree.process_l1_batch(block); let witness = metadata.witness.unwrap(); let non_empty_levels = witness.into_merkle_paths().map(|log| { diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml new file mode 100644 index 000000000000..7cfdc2ee69e8 --- /dev/null +++ b/core/lib/multivm/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "multivm" +version = "0.1.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +vm_m5 = { path = "../../multivm_deps/vm_m5", version = "0.1.0" } +vm_m6 = { path = "../../multivm_deps/vm_m6", version = "0.1.0" } +vm_vm1_3_2 = { path = "../vm", package = "vm", version = "0.1.0" } + +zksync_types = { path = "../types", version = "1.0.0" } +zksync_state = { path = "../state", version = "1.0.0" } +zksync_contracts = { path = "../contracts", version = "1.0.0" } +zksync_utils = { path = "../utils", version = "1.0.0" } + +vlog = { path = "../../lib/vlog", version = "1.0" } diff --git 
a/core/lib/multivm/README.md b/core/lib/multivm/README.md
new file mode 100644
index 000000000000..3140e15a4a08
--- /dev/null
+++ b/core/lib/multivm/README.md
@@ -0,0 +1,6 @@
+# External node's VM
+
+This crate is a wrapper over several versions of the VM that have been used by the main node. It contains the glue
+code that allows switching the VM version based on the externally provided marker while preserving the public interface.
+This crate exists to enable the external node to process breaking upgrades and re-execute all the transactions from the
+genesis block.
diff --git a/core/lib/multivm/src/glue/block_properties.rs b/core/lib/multivm/src/glue/block_properties.rs
new file mode 100644
index 000000000000..0e8f4d45ed4a
--- /dev/null
+++ b/core/lib/multivm/src/glue/block_properties.rs
@@ -0,0 +1,66 @@
+use zksync_types::U256;
+
+use crate::VmVersion;
+
+/// Wrapper for block properties.
+/// We have to keep it as a public type, since the VM requires it to be passed by reference with the same lifetime
+/// as other parameters.
+///
+/// Note that unlike other wrappers, this one depends not on the versions of the VMs, but on the version of the
+/// `zk_evm` crate. Prior to `VM1_3_2`, a single version of the `zk_evm` crate was used, so here we reduce the amount
+/// of boilerplate, leaving only the necessary wrappers.
+pub enum BlockProperties {
+    // M5 & M6 are covered by this variant.
+    Vm1_3_1(vm_m6::zk_evm::block_properties::BlockProperties),
+    Vm1_3_2(vm_vm1_3_2::zk_evm::block_properties::BlockProperties),
+}
+
+impl BlockProperties {
+    pub fn new(vm_version: VmVersion, default_aa_code_hash: U256) -> Self {
+        match vm_version {
+            VmVersion::M5WithoutRefunds
+            | VmVersion::M5WithRefunds
+            | VmVersion::M6Initial
+            | VmVersion::M6BugWithCompressionFixed => {
+                let inner = vm_m6::zk_evm::block_properties::BlockProperties {
+                    zkporter_is_available: false,
+                    default_aa_code_hash,
+                };
+                Self::Vm1_3_1(inner)
+            }
+            VmVersion::Vm1_3_2 => {
+                let inner = vm_vm1_3_2::zk_evm::block_properties::BlockProperties {
+                    zkporter_is_available: false,
+                    default_aa_code_hash,
+                };
+                Self::Vm1_3_2(inner)
+            }
+        }
+    }
+
+    pub fn m5(&self) -> &vm_m5::zk_evm::block_properties::BlockProperties {
+        // This is not a typo, M5 is covered by this variant. See doc-comment for the enum.
+        if let BlockProperties::Vm1_3_1(inner) = self {
+            inner
+        } else {
+            panic!("BlockProperties::m5 called on non-M5 variant");
+        }
+    }
+
+    pub fn m6(&self) -> &vm_m6::zk_evm::block_properties::BlockProperties {
+        // This is not a typo, M6 is covered by this variant. See doc-comment for the enum.
+        if let BlockProperties::Vm1_3_1(inner) = self {
+            inner
+        } else {
+            panic!("BlockProperties::m6 called on non-M6 variant");
+        }
+    }
+
+    pub fn vm1_3_2(&self) -> &vm_vm1_3_2::zk_evm::block_properties::BlockProperties {
+        if let BlockProperties::Vm1_3_2(inner) = self {
+            inner
+        } else {
+            panic!("BlockProperties::vm1_3_2 called on non-Vm1_3_2 variant");
+        }
+    }
+}
diff --git a/core/lib/multivm/src/glue/init_vm.rs b/core/lib/multivm/src/glue/init_vm.rs
new file mode 100644
index 000000000000..43e64e4a1508
--- /dev/null
+++ b/core/lib/multivm/src/glue/init_vm.rs
@@ -0,0 +1,101 @@
+use super::GlueInto;
+use crate::VmVersion;
+
+pub fn init_vm<'a>(
+    version: VmVersion,
+    oracle_tools: &'a mut crate::glue::oracle_tools::OracleTools<'a>,
+    block_context: vm_vm1_3_2::vm_with_bootloader::BlockContextMode,
+    block_properties: &'a crate::glue::block_properties::BlockProperties,
+    execution_mode: vm_vm1_3_2::vm_with_bootloader::TxExecutionMode,
+    base_system_contracts: &zksync_contracts::BaseSystemContracts,
+) -> crate::VmInstance<'a> {
+    let gas_limit = match version {
+        VmVersion::M5WithoutRefunds => vm_m5::utils::BLOCK_GAS_LIMIT,
+        VmVersion::M5WithRefunds => vm_m5::utils::BLOCK_GAS_LIMIT,
+        VmVersion::M6Initial => vm_m6::utils::BLOCK_GAS_LIMIT,
+        VmVersion::M6BugWithCompressionFixed => vm_m6::utils::BLOCK_GAS_LIMIT,
+        VmVersion::Vm1_3_2 => vm_vm1_3_2::utils::BLOCK_GAS_LIMIT,
+    };
+
+    init_vm_with_gas_limit(
+        version,
+        oracle_tools,
+        block_context,
+        block_properties,
+        execution_mode,
+        base_system_contracts,
+        gas_limit,
+    )
+}
+
+pub fn init_vm_with_gas_limit<'a>(
+    version: VmVersion,
+    oracle_tools: &'a mut crate::glue::oracle_tools::OracleTools<'a>,
+    block_context: vm_vm1_3_2::vm_with_bootloader::BlockContextMode,
+    block_properties: &'a crate::glue::block_properties::BlockProperties,
+    execution_mode: vm_vm1_3_2::vm_with_bootloader::TxExecutionMode,
+    base_system_contracts: &zksync_contracts::BaseSystemContracts,
+    gas_limit: u32,
+) -> crate::VmInstance<'a> {
+    match version {
+        VmVersion::M5WithoutRefunds => {
+            let inner_vm = vm_m5::vm_with_bootloader::init_vm_with_gas_limit(
+                vm_m5::vm::MultiVMSubversion::V1,
+                oracle_tools.m5(),
+                block_context.glue_into(),
+                block_properties.m5(),
+                execution_mode.glue_into(),
+                &base_system_contracts.clone().glue_into(),
+                gas_limit,
+            );
+            crate::VmInstance::VmM5(inner_vm)
+        }
+        VmVersion::M5WithRefunds => {
+            let inner_vm = vm_m5::vm_with_bootloader::init_vm_with_gas_limit(
+                vm_m5::vm::MultiVMSubversion::V2,
+                oracle_tools.m5(),
+                block_context.glue_into(),
+                block_properties.m5(),
+                execution_mode.glue_into(),
+                &base_system_contracts.clone().glue_into(),
+                gas_limit,
+            );
+            crate::VmInstance::VmM5(inner_vm)
+        }
+        VmVersion::M6Initial => {
+            let inner_vm = vm_m6::vm_with_bootloader::init_vm_with_gas_limit(
+                vm_m6::vm::MultiVMSubversion::V1,
+                oracle_tools.m6(),
+                block_context.glue_into(),
+                block_properties.m6(),
+                execution_mode.glue_into(),
+                &base_system_contracts.clone().glue_into(),
+                gas_limit,
+            );
+            crate::VmInstance::VmM6(inner_vm)
+        }
+        VmVersion::M6BugWithCompressionFixed => {
+            let inner_vm = vm_m6::vm_with_bootloader::init_vm_with_gas_limit(
+                vm_m6::vm::MultiVMSubversion::V2,
+                oracle_tools.m6(),
+                block_context.glue_into(),
+                block_properties.m6(),
+                execution_mode.glue_into(),
+                &base_system_contracts.clone().glue_into(),
+                gas_limit,
+            );
+            crate::VmInstance::VmM6(inner_vm)
+        }
+        VmVersion::Vm1_3_2 => {
+            let inner_vm = vm_vm1_3_2::vm_with_bootloader::init_vm_with_gas_limit(
+                oracle_tools.vm1_3_2(),
+                block_context.glue_into(),
+                block_properties.vm1_3_2(),
+                execution_mode.glue_into(),
+                &base_system_contracts.clone().glue_into(),
+                gas_limit,
+            );
+            crate::VmInstance::Vm1_3_2(inner_vm)
+        }
+    }
+}
diff --git a/core/lib/multivm/src/glue/mod.rs b/core/lib/multivm/src/glue/mod.rs
new file mode 100644
index 000000000000..8954089b1714
--- /dev/null
+++ b/core/lib/multivm/src/glue/mod.rs
@@ -0,0 +1,48 @@
+//! This module contains "glue" code that allows operating with multiple versions of the VM crate families (i.e.,
+//! the `vm` crate and its dependencies that are used in the crate API).
+//!
+//! Glue generally comes in two flavors:
+//! - "Public glue", aka types that are used externally to instantiate the MultiVM (like `OracleTools` and `init_vm`).
+//! - "Private glue", aka type conversions from the current version to the "past" ones and vice versa.
+//!
+//! The "private glue" lives in the `types` module.
+
+pub(crate) mod block_properties;
+pub(crate) mod init_vm;
+pub(crate) mod oracle_tools;
+mod types;
+
+/// This trait is a workaround for Rust's [orphan rule](orphan_rule).
+/// We need to convert a lot of types that come from two different versions of some crate,
+/// and the `From`/`Into` traits are the natural way of doing so. Unfortunately, we can't implement an
+/// external trait on a pair of external types, so we're unable to use these traits.
+///
+/// However, we can implement any *local* trait on a pair of external types, so here are the "glued"
+/// versions of `From`/`Into`.
+///
+/// [orphan_rule]: https://github.com/Ixrec/rust-orphan-rules
+pub trait GlueFrom<T>: Sized {
+    fn glue_from(value: T) -> Self;
+}
+
+/// See the description of the [`GlueFrom`] trait above.
+pub trait GlueInto<T>: Sized {
+    fn glue_into(self) -> T;
+}
+
+// Blanket `GlueInto` impl for any type that implements `GlueFrom`.
+impl<T, U> GlueInto<U> for T
+where
+    U: GlueFrom<T>,
+{
+    fn glue_into(self) -> U {
+        U::glue_from(self)
+    }
+}
+
+// Identity impl.
+impl<T> GlueFrom<T> for T {
+    fn glue_from(this: T) -> Self {
+        this
+    }
+}
diff --git a/core/lib/multivm/src/glue/oracle_tools.rs b/core/lib/multivm/src/glue/oracle_tools.rs
new file mode 100644
index 000000000000..49cf552f1ce2
--- /dev/null
+++ b/core/lib/multivm/src/glue/oracle_tools.rs
@@ -0,0 +1,69 @@
+use crate::VmVersion;
+
+use zksync_state::{ReadStorage, StorageView};
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
+pub enum OracleTools<'a> {
+    M5(vm_m5::OracleTools<'a, false>),
+    M6(vm_m6::OracleTools<'a, false, vm_m6::HistoryEnabled>),
+    Vm1_3_2(vm_vm1_3_2::OracleTools<'a, false, vm_vm1_3_2::HistoryEnabled>),
+}
+
+impl<'a> OracleTools<'a> {
+    pub fn new<S>(version: VmVersion, state: &'a mut StorageView<S>) -> Self
+    where
+        S: ReadStorage + std::fmt::Debug + Send + Sync,
+    {
+        match version {
+            VmVersion::M5WithoutRefunds => {
+                let oracle_tools = vm_m5::OracleTools::new(
+                    state as &mut dyn vm_m5::storage::Storage,
+                    vm_m5::vm::MultiVMSubversion::V1,
+                );
+                OracleTools::M5(oracle_tools)
+            }
+            VmVersion::M5WithRefunds => {
+                let oracle_tools = vm_m5::OracleTools::new(
+                    state as &mut dyn vm_m5::storage::Storage,
+                    vm_m5::vm::MultiVMSubversion::V2,
+                );
+                OracleTools::M5(oracle_tools)
+            }
+            VmVersion::M6Initial | VmVersion::M6BugWithCompressionFixed => {
+                let oracle_tools = vm_m6::OracleTools::new(
+                    state as &mut dyn vm_m6::storage::Storage,
+                    vm_m6::HistoryEnabled,
+                );
+                OracleTools::M6(oracle_tools)
+            }
+            VmVersion::Vm1_3_2 => {
+                let oracle_tools = vm_vm1_3_2::OracleTools::new(state, vm_vm1_3_2::HistoryEnabled);
+                OracleTools::Vm1_3_2(oracle_tools)
+            }
+        }
+    }
+
+    pub fn vm1_3_2(
+        &mut self,
+    ) -> &mut vm_vm1_3_2::OracleTools<'a, false, vm_vm1_3_2::HistoryEnabled> {
+        let OracleTools::Vm1_3_2(oracle_tools) = self else {
+            panic!("OracleTools::vm1_3_2() called on non-Vm1_3_2 version")
+        };
+        oracle_tools
+    }
+
+    pub fn m6(&mut self) -> &mut vm_m6::OracleTools<'a, false, vm_m6::HistoryEnabled> {
+        let OracleTools::M6(oracle_tools) = self else {
+            panic!("OracleTools::m6() called on non-m6 version")
+        };
+        oracle_tools
+    }
+
+    pub fn m5(&mut self) -> &mut vm_m5::OracleTools<'a, false> {
+        let OracleTools::M5(oracle_tools) = self else {
+            panic!("OracleTools::m5() called on non-m5 version")
+        };
+        oracle_tools
+    }
+}
diff --git a/core/lib/multivm/src/glue/types/mod.rs b/core/lib/multivm/src/glue/types/mod.rs
new file mode 100644
index 000000000000..ea96eac8bcb1
--- /dev/null
+++ b/core/lib/multivm/src/glue/types/mod.rs
@@ -0,0 +1,8 @@
+//! Glue for the basic types that are used in the VM.
+//! This is "internal" glue that generally converts the "latest" input type to the target
+//! "VM" type (e.g. "latest" -> "vm_m5"), and then converts the "VM" output type to the
+//! "latest" output type (e.g. "vm_m5" -> "latest").
+//!
+//! This "glue layer" is generally not visible outside of the crate.
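To make the glue pattern concrete, here is a minimal editor's sketch of a typical "private glue" conversion; `OldStatus`/`NewStatus` are made-up stand-ins for types from two crate versions, and the `GlueFrom`/`GlueInto` traits above are assumed to be in scope:

```rust
enum OldStatus { Ok, Err }
enum NewStatus { Success, Failure }

// A typical glue conversion: implement the *local* `GlueFrom` trait for a pair of
// (here, made-up) external types; the orphan rule would forbid `From` in this position.
impl GlueFrom<OldStatus> for NewStatus {
    fn glue_from(value: OldStatus) -> Self {
        match value {
            OldStatus::Ok => Self::Success,
            OldStatus::Err => Self::Failure,
        }
    }
}

fn convert(old: OldStatus) -> NewStatus {
    old.glue_into() // provided by the blanket `GlueInto` impl
}
```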
+ +mod vm; diff --git a/core/lib/multivm/src/glue/types/vm/block_context.rs b/core/lib/multivm/src/glue/types/vm/block_context.rs new file mode 100644 index 000000000000..702f7f08fcd5 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/block_context.rs @@ -0,0 +1,29 @@ +use crate::glue::GlueFrom; + +impl GlueFrom + for vm_m5::vm_with_bootloader::BlockContext +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BlockContext) -> Self { + Self { + block_number: value.block_number, + block_timestamp: value.block_timestamp, + operator_address: value.operator_address, + l1_gas_price: value.l1_gas_price, + fair_l2_gas_price: value.fair_l2_gas_price, + } + } +} + +impl GlueFrom + for vm_m6::vm_with_bootloader::BlockContext +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BlockContext) -> Self { + Self { + block_number: value.block_number, + block_timestamp: value.block_timestamp, + operator_address: value.operator_address, + l1_gas_price: value.l1_gas_price, + fair_l2_gas_price: value.fair_l2_gas_price, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/block_context_mode.rs b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs new file mode 100644 index 000000000000..70830947c928 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs @@ -0,0 +1,41 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom + for vm_m5::vm_with_bootloader::BlockContextMode +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BlockContextMode) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::BlockContextMode::NewBlock( + derived, + prev_block_hash, + ) => { + let derived = derived.glue_into(); + Self::NewBlock(derived, prev_block_hash) + } + vm_vm1_3_2::vm_with_bootloader::BlockContextMode::OverrideCurrent(derived) => { + let derived = derived.glue_into(); + Self::OverrideCurrent(derived) + } + } + } +} + +impl GlueFrom + for vm_m6::vm_with_bootloader::BlockContextMode +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BlockContextMode) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::BlockContextMode::NewBlock( + derived, + prev_block_hash, + ) => { + let derived = derived.glue_into(); + Self::NewBlock(derived, prev_block_hash) + } + vm_vm1_3_2::vm_with_bootloader::BlockContextMode::OverrideCurrent(derived) => { + let derived = derived.glue_into(); + Self::OverrideCurrent(derived) + } + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/bootloader_job_type.rs b/core/lib/multivm/src/glue/types/vm/bootloader_job_type.rs new file mode 100644 index 000000000000..f19891d2f949 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/bootloader_job_type.rs @@ -0,0 +1,31 @@ +use crate::glue::GlueFrom; + +impl GlueFrom + for vm_m5::vm_with_bootloader::BootloaderJobType +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BootloaderJobType) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::BootloaderJobType::TransactionExecution => { + Self::TransactionExecution + } + vm_vm1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing => { + Self::BlockPostprocessing + } + } + } +} + +impl GlueFrom + for vm_m6::vm_with_bootloader::BootloaderJobType +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::BootloaderJobType) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::BootloaderJobType::TransactionExecution => { + Self::TransactionExecution + } + vm_vm1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing => { + Self::BlockPostprocessing + } + } + } +} diff --git 
a/core/lib/multivm/src/glue/types/vm/derived_block_context.rs b/core/lib/multivm/src/glue/types/vm/derived_block_context.rs new file mode 100644 index 000000000000..146db6f3af1a --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/derived_block_context.rs @@ -0,0 +1,23 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom + for vm_m5::vm_with_bootloader::DerivedBlockContext +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::DerivedBlockContext) -> Self { + Self { + context: value.context.glue_into(), + base_fee: value.base_fee, + } + } +} + +impl GlueFrom + for vm_m6::vm_with_bootloader::DerivedBlockContext +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::DerivedBlockContext) -> Self { + Self { + context: value.context.glue_into(), + base_fee: value.base_fee, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/mod.rs b/core/lib/multivm/src/glue/types/vm/mod.rs new file mode 100644 index 000000000000..a6f6aa3c526c --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/mod.rs @@ -0,0 +1,12 @@ +mod block_context; +mod block_context_mode; +mod bootloader_job_type; +mod derived_block_context; +mod tx_execution_mode; +mod tx_revert_reason; +mod vm_block_result; +mod vm_execution_result; +mod vm_partial_execution_result; +mod vm_revert_reason; +mod vm_revert_reason_parsing_result; +mod vm_tx_execution_result; diff --git a/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs b/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs new file mode 100644 index 000000000000..7095e60463f8 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/tx_execution_mode.rs @@ -0,0 +1,35 @@ +use crate::glue::GlueFrom; + +impl GlueFrom + for vm_m5::vm_with_bootloader::TxExecutionMode +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::TxExecutionMode) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::VerifyExecute => Self::VerifyExecute, + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::EstimateFee { .. } => { + Self::EstimateFee + } + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::EthCall { .. 
} => Self::EthCall, + } + } +} + +impl GlueFrom + for vm_m6::vm_with_bootloader::TxExecutionMode +{ + fn glue_from(value: vm_vm1_3_2::vm_with_bootloader::TxExecutionMode) -> Self { + match value { + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::VerifyExecute => Self::VerifyExecute, + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::EstimateFee { + missed_storage_invocation_limit, + } => Self::EstimateFee { + missed_storage_invocation_limit, + }, + vm_vm1_3_2::vm_with_bootloader::TxExecutionMode::EthCall { + missed_storage_invocation_limit, + } => Self::EthCall { + missed_storage_invocation_limit, + }, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/tx_revert_reason.rs b/core/lib/multivm/src/glue/types/vm/tx_revert_reason.rs new file mode 100644 index 000000000000..7751eed3588d --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/tx_revert_reason.rs @@ -0,0 +1,64 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_vm1_3_2::TxRevertReason { + fn glue_from(value: vm_m5::TxRevertReason) -> Self { + match value { + vm_m5::TxRevertReason::EthCall(err) => Self::EthCall(err.glue_into()), + vm_m5::TxRevertReason::TxReverted(err) => Self::TxReverted(err.glue_into()), + vm_m5::TxRevertReason::ValidationFailed(err) => Self::ValidationFailed(err.glue_into()), + vm_m5::TxRevertReason::PaymasterValidationFailed(err) => { + Self::PaymasterValidationFailed(err.glue_into()) + } + vm_m5::TxRevertReason::PrePaymasterPreparationFailed(err) => { + Self::PrePaymasterPreparationFailed(err.glue_into()) + } + vm_m5::TxRevertReason::PayForTxFailed(err) => Self::PayForTxFailed(err.glue_into()), + vm_m5::TxRevertReason::FailedToMarkFactoryDependencies(err) => { + Self::FailedToMarkFactoryDependencies(err.glue_into()) + } + vm_m5::TxRevertReason::FailedToChargeFee(err) => { + Self::FailedToChargeFee(err.glue_into()) + } + vm_m5::TxRevertReason::FromIsNotAnAccount => Self::FromIsNotAnAccount, + vm_m5::TxRevertReason::InnerTxError => Self::InnerTxError, + vm_m5::TxRevertReason::Unknown(err) => Self::Unknown(err.glue_into()), + vm_m5::TxRevertReason::UnexpectedVMBehavior(err) => Self::UnexpectedVMBehavior(err), + vm_m5::TxRevertReason::BootloaderOutOfGas => Self::BootloaderOutOfGas, + vm_m5::TxRevertReason::TooBigGasLimit => Self::TooBigGasLimit, + vm_m5::TxRevertReason::NotEnoughGasProvided => Self::NotEnoughGasProvided, + } + } +} + +impl GlueFrom for vm_vm1_3_2::TxRevertReason { + fn glue_from(value: vm_m6::TxRevertReason) -> Self { + match value { + vm_m6::TxRevertReason::EthCall(err) => Self::EthCall(err.glue_into()), + vm_m6::TxRevertReason::TxReverted(err) => Self::TxReverted(err.glue_into()), + vm_m6::TxRevertReason::ValidationFailed(err) => Self::ValidationFailed(err.glue_into()), + vm_m6::TxRevertReason::PaymasterValidationFailed(err) => { + Self::PaymasterValidationFailed(err.glue_into()) + } + vm_m6::TxRevertReason::PrePaymasterPreparationFailed(err) => { + Self::PrePaymasterPreparationFailed(err.glue_into()) + } + vm_m6::TxRevertReason::PayForTxFailed(err) => Self::PayForTxFailed(err.glue_into()), + vm_m6::TxRevertReason::FailedToMarkFactoryDependencies(err) => { + Self::FailedToMarkFactoryDependencies(err.glue_into()) + } + vm_m6::TxRevertReason::FailedToChargeFee(err) => { + Self::FailedToChargeFee(err.glue_into()) + } + vm_m6::TxRevertReason::FromIsNotAnAccount => Self::FromIsNotAnAccount, + vm_m6::TxRevertReason::InnerTxError => Self::InnerTxError, + vm_m6::TxRevertReason::Unknown(err) => Self::Unknown(err.glue_into()), + vm_m6::TxRevertReason::UnexpectedVMBehavior(err) => 
Self::UnexpectedVMBehavior(err), + vm_m6::TxRevertReason::BootloaderOutOfGas => Self::BootloaderOutOfGas, + vm_m6::TxRevertReason::TooBigGasLimit => Self::TooBigGasLimit, + vm_m6::TxRevertReason::NotEnoughGasProvided => Self::NotEnoughGasProvided, + vm_m6::TxRevertReason::MissingInvocationLimitReached => { + Self::MissingInvocationLimitReached + } + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs new file mode 100644 index 000000000000..8b6225d2cfe5 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -0,0 +1,19 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_vm1_3_2::VmBlockResult { + fn glue_from(value: vm_m5::VmBlockResult) -> Self { + Self { + full_result: value.full_result.glue_into(), + block_tip_result: value.block_tip_result.glue_into(), + } + } +} + +impl GlueFrom for vm_vm1_3_2::VmBlockResult { + fn glue_from(value: vm_m6::VmBlockResult) -> Self { + Self { + full_result: value.full_result.glue_into(), + block_tip_result: value.block_tip_result.glue_into(), + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_execution_result.rs new file mode 100644 index 000000000000..c80c2ea526da --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_execution_result.rs @@ -0,0 +1,55 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_vm1_3_2::VmExecutionResult { + fn glue_from(value: vm_m5::VmExecutionResult) -> Self { + Self { + events: value.events.into_iter().map(GlueInto::glue_into).collect(), + storage_log_queries: value + .storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + used_contract_hashes: value.used_contract_hashes, + l2_to_l1_logs: value + .l2_to_l1_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), + return_data: value.return_data, + gas_used: value.gas_used, + computational_gas_used: value.gas_used, // Substitute due to lack of such field + contracts_used: value.contracts_used, + revert_reason: value.revert_reason.map(GlueFrom::glue_from), + trace: zksync_types::vm_trace::VmTrace::ExecutionTrace(value.trace.glue_into()), + total_log_queries: value.total_log_queries, + cycles_used: value.cycles_used, + } + } +} + +impl GlueFrom for vm_vm1_3_2::VmExecutionResult { + fn glue_from(value: vm_m6::VmExecutionResult) -> Self { + Self { + events: value.events.into_iter().map(GlueInto::glue_into).collect(), + storage_log_queries: value + .storage_log_queries + .into_iter() + .map(GlueInto::glue_into) + .collect(), + used_contract_hashes: value.used_contract_hashes, + l2_to_l1_logs: value + .l2_to_l1_logs + .into_iter() + .map(GlueInto::glue_into) + .collect(), + return_data: value.return_data, + gas_used: value.gas_used, + computational_gas_used: value.gas_used, // Substitute due to lack of such field + contracts_used: value.contracts_used, + revert_reason: value.revert_reason.map(GlueFrom::glue_from), + trace: value.trace.glue_into(), + total_log_queries: value.total_log_queries, + cycles_used: value.cycles_used, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs new file mode 100644 index 000000000000..31ebacacb401 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -0,0 +1,25 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_vm1_3_2::vm::VmPartialExecutionResult { + fn 
glue_from(value: vm_m5::vm::VmPartialExecutionResult) -> Self { + Self { + logs: value.logs.glue_into(), + revert_reason: value.revert_reason.map(GlueInto::glue_into), + contracts_used: value.contracts_used, + cycles_used: value.cycles_used, + computational_gas_used: 0, + } + } +} + +impl GlueFrom for vm_vm1_3_2::vm::VmPartialExecutionResult { + fn glue_from(value: vm_m6::vm::VmPartialExecutionResult) -> Self { + Self { + logs: value.logs.glue_into(), + revert_reason: value.revert_reason.map(GlueInto::glue_into), + contracts_used: value.contracts_used, + cycles_used: value.cycles_used, + computational_gas_used: 0, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs b/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs new file mode 100644 index 000000000000..57bd97a21985 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_revert_reason.rs @@ -0,0 +1,38 @@ +use crate::glue::GlueFrom; + +impl GlueFrom for vm_vm1_3_2::errors::VmRevertReason { + fn glue_from(value: vm_m5::errors::VmRevertReason) -> Self { + match value { + vm_m5::errors::VmRevertReason::General { msg } => Self::General { + msg, + data: Vec::new(), + }, + vm_m5::errors::VmRevertReason::InnerTxError => Self::InnerTxError, + vm_m5::errors::VmRevertReason::VmError => Self::VmError, + vm_m5::errors::VmRevertReason::Unknown { + function_selector, + data, + } => Self::Unknown { + function_selector, + data, + }, + } + } +} + +impl GlueFrom for vm_vm1_3_2::errors::VmRevertReason { + fn glue_from(value: vm_m6::errors::VmRevertReason) -> Self { + match value { + vm_m6::errors::VmRevertReason::General { msg, data } => Self::General { msg, data }, + vm_m6::errors::VmRevertReason::InnerTxError => Self::InnerTxError, + vm_m6::errors::VmRevertReason::VmError => Self::VmError, + vm_m6::errors::VmRevertReason::Unknown { + function_selector, + data, + } => Self::Unknown { + function_selector, + data, + }, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_revert_reason_parsing_result.rs b/core/lib/multivm/src/glue/types/vm/vm_revert_reason_parsing_result.rs new file mode 100644 index 000000000000..a88bd76c77bc --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_revert_reason_parsing_result.rs @@ -0,0 +1,23 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom + for vm_vm1_3_2::errors::VmRevertReasonParsingResult +{ + fn glue_from(value: vm_m5::errors::VmRevertReasonParsingResult) -> Self { + Self { + revert_reason: value.revert_reason.glue_into(), + original_data: value.original_data, + } + } +} + +impl GlueFrom + for vm_vm1_3_2::errors::VmRevertReasonParsingResult +{ + fn glue_from(value: vm_m6::errors::VmRevertReasonParsingResult) -> Self { + Self { + revert_reason: value.revert_reason.glue_into(), + original_data: value.original_data, + } + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs new file mode 100644 index 000000000000..417bb98adc47 --- /dev/null +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -0,0 +1,25 @@ +use crate::glue::{GlueFrom, GlueInto}; + +impl GlueFrom for vm_vm1_3_2::vm::VmTxExecutionResult { + fn glue_from(value: vm_m5::vm::VmTxExecutionResult) -> Self { + Self { + status: value.status.glue_into(), + result: value.result.glue_into(), + call_traces: vec![], // Substitute due to lack of fields. 
+ gas_refunded: value.gas_refunded, + operator_suggested_refund: value.operator_suggested_refund, + } + } +} + +impl GlueFrom for vm_vm1_3_2::vm::VmTxExecutionResult { + fn glue_from(value: vm_m6::vm::VmTxExecutionResult) -> Self { + Self { + status: value.status.glue_into(), + result: value.result.glue_into(), + call_traces: vec![], // Substitute due to lack of fields. + gas_refunded: value.gas_refunded, + operator_suggested_refund: value.operator_suggested_refund, + } + } +} diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs new file mode 100644 index 000000000000..deffe7d63464 --- /dev/null +++ b/core/lib/multivm/src/lib.rs @@ -0,0 +1,28 @@ +pub use crate::{ + glue::{ + block_properties::BlockProperties, + init_vm::{init_vm, init_vm_with_gas_limit}, + oracle_tools::OracleTools, + }, + vm_instance::VmInstance, +}; + +mod glue; +mod vm_instance; + +/// Marker of the VM version to be used by the MultiVM. +#[derive(Debug, Clone, Copy)] +pub enum VmVersion { + M5WithoutRefunds, + M5WithRefunds, + M6Initial, + M6BugWithCompressionFixed, + Vm1_3_2, +} + +impl VmVersion { + /// Returns the latest supported VM version. + pub const fn latest() -> VmVersion { + Self::Vm1_3_2 + } +} diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs new file mode 100644 index 000000000000..f8c9d5301549 --- /dev/null +++ b/core/lib/multivm/src/vm_instance.rs @@ -0,0 +1,141 @@ +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::glue::GlueInto; + +#[derive(Debug)] +pub enum VmInstance<'a> { + VmM5(Box>), + VmM6(Box>), + Vm1_3_2(Box>), +} + +impl<'a> VmInstance<'a> { + pub fn gas_consumed(&self) -> u32 { + match self { + VmInstance::VmM5(vm) => vm.gas_consumed(), + VmInstance::VmM6(vm) => vm.gas_consumed(), + VmInstance::Vm1_3_2(vm) => vm.gas_consumed(), + } + } + + pub fn save_current_vm_as_snapshot(&mut self) { + match self { + VmInstance::VmM5(vm) => vm.save_current_vm_as_snapshot(), + VmInstance::VmM6(vm) => vm.save_current_vm_as_snapshot(), + VmInstance::Vm1_3_2(vm) => vm.save_current_vm_as_snapshot(), + } + } + + pub fn rollback_to_snapshot_popping(&mut self) { + match self { + VmInstance::VmM5(vm) => vm.rollback_to_latest_snapshot_popping(), + VmInstance::VmM6(vm) => vm.rollback_to_latest_snapshot_popping(), + VmInstance::Vm1_3_2(vm) => vm.rollback_to_latest_snapshot_popping(), + } + } + + pub fn pop_snapshot_no_rollback(&mut self) { + match self { + VmInstance::VmM5(vm) => { + // A dedicated method was added later. 
+ vm.snapshots.pop().unwrap(); + } + VmInstance::VmM6(vm) => vm.pop_snapshot_no_rollback(), + VmInstance::Vm1_3_2(vm) => vm.pop_snapshot_no_rollback(), + } + } + + pub fn execute_till_block_end( + &mut self, + job_type: vm_vm1_3_2::vm_with_bootloader::BootloaderJobType, + ) -> vm_vm1_3_2::VmBlockResult { + match self { + VmInstance::VmM5(vm) => vm.execute_till_block_end(job_type.glue_into()).glue_into(), + VmInstance::VmM6(vm) => vm.execute_till_block_end(job_type.glue_into()).glue_into(), + VmInstance::Vm1_3_2(vm) => vm.execute_till_block_end(job_type).glue_into(), + } + } + + pub fn is_bytecode_known(&self, bytecode_hash: &zksync_types::H256) -> bool { + match self { + VmInstance::VmM5(_) => { + false + } + VmInstance::VmM6(vm) => vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(bytecode_hash), + VmInstance::Vm1_3_2(vm) => vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(bytecode_hash), + } + } + + pub fn push_transaction_to_bootloader_memory( + &mut self, + tx: &zksync_types::Transaction, + execution_mode: vm_vm1_3_2::vm_with_bootloader::TxExecutionMode, + explicit_compressed_bytecodes: Option>, + ) { + match self { + VmInstance::VmM5(vm) => { + vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( + vm, + tx, + execution_mode.glue_into(), + ) + } + VmInstance::VmM6(vm) => { + vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( + vm, + tx, + execution_mode.glue_into(), + explicit_compressed_bytecodes, + ) + } + VmInstance::Vm1_3_2(vm) => { + vm_vm1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( + vm, + tx, + execution_mode, + explicit_compressed_bytecodes, + ) + } + } + } + + pub fn execute_next_tx( + &mut self, + validation_computational_gas_limit: u32, + with_call_tracer: bool, + ) -> Result { + match self { + VmInstance::VmM5(vm) => vm + .execute_next_tx() + .map(GlueInto::glue_into) + .map_err(GlueInto::glue_into), + VmInstance::VmM6(vm) => vm + .execute_next_tx(validation_computational_gas_limit, with_call_tracer) + .map(GlueInto::glue_into) + .map_err(GlueInto::glue_into), + VmInstance::Vm1_3_2(vm) => { + vm.execute_next_tx(validation_computational_gas_limit, with_call_tracer) + } + } + } + + pub fn execute_block_tip(&mut self) -> vm_vm1_3_2::vm::VmPartialExecutionResult { + match self { + VmInstance::VmM5(vm) => vm.execute_block_tip().glue_into(), + VmInstance::VmM6(vm) => vm.execute_block_tip().glue_into(), + VmInstance::Vm1_3_2(vm) => vm.execute_block_tip(), + } + } +} diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs index 4b31cc43e6b9..727ef1e8d53a 100644 --- a/core/lib/object_store/src/mock.rs +++ b/core/lib/object_store/src/mock.rs @@ -40,7 +40,7 @@ impl ObjectStore for MockStore { async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { let mut lock = self.inner.lock().await; let Some(bucket_map) = lock.get_mut(&bucket) else { - return Ok(()) + return Ok(()); }; bucket_map.remove(key); Ok(()) diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index 6661ae0f0d06..d6858d683d96 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -2,6 +2,7 @@ use zksync_types::{ proofs::{AggregationRound, PrepareBasicCircuitsJob}, + prover_server_api::RawProof, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, @@ -189,6 +190,17 @@ impl StoredObject for ZkSyncCircuit> { 
serialize_using_bincode!(); } +impl StoredObject for RawProof { + const BUCKET: Bucket = Bucket::ProofsFri; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("proof_fri_{key}.bin") + } + + serialize_using_bincode!(); +} + impl dyn ObjectStore + '_ { /// Fetches the value for the given key if it exists. /// diff --git a/core/lib/prometheus_exporter/Cargo.toml b/core/lib/prometheus_exporter/Cargo.toml index 76151faac830..4a5446de11b9 100644 --- a/core/lib/prometheus_exporter/Cargo.toml +++ b/core/lib/prometheus_exporter/Cargo.toml @@ -13,5 +13,4 @@ categories = ["cryptography"] tokio = "1" metrics = "0.20" metrics-exporter-prometheus = "0.11" -zksync_config = { path = "../config", version = "1.0" } vlog = { path = "../vlog", version = "1.0" } diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs index d56b0db345c6..d3d73de6a74c 100644 --- a/core/lib/prometheus_exporter/src/lib.rs +++ b/core/lib/prometheus_exporter/src/lib.rs @@ -37,7 +37,7 @@ pub fn run_prometheus_exporter( let zero_to_one_buckets = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]; let around_one_buckets = [ - 0.1, 0.25, 0.5, 0.75, 1., 1.5, 2., 3., 4., 5., 10., 20., 50., 100., 1000., + 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., ]; let builder = if let Some((pushgateway_url, push_interval)) = pushgateway_config { diff --git a/core/lib/prover_utils/Cargo.toml b/core/lib/prover_utils/Cargo.toml index c92299487283..c1939867af82 100644 --- a/core/lib/prover_utils/Cargo.toml +++ b/core/lib/prover_utils/Cargo.toml @@ -20,3 +20,5 @@ regex = "1.7.2" tokio = "1.27.0" futures = { version = "0.3", features = ["compat"] } ctrlc = { version = "3.1", features = ["termination"] } +toml_edit = "0.14.4" +async-trait = "0.1" diff --git a/core/lib/prover_utils/src/lib.rs b/core/lib/prover_utils/src/lib.rs index 161370a0d39d..e010afc30004 100644 --- a/core/lib/prover_utils/src/lib.rs +++ b/core/lib/prover_utils/src/lib.rs @@ -6,7 +6,9 @@ use std::{fs::create_dir_all, io::Cursor, path::Path, time::Duration, time::Inst use futures::{channel::mpsc, executor::block_on, SinkExt}; +pub mod periodic_job; pub mod region_fetcher; +pub mod vk_commitment_helper; fn download_bytes(key_download_url: &str) -> reqwest::Result> { vlog::info!("Downloading initial setup from {:?}", key_download_url); diff --git a/core/bin/zksync_core/src/house_keeper/periodic_job.rs b/core/lib/prover_utils/src/periodic_job.rs similarity index 100% rename from core/bin/zksync_core/src/house_keeper/periodic_job.rs rename to core/lib/prover_utils/src/periodic_job.rs diff --git a/core/lib/prover_utils/src/vk_commitment_helper.rs b/core/lib/prover_utils/src/vk_commitment_helper.rs new file mode 100644 index 000000000000..c8f419b44c42 --- /dev/null +++ b/core/lib/prover_utils/src/vk_commitment_helper.rs @@ -0,0 +1,25 @@ +use std::fs; +use toml_edit::{Document, Item, Value}; + +pub fn get_toml_formatted_value(string_value: String) -> Item { + let mut value = Value::from(string_value); + value.decor_mut().set_prefix(""); + Item::Value(value) +} + +pub fn write_contract_toml(contract_doc: Document) { + let path = get_contract_toml_path(); + fs::write(path, contract_doc.to_string()).expect("Failed writing to contract.toml file"); +} + +pub fn read_contract_toml() -> Document { + let path = get_contract_toml_path(); + let toml_data = std::fs::read_to_string(path.clone()) + .unwrap_or_else(|_| panic!("contract.toml file does not exist on path {}", path)); + 
diff --git a/core/lib/prometheus_exporter/Cargo.toml b/core/lib/prometheus_exporter/Cargo.toml
index 76151faac830..4a5446de11b9 100644
--- a/core/lib/prometheus_exporter/Cargo.toml
+++ b/core/lib/prometheus_exporter/Cargo.toml
@@ -13,5 +13,4 @@ categories = ["cryptography"]
 tokio = "1"
 metrics = "0.20"
 metrics-exporter-prometheus = "0.11"
-zksync_config = { path = "../config", version = "1.0" }
 vlog = { path = "../vlog", version = "1.0" }
diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs
index d56b0db345c6..d3d73de6a74c 100644
--- a/core/lib/prometheus_exporter/src/lib.rs
+++ b/core/lib/prometheus_exporter/src/lib.rs
@@ -37,7 +37,7 @@ pub fn run_prometheus_exporter(
     let zero_to_one_buckets = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9];
     let around_one_buckets = [
-        0.1, 0.25, 0.5, 0.75, 1., 1.5, 2., 3., 4., 5., 10., 20., 50., 100., 1000.,
+        0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50.,
     ];
 
     let builder = if let Some((pushgateway_url, push_interval)) = pushgateway_config {
diff --git a/core/lib/prover_utils/Cargo.toml b/core/lib/prover_utils/Cargo.toml
index c92299487283..c1939867af82 100644
--- a/core/lib/prover_utils/Cargo.toml
+++ b/core/lib/prover_utils/Cargo.toml
@@ -20,3 +20,5 @@ regex = "1.7.2"
 tokio = "1.27.0"
 futures = { version = "0.3", features = ["compat"] }
 ctrlc = { version = "3.1", features = ["termination"] }
+toml_edit = "0.14.4"
+async-trait = "0.1"
diff --git a/core/lib/prover_utils/src/lib.rs b/core/lib/prover_utils/src/lib.rs
index 161370a0d39d..e010afc30004 100644
--- a/core/lib/prover_utils/src/lib.rs
+++ b/core/lib/prover_utils/src/lib.rs
@@ -6,7 +6,9 @@ use std::{fs::create_dir_all, io::Cursor, path::Path, time::Duration, time::Instant};
 
 use futures::{channel::mpsc, executor::block_on, SinkExt};
 
+pub mod periodic_job;
 pub mod region_fetcher;
+pub mod vk_commitment_helper;
 
 fn download_bytes(key_download_url: &str) -> reqwest::Result<Vec<u8>> {
     vlog::info!("Downloading initial setup from {:?}", key_download_url);
diff --git a/core/bin/zksync_core/src/house_keeper/periodic_job.rs b/core/lib/prover_utils/src/periodic_job.rs
similarity index 100%
rename from core/bin/zksync_core/src/house_keeper/periodic_job.rs
rename to core/lib/prover_utils/src/periodic_job.rs
diff --git a/core/lib/prover_utils/src/vk_commitment_helper.rs b/core/lib/prover_utils/src/vk_commitment_helper.rs
new file mode 100644
index 000000000000..c8f419b44c42
--- /dev/null
+++ b/core/lib/prover_utils/src/vk_commitment_helper.rs
@@ -0,0 +1,25 @@
+use std::fs;
+use toml_edit::{Document, Item, Value};
+
+pub fn get_toml_formatted_value(string_value: String) -> Item {
+    let mut value = Value::from(string_value);
+    value.decor_mut().set_prefix("");
+    Item::Value(value)
+}
+
+pub fn write_contract_toml(contract_doc: Document) {
+    let path = get_contract_toml_path();
+    fs::write(path, contract_doc.to_string()).expect("Failed writing to contract.toml file");
+}
+
+pub fn read_contract_toml() -> Document {
+    let path = get_contract_toml_path();
+    let toml_data = std::fs::read_to_string(path.clone())
+        .unwrap_or_else(|_| panic!("contract.toml file does not exist on path {}", path));
+    toml_data.parse::<Document>().expect("invalid config file")
+}
+
+pub fn get_contract_toml_path() -> String {
+    let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into());
+    format!("{}/etc/env/base/contracts.toml", zksync_home)
+}
diff --git a/core/lib/queued_job_processor/Cargo.toml b/core/lib/queued_job_processor/Cargo.toml
index 51a48efa90b5..bbcfc5f15f0e 100644
--- a/core/lib/queued_job_processor/Cargo.toml
+++ b/core/lib/queued_job_processor/Cargo.toml
@@ -14,6 +14,5 @@ categories = ["cryptography"]
 async-trait = "0.1"
 tokio = { version = "1", features = ["time"] }
 
-zksync_dal = {path = "../../lib/dal", version = "1.0" }
-zksync_utils = {path = "../../lib/utils", version = "1.0" }
+zksync_utils = { path = "../../lib/utils", version = "1.0" }
 vlog = { path = "../../lib/vlog", version = "1.0" }
diff --git a/core/lib/state/src/cache.rs b/core/lib/state/src/cache.rs
index 53461d291a0c..7ae6c58a74dd 100644
--- a/core/lib/state/src/cache.rs
+++ b/core/lib/state/src/cache.rs
@@ -3,41 +3,60 @@
 
 use std::{hash::Hash, time::Instant};
 
-type MokaBase<K> = mini_moka::sync::Cache<K, Vec<u8>>;
+type MokaBase<K, V> = mini_moka::sync::Cache<K, V>;
+
+/// Trait for values that can be put into [`Cache`]. The type param denotes the key type.
+pub trait CacheValue<K>: Clone + Send + Sync {
+    /// Weight of this value that determines when the cache LRU logic kicks in. Should be
+    /// exactly or approximately equal to the total byte size of the value, including heap-allocated
+    /// data (and, potentially, the key byte size if the value size is always small).
+    fn cache_weight(&self) -> u32;
+}
 
 /// [`Cache`] implementation that uses LRU eviction policy.
 #[derive(Debug, Clone)]
-pub struct Cache<K: Eq + Hash> {
+pub struct Cache<K: Eq + Hash, V> {
     name: &'static str,
-    cache: MokaBase<K>,
+    cache: Option<MokaBase<K, V>>,
 }
 
-impl<K: Eq + Hash + Send + Sync + 'static> Cache<K> {
+impl<K, V> Cache<K, V>
+where
+    K: Eq + Hash + Send + Sync + 'static,
+    V: CacheValue<K> + 'static,
+{
     /// Creates a new cache.
     ///
     /// # Panics
     ///
-    /// Panics if an invalid cache capacity (usize) is provided.
-    pub fn new(name: &'static str, capacity_mb: usize) -> Self {
-        let cache = MokaBase::<K>::builder()
-            .weigher(|_, value| -> u32 { value.len().try_into().unwrap_or(u32::MAX) })
-            .max_capacity((capacity_mb * 1_000_000) as u64)
-            .build();
+    /// Panics if an invalid cache capacity is provided.
+    pub fn new(name: &'static str, capacity: u64) -> Self {
+        let cache = if capacity == 0 {
+            None
+        } else {
+            Some(
+                MokaBase::<K, V>::builder()
+                    .weigher(|_, value| value.cache_weight())
+                    .max_capacity(capacity)
+                    .build(),
+            )
+        };
         Self { name, cache }
     }
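// A standalone sketch of the weigher-based construction used by `Cache::new()` above
// (mini-moka builder API, exactly as invoked in this file). The weigher makes `max_capacity`
// a *byte* budget rather than an entry count, which is why a zero budget is special-cased to
// `None` instead of building a zero-capacity moka cache — see the `cache_with_zero_capacity`
// test below: a zero-capacity moka cache may still hold an item until its first eviction pass.
fn build_byte_bounded_cache(capacity_bytes: u64) -> mini_moka::sync::Cache<H256, Vec<u8>> {
    mini_moka::sync::Cache::builder()
        // Weigh each entry by its payload size in bytes.
        .weigher(|_key: &H256, value: &Vec<u8>| value.len().try_into().unwrap_or(u32::MAX))
        .max_capacity(capacity_bytes)
        .build()
}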
 
     /// Gets an entry and pulls it to the front if it exists.
-    pub fn get(&self, key: &K) -> Option<Vec<u8>> {
+    pub fn get(&self, key: &K) -> Option<V> {
         let start_instant = Instant::now();
-        let entry = self.cache.get(key);
+        let entry = self.cache.as_ref()?.get(key);
+        // ^ We intentionally don't report metrics if there's no real cache.
+
         metrics::histogram!(
             "server.state_cache.latency",
             start_instant.elapsed(),
             "name" => self.name,
             "method" => "get",
         );
-
         metrics::increment_counter!(
             "server.state_cache.requests",
             "name" => self.name,
@@ -48,21 +67,75 @@ impl<K: Eq + Hash + Send + Sync + 'static> Cache<K> {
     }
 
     /// Pushes an entry and performs LRU cache operations.
-    pub fn insert(&self, key: K, value: Vec<u8>) {
+    pub fn insert(&self, key: K, value: V) {
         let start_instant = Instant::now();
-        self.cache.insert(key, value);
+        let Some(cache) = self.cache.as_ref() else {
+            return;
+        };
+        // ^ We intentionally don't report metrics if there's no real cache.
+
+        cache.insert(key, value);
+
         metrics::histogram!(
             "server.state_cache.latency",
             start_instant.elapsed(),
             "name" => self.name,
             "method" => "insert"
         );
+        self.report_size();
+    }
 
-        metrics::gauge!("server.state_cache.len", self.cache.entry_count() as f64, "name" => self.name);
-        metrics::gauge!(
-            "server.state_cache.used_memory",
-            self.cache.weighted_size() as f64,
-            "name" => self.name,
-        );
+    pub(crate) fn report_size(&self) {
+        if let Some(cache) = &self.cache {
+            metrics::gauge!("server.state_cache.len", cache.entry_count() as f64, "name" => self.name);
+            metrics::gauge!(
+                "server.state_cache.used_memory",
+                cache.weighted_size() as f64,
+                "name" => self.name,
+            );
+        }
+    }
+
+    /// Removes the specified key from this cache.
+    pub fn remove(&self, key: &K) {
+        if let Some(cache) = &self.cache {
+            cache.invalidate(key);
+        }
+    }
+
+    /// Removes all entries from this cache.
+    pub fn clear(&self) {
+        if let Some(cache) = &self.cache {
+            cache.invalidate_all();
+            self.report_size();
+        }
+    }
+
+    #[cfg(test)]
+    pub(crate) fn estimated_len(&self) -> u64 {
+        self.cache.as_ref().map_or(0, MokaBase::entry_count)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use zksync_types::H256;
+
+    use super::*;
+
+    #[test]
+    fn cache_with_zero_capacity() {
+        let zero_cache = Cache::<H256, Vec<u8>>::new("test", 0);
+        zero_cache.insert(H256::zero(), vec![1, 2, 3]);
+        assert_eq!(zero_cache.get(&H256::zero()), None);
+
+        // The zero-capacity `MokaBase` cache can actually contain items temporarily!
+        let not_quite_zero_cache = MokaBase::<H256, Vec<u8>>::builder()
+            .weigher(|_, value| value.cache_weight())
+            .max_capacity(0)
+            .build();
+        not_quite_zero_cache.insert(H256::zero(), vec![1, 2, 3]);
+        assert_eq!(not_quite_zero_cache.get(&H256::zero()), Some(vec![1, 2, 3]));
+        // The item is evicted after the first access.
+        assert_eq!(not_quite_zero_cache.get(&H256::zero()), None);
     }
 }
diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs
index c2980e5ce939..8f9b8ff4c190 100644
--- a/core/lib/state/src/lib.rs
+++ b/core/lib/state/src/lib.rs
@@ -21,15 +21,16 @@ mod cache;
 mod in_memory;
 mod postgres;
 mod rocksdb;
+mod shadow_storage;
 mod storage_view;
 #[cfg(test)]
 mod test_utils;
 
 pub use self::{
     in_memory::InMemoryStorage,
-    postgres::FactoryDepsCache,
-    postgres::PostgresStorage,
+    postgres::{PostgresStorage, PostgresStorageCaches},
     rocksdb::RocksdbStorage,
+    shadow_storage::ShadowStorage,
     storage_view::{StorageView, StorageViewMetrics},
 };
diff --git a/core/lib/state/src/postgres.rs b/core/lib/state/src/postgres.rs
index 066d20663d14..bbacf1fd2002 100644
--- a/core/lib/state/src/postgres.rs
+++ b/core/lib/state/src/postgres.rs
@@ -1,22 +1,322 @@
-use tokio::{runtime::Handle, time::Instant};
+use tokio::{runtime::Handle, sync::mpsc, time::Instant};
 
-use crate::cache::Cache;
-use crate::ReadStorage;
-use zksync_dal::StorageProcessor;
+use std::{
+    mem,
+    sync::{Arc, RwLock},
+};
+
+use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_types::{L1BatchNumber, MiniblockNumber, StorageKey, StorageValue, H256};
 
-/// [`FactoryDepsCache`] type alias for smart contract source code cache
-pub type FactoryDepsCache = Cache<H256>;
+use crate::{
+    cache::{Cache, CacheValue},
+    ReadStorage,
+};
+
+/// Type alias for smart contract source code cache.
+type FactoryDepsCache = Cache<H256, Vec<u8>>;
+
+impl CacheValue<H256> for Vec<u8> {
+    fn cache_weight(&self) -> u32 {
+        self.len().try_into().expect("Cached bytes are too large")
+    }
+}
+
+/// Type alias for initial writes caches.
+type InitialWritesCache = Cache<StorageKey, L1BatchNumber>;
+
+impl CacheValue<StorageKey> for L1BatchNumber {
+    #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice
+    fn cache_weight(&self) -> u32 {
+        const WEIGHT: usize = mem::size_of::<L1BatchNumber>() + mem::size_of::<StorageKey>();
+        // ^ Since values are small in size, we want to account for key sizes as well
+
+        WEIGHT as u32
+    }
+}
+
+impl CacheValue<H256> for StorageValue {
+    #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice
+    fn cache_weight(&self) -> u32 {
+        const WEIGHT: usize = mem::size_of::<StorageValue>() + mem::size_of::<H256>();
+        // ^ Since values are small in size, we want to account for key sizes as well
+
+        WEIGHT as u32
+    }
+}
+
+#[derive(Debug)]
+struct ValuesCacheInner {
+    /// Miniblock for which `self.values` are valid. Has the same meaning as `miniblock_number`
+    /// in `PostgresStorage` (i.e., the latest sealed miniblock for which storage logs should
+    /// be taken into account).
+    valid_for: MiniblockNumber,
+    values: Cache<H256, StorageValue>,
+}
+
+/// Cache for the VM storage. Only caches values for a single VM storage snapshot, which logically
+/// corresponds to the latest sealed miniblock in Postgres.
+///
+/// The cached snapshot can be updated, which will load changed storage keys from Postgres and remove
+/// the (potentially stale) cached values for these keys.
+///
+/// # Why wrap the cache in `RwLock`?
+///
+/// We need to be sure that the `valid_for` miniblock of the values cache has not changed while we are
+/// loading or storing values in it. This is easiest to achieve using an `RwLock`. Note that
+/// almost all cache ops require only shared access to the lock (including cache updates!); we only
+/// need exclusive access when we are updating the `valid_for` miniblock. Further, the update itself
+/// doesn't grab the lock until *after* the Postgres data has been loaded. (This works because we
+/// know statically that there is a single thread updating the cache; hence, we have no contention
+/// over updating the cache.) To summarize, `RwLock` should see barely any contention.
+#[derive(Debug, Clone)]
+struct ValuesCache(Arc<RwLock<ValuesCacheInner>>);
+
+impl ValuesCache {
+    fn new(capacity: u64) -> Self {
+        let inner = ValuesCacheInner {
+            valid_for: MiniblockNumber(0),
+            values: Cache::new("values_cache", capacity),
+        };
+        Self(Arc::new(RwLock::new(inner)))
+    }
+
+    /// *NB.* The returned value should be considered immediately stale; at best, it can be
+    /// the lower boundary on the current `valid_for` value.
+    fn valid_for(&self) -> MiniblockNumber {
+        self.0.read().expect("values cache is poisoned").valid_for
+    }
+
+    /// Gets the cached value for `key` provided that the cache currently holds values
+    /// for `miniblock_number`.
+    fn get(&self, miniblock_number: MiniblockNumber, key: &StorageKey) -> Option<StorageValue> {
+        let lock = self.0.read().expect("values cache is poisoned");
+        if lock.valid_for == miniblock_number {
+            lock.values.get(&key.hashed_key())
+        } else {
+            metrics::increment_counter!("server.state_cache.stale_values", "method" => "get");
+            None
+        }
+    }
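// A minimal model of the locking discipline described in the doc comment above: reads *and*
// inserts take the shared `RwLock` guard (the inner cache is internally synchronized, so its
// `insert` only needs `&self`), while the exclusive guard is taken solely to advance the
// `valid_for` version. Simplified sketch; the real `ValuesCache` adds metrics and weighting.
use std::sync::RwLock;
use zksync_types::{MiniblockNumber, StorageValue, H256};

struct VersionedCache {
    inner: RwLock<(MiniblockNumber, mini_moka::sync::Cache<H256, StorageValue>)>,
}

impl VersionedCache {
    fn new(capacity: u64) -> Self {
        Self {
            inner: RwLock::new((MiniblockNumber(0), mini_moka::sync::Cache::new(capacity))),
        }
    }

    fn get(&self, version: MiniblockNumber, key: &H256) -> Option<StorageValue> {
        let guard = self.inner.read().unwrap(); // shared lock
        if guard.0 == version {
            guard.1.get(key)
        } else {
            None // stale request; the caller falls back to Postgres
        }
    }

    fn insert(&self, version: MiniblockNumber, key: H256, value: StorageValue) {
        let guard = self.inner.read().unwrap(); // shared lock suffices for inserts
        if guard.0 == version {
            guard.1.insert(key, value);
        }
    }

    fn advance(&self, to: MiniblockNumber, stale_keys: &[H256]) {
        // Exclusive lock, taken only after the stale keys were loaded from Postgres.
        let mut guard = self.inner.write().unwrap();
        guard.0 = to;
        for key in stale_keys {
            guard.1.invalidate(key);
        }
    }
}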
+
+    /// Caches `value` for `key`, but only if the cache currently holds values for `miniblock_number`.
+    fn insert(&self, miniblock_number: MiniblockNumber, key: StorageKey, value: StorageValue) {
+        let lock = self.0.read().expect("values cache is poisoned");
+        if lock.valid_for == miniblock_number {
+            lock.values.insert(key.hashed_key(), value);
+        } else {
+            metrics::increment_counter!("server.state_cache.stale_values", "method" => "insert");
+        }
+    }
+
+    #[allow(clippy::cast_precision_loss)] // acceptable for metrics
+    fn update(
+        &self,
+        from_miniblock: MiniblockNumber,
+        to_miniblock: MiniblockNumber,
+        rt_handle: &Handle,
+        connection: &mut StorageProcessor<'_>,
+    ) {
+        const MAX_MINIBLOCKS_LAG: u32 = 5;
+
+        vlog::debug!(
+            "Updating storage values cache from miniblock {from_miniblock} to {to_miniblock}"
+        );
+
+        if to_miniblock.0 - from_miniblock.0 > MAX_MINIBLOCKS_LAG {
+            // We can spend too much time loading data from Postgres, so we opt for an easier "update" route:
+            // evict *everything* from cache and call it a day. This should not happen too often in practice.
+            vlog::info!(
+                "Storage values cache is too far behind (current miniblock is {from_miniblock}; \
+                 requested update to {to_miniblock}); resetting the cache"
+            );
+            let mut lock = self.0.write().expect("values cache is poisoned");
+            assert_eq!(lock.valid_for, from_miniblock);
+            lock.valid_for = to_miniblock;
+            lock.values.clear();
+
+            metrics::increment_counter!("server.state_cache.values_emptied");
+        } else {
+            let stage_started_at = Instant::now();
+            let miniblocks = (from_miniblock + 1)..=to_miniblock;
+            let modified_keys = rt_handle.block_on(
+                connection
+                    .storage_web3_dal()
+                    .modified_keys_in_miniblocks(miniblocks.clone()),
+            );
+
+            let elapsed = stage_started_at.elapsed();
+            metrics::histogram!(
+                "server.state_cache.values_update",
+                elapsed,
+                "stage" => "load_keys"
+            );
+            metrics::histogram!(
+                "server.state_cache.values_update.modified_keys",
+                modified_keys.len() as f64
+            );
+            vlog::debug!(
+                "Loaded {modified_keys_len} modified storage keys from miniblocks {miniblocks:?}; \
+                 took {elapsed:?}",
+                modified_keys_len = modified_keys.len()
+            );
+
+            let stage_started_at = Instant::now();
+            let mut lock = self.0.write().expect("values cache is poisoned");
+            // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock`
+            // (other than emptying the cache above). Thus, it's kept as simple and tight as possible.
+            // E.g., we load data from Postgres beforehand.
+            assert_eq!(lock.valid_for, from_miniblock);
+            lock.valid_for = to_miniblock;
+            for modified_key in &modified_keys {
+                lock.values.remove(modified_key);
+            }
+            lock.values.report_size();
+            drop(lock);
+
+            metrics::histogram!(
+                "server.state_cache.values_update",
+                stage_started_at.elapsed(),
+                "stage" => "remove_stale_keys"
+            );
+        }
+        metrics::gauge!(
+            "server.state_cache.values_valid_for_miniblock",
+            f64::from(to_miniblock.0)
+        );
+    }
+}
+
+#[derive(Debug, Clone)]
+struct ValuesCacheAndUpdater {
+    cache: ValuesCache,
+    command_sender: mpsc::UnboundedSender<MiniblockNumber>,
+}
+
+/// Caches used during VM execution.
+///
+/// Currently, this struct includes the following caches:
+///
+/// - Cache for smart contract bytecodes (never invalidated, since it is content-addressable)
+/// - Cache for L1 batch numbers of initial writes for storage keys (never invalidated, except after
+///   reverting L1 batch execution)
+/// - Cache of the VM storage snapshot corresponding to the latest sealed miniblock
+#[derive(Debug, Clone)]
+pub struct PostgresStorageCaches {
+    factory_deps: FactoryDepsCache,
+    initial_writes: InitialWritesCache,
+    // Besides L1 batch numbers for initial writes, we also cache information that a certain key
+    // was not written to before a certain L1 batch (i.e., this lower boundary is the cached value).
+    //
+    // This is caused by the observation that a significant part of `is_write_initial()` queries returns `true`
+    // (i.e., the corresponding key was not written to).
+    // If we don't cache this information, we'll query Postgres multiple times for the same key even if we know
+    // it wasn't written to at the point that interests us.
+    negative_initial_writes: InitialWritesCache,
+    values: Option<ValuesCacheAndUpdater>,
+}
+
+impl PostgresStorageCaches {
+    const NEG_INITIAL_WRITES_NAME: &'static str = "negative_initial_writes_cache";
+
+    /// Creates caches with the specified capacities measured in bytes.
+    pub fn new(factory_deps_capacity: u64, initial_writes_capacity: u64) -> Self {
+        vlog::debug!(
+            "Initialized VM execution cache with {factory_deps_capacity}B capacity for factory deps, \
+             {initial_writes_capacity}B capacity for initial writes"
+        );
+
+        Self {
+            factory_deps: FactoryDepsCache::new("factory_deps_cache", factory_deps_capacity),
+            initial_writes: InitialWritesCache::new(
+                "initial_writes_cache",
+                initial_writes_capacity / 2,
+            ),
+            negative_initial_writes: InitialWritesCache::new(
+                Self::NEG_INITIAL_WRITES_NAME,
+                initial_writes_capacity / 2,
+            ),
+            values: None,
+        }
+    }
+
+    /// Configures the VM storage values cache. The returned closure is the background task that will update
+    /// the cache according to [`Self::schedule_values_update()`] calls. It should be spawned on a separate thread
+    /// or a blocking Tokio task.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the provided `capacity` is zero. (Check on the caller side beforehand if there is
+    /// such a possibility.)
+    pub fn configure_storage_values_cache(
+        &mut self,
+        capacity: u64,
+        connection_pool: ConnectionPool,
+        rt_handle: Handle,
+    ) -> impl FnOnce() + Send {
+        assert!(
+            capacity > 0,
+            "Storage values cache capacity must be positive"
+        );
+        vlog::debug!("Initializing VM storage values cache with {capacity}B capacity");
+
+        let (command_sender, mut command_receiver) = mpsc::unbounded_channel();
+        let values_cache = ValuesCache::new(capacity);
+        self.values = Some(ValuesCacheAndUpdater {
+            cache: values_cache.clone(),
+            command_sender,
+        });
+
+        // We want to run updates on a single thread in order to not block VM execution on the update
+        // and to keep contention over the `ValuesCache` lock as low as possible. As a downside,
+        // `Self::schedule_values_update()` will produce some no-op update commands from concurrently
+        // executing VM instances. Due to built-in filtering, this seems manageable.
+        move || {
+            let mut current_miniblock = values_cache.valid_for();
+            while let Some(to_miniblock) = command_receiver.blocking_recv() {
+                if to_miniblock <= current_miniblock {
+                    continue;
+                }
+                let mut connection = rt_handle
+                    .block_on(connection_pool.access_storage_tagged("values_cache_updater"));
+                values_cache.update(current_miniblock, to_miniblock, &rt_handle, &mut connection);
+                current_miniblock = to_miniblock;
+            }
+        }
+    }
+
+    /// Schedules an update of the VM storage values cache to the specified miniblock.
+    ///
+    /// # Panics
+    ///
+    /// - Panics if the cache wasn't previously configured using [`Self::configure_storage_values_cache()`].
+    /// - Panics if the cache update task returned from `configure_storage_values_cache()` has panicked.
+    pub fn schedule_values_update(&self, to_miniblock: MiniblockNumber) {
+        let values = self
+            .values
+            .as_ref()
+            .expect("`schedule_update()` called without configuring values cache");
+
+        if values.cache.valid_for() < to_miniblock {
+            // Filter out no-op updates right away in order to not store lots of them in RAM.
+            values
+                .command_sender
+                .send(to_miniblock)
+                .expect("values cache update task failed");
+        }
+    }
+}
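// The update loop returned by `configure_storage_values_cache()` above is an instance of a
// more general pattern: a single dedicated updater thread draining an unbounded channel and
// coalescing redundant commands. A minimal standalone sketch (`u32` stands in for
// `MiniblockNumber`, and `apply_update` is a placeholder for the Postgres-backed cache update):
fn spawn_single_updater(
    mut commands: tokio::sync::mpsc::UnboundedReceiver<u32>,
    mut apply_update: impl FnMut(u32, u32) + Send + 'static,
) -> std::thread::JoinHandle<()> {
    std::thread::spawn(move || {
        let mut current = 0;
        // `blocking_recv()` parks the thread outside the Tokio runtime, which is why the
        // caller is expected to run this on a separate thread or a blocking task.
        while let Some(target) = commands.blocking_recv() {
            if target <= current {
                continue; // coalesce no-op/stale commands produced by concurrent VM instances
            }
            apply_update(current, target);
            current = target;
        }
    })
}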
+ /// For example, in API we always represent the state at the beginning of an L1 batch, so we discard all the writes + /// that happened at the same batch or later (for historical `eth_call` requests). + fn write_counts(&self, write_l1_batch_number: L1BatchNumber) -> bool { + if self.consider_new_l1_batch { + self.l1_batch_number_for_miniblock >= write_l1_batch_number + } else { + self.l1_batch_number_for_miniblock > write_l1_batch_number + } + } + + fn values_cache(&self) -> Option<&ValuesCache> { + Some(&self.caches.as_ref()?.values.as_ref()?.cache) + } } impl ReadStorage for PostgresStorage<'_> { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { + fn read_value(&mut self, &key: &StorageKey) -> StorageValue { let started_at = Instant::now(); - let mut dal = self.connection.storage_web3_dal(); - let value = self - .rt_handle - .block_on(async { - metrics::histogram!( - "state.postgres_storage.enter_context", - started_at.elapsed(), - "method" => "read_value" - ); - dal.get_historical_value_unchecked(key, self.block_number) - .await - }) - .unwrap(); + let values_cache = self.values_cache(); + let cached_value = values_cache.and_then(|cache| cache.get(self.miniblock_number, &key)); + + let value = cached_value.unwrap_or_else(|| { + let mut dal = self.connection.storage_web3_dal(); + let value = self + .rt_handle + .block_on(dal.get_historical_value_unchecked(&key, self.miniblock_number)) + .expect("Failed executing `read_value`"); + if let Some(cache) = self.values_cache() { + cache.insert(self.miniblock_number, key, value); + } + value + }); + metrics::histogram!("state.postgres_storage", started_at.elapsed(), "method" => "read_value"); value } fn is_write_initial(&mut self, key: &StorageKey) -> bool { - let mut dal = self.connection.storage_web3_dal(); let started_at = Instant::now(); - let initial_write_l1_batch_number = self - .rt_handle - .block_on(async { - metrics::histogram!( - "state.postgres_storage.enter_context", - started_at.elapsed(), - "method" => "is_write_initial" - ); - dal.get_l1_batch_number_for_initial_write(key).await - }) - .unwrap(); + let caches = self.caches.as_ref(); + let cached_value = caches.and_then(|caches| caches.initial_writes.get(key)); + + if cached_value.is_none() { + // Write is absent in positive cache, check whether it's present in the negative cache. + let cached_value = caches.and_then(|caches| caches.negative_initial_writes.get(key)); + if let Some(min_l1_batch_for_initial_write) = cached_value { + // We know that this slot was certainly not touched before `min_l1_batch_for_initial_write`. + // Try to use this knowledge to decide if the change is certainly initial. + // This is based on the hypothetical worst-case scenario, in which the key was + // written to at the earliest possible L1 batch (i.e., `min_l1_batch_for_initial_write`). 
+ if !self.write_counts(min_l1_batch_for_initial_write) { + metrics::increment_counter!( + "server.state_cache.effective_values", + "name" => PostgresStorageCaches::NEG_INITIAL_WRITES_NAME + ); + return true; + } + } + } - metrics::histogram!("state.postgres_storage", started_at.elapsed(), "method" => "is_write_initial"); + let l1_batch_number = cached_value.or_else(|| { + let mut dal = self.connection.storage_web3_dal(); + let value = self + .rt_handle + .block_on(dal.get_l1_batch_number_for_initial_write(key)) + .expect("Failed executing `is_write_initial`"); - let contains_key = - initial_write_l1_batch_number.map_or(false, |initial_write_l1_batch_number| { - if self.consider_new_l1_batch { - self.l1_batch_number >= initial_write_l1_batch_number + if let Some(caches) = &self.caches { + if let Some(l1_batch_number) = value { + caches.negative_initial_writes.remove(key); + caches.initial_writes.insert(*key, l1_batch_number); } else { - self.l1_batch_number > initial_write_l1_batch_number + caches + .negative_initial_writes + .insert(*key, self.pending_l1_batch_number); + // The pending L1 batch might have been sealed since its number was requested from Postgres + // in `Self::new()`, so this is a somewhat conservative estimate. } - }); + } + value + }); + metrics::histogram!("state.postgres_storage", started_at.elapsed(), "method" => "is_write_initial"); + + let contains_key = l1_batch_number.map_or(false, |initial_write_l1_batch_number| { + self.write_counts(initial_write_l1_batch_number) + }); !contains_key } fn load_factory_dep(&mut self, hash: H256) -> Option> { let started_at = Instant::now(); let cached_value = self - .factory_deps_cache + .caches .as_ref() - .and_then(|cache| cache.get(&hash)); + .and_then(|caches| caches.factory_deps.get(&hash)); + let result = cached_value.or_else(|| { let mut dal = self.connection.storage_web3_dal(); let value = self .rt_handle - .block_on(async { - metrics::histogram!( - "state.postgres_storage.enter_context", - started_at.elapsed(), - "method" => "load_factory_dep" - ); - dal.get_factory_dep_unchecked(hash, self.block_number).await - }) - .unwrap(); + .block_on(dal.get_factory_dep_unchecked(hash, self.miniblock_number)) + .expect("Failed executing `load_factory_dep`"); - if let Some(cache) = &self.factory_deps_cache { + if let Some(caches) = &self.caches { // If we receive None, we won't cache it. 
if let Some(dep) = value.clone() { - cache.insert(hash, dep); + caches.factory_deps.insert(hash, dep); } }; @@ -135,28 +489,36 @@ impl ReadStorage for PostgresStorage<'_> { started_at.elapsed(), "method" => "load_factory_dep", ); - result } } #[cfg(test)] mod tests { - use std::collections::HashMap; + use std::{collections::HashMap, mem}; use db_test_macro::db_test; use zksync_dal::ConnectionPool; + use zksync_types::StorageLog; use super::*; use crate::test_utils::{ create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres, }; - fn test_postgres_storage_basics(pool: &ConnectionPool, rt_handle: Handle) { + fn test_postgres_storage_basics( + pool: &ConnectionPool, + rt_handle: Handle, + cache_initial_writes: bool, + ) { let mut connection = rt_handle.block_on(pool.access_storage()); rt_handle.block_on(prepare_postgres(&mut connection)); let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), true); - assert_eq!(storage.l1_batch_number, L1BatchNumber(0)); + if cache_initial_writes { + let caches = PostgresStorageCaches::new(1_024, 1_024); + storage = storage.with_caches(caches); + } + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(0)); let existing_logs = gen_storage_logs(0..20); for log in &existing_logs { @@ -168,6 +530,11 @@ mod tests { assert!(storage.is_write_initial(&log.key)); } + if cache_initial_writes { + let caches = storage.caches.as_ref().unwrap(); + assert!(caches.initial_writes.estimated_len() > 0); + } + // Add a new miniblock to the storage storage.rt_handle.block_on(create_miniblock( &mut storage.connection, @@ -180,13 +547,16 @@ mod tests { assert!(storage.is_write_initial(&log.key)); } + let caches = mem::take(&mut storage.caches); let mut storage = PostgresStorage::new( storage.rt_handle, storage.connection, MiniblockNumber(1), true, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(1)); + storage.caches = caches; + + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(1)); for log in &non_existing_logs { assert!(storage.is_write_initial(&log.key)); } @@ -199,37 +569,46 @@ mod tests { )); // Miniblock #1 should not be seen by the "old" storage + let caches = mem::take(&mut storage.caches); let mut storage = PostgresStorage::new( storage.rt_handle, storage.connection, MiniblockNumber(0), true, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(0)); + storage.caches = caches; + + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(0)); for log in &non_existing_logs { assert!(storage.is_write_initial(&log.key)); } // ...but should be seen by the new one + let caches = mem::take(&mut storage.caches); let mut storage = PostgresStorage::new( storage.rt_handle, storage.connection, MiniblockNumber(1), true, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(1)); + storage.caches = caches; + + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(1)); for log in &non_existing_logs { assert!(!storage.is_write_initial(&log.key)); } // ...except if we set `consider_new_l1_batch` to `false` + let caches = mem::take(&mut storage.caches); let mut storage = PostgresStorage::new( storage.rt_handle, storage.connection, MiniblockNumber(1), false, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(1)); + storage.caches = caches; + + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(1)); for log in &non_existing_logs { assert!(storage.is_write_initial(&log.key)); } @@ -241,7 +620,16 @@ mod tests { #[db_test] async fn postgres_storage_basics(pool: ConnectionPool) { 
tokio::task::spawn_blocking(move || { - test_postgres_storage_basics(&pool, Handle::current()); + test_postgres_storage_basics(&pool, Handle::current(), false); + }) + .await + .unwrap(); + } + + #[db_test] + async fn postgres_storage_with_initial_writes_cache(pool: ConnectionPool) { + tokio::task::spawn_blocking(move || { + test_postgres_storage_basics(&pool, Handle::current(), true); }) .await .unwrap(); @@ -268,7 +656,7 @@ mod tests { MiniblockNumber(1), consider_new_l1_batch, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(1)); + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(1)); storage.rt_handle.block_on(create_l1_batch( &mut storage.connection, @@ -287,7 +675,7 @@ mod tests { MiniblockNumber(1), consider_new_l1_batch, ); - assert_eq!(storage.l1_batch_number, L1BatchNumber(1)); + assert_eq!(storage.l1_batch_number_for_miniblock, L1BatchNumber(1)); for log in &new_logs { assert_eq!(storage.is_write_initial(&log.key), !consider_new_l1_batch); } @@ -305,64 +693,248 @@ mod tests { .unwrap(); } - fn test_postgres_storage_factory_deps_cache( - pool: &ConnectionPool, - rt_handle: &Handle, - consider_new_l1_batch: bool, - ) { + fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut connection = rt_handle.block_on(pool.access_storage()); rt_handle.block_on(prepare_postgres(&mut connection)); - let cache = FactoryDepsCache::new("test_factory_deps_cache", 128); - let mut storage = PostgresStorage::new( - rt_handle.clone(), - connection, - MiniblockNumber(1), - consider_new_l1_batch, - ) - .with_factory_deps_cache(cache.clone()); + + let caches = PostgresStorageCaches::new(128 * 1_024 * 1_024, 1_024); + let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(1), true) + .with_caches(caches.clone()); let zero_addr = H256::zero(); // try load a non-existent contract let dep = storage.load_factory_dep(zero_addr); assert_eq!(dep, None); - assert_eq!(cache.get(&zero_addr.clone()), None); - drop(storage); // Drop the storage to free the connection. + assert_eq!(caches.factory_deps.get(&zero_addr), None); - // Prepare the new connection. - let mut connection = rt_handle.block_on(pool.access_storage()); // insert the contracts let mut contracts = HashMap::new(); contracts.insert(H256::zero(), vec![1, 2, 3]); - rt_handle.block_on( - connection + storage.rt_handle.block_on( + storage + .connection .storage_dal() .insert_factory_deps(MiniblockNumber(0), &contracts), ); + // Create the storage that should have the cache filled. 
let mut storage = PostgresStorage::new( - rt_handle.clone(), - connection, + storage.rt_handle, + storage.connection, MiniblockNumber(1), - consider_new_l1_batch, + true, ) - .with_factory_deps_cache(cache.clone()); + .with_caches(caches.clone()); - // fill the cache + // Fill the cache let dep = storage.load_factory_dep(zero_addr); - assert_eq!(dep, Some(vec![1, 2, 3])); - assert_eq!(cache.get(&zero_addr.clone()), Some(vec![1, 2, 3])); + assert_eq!(caches.factory_deps.get(&zero_addr), Some(vec![1, 2, 3])); } #[db_test] - async fn postgres_storage_factory_deps_cache(pool: ConnectionPool) { + async fn using_factory_deps_cache(pool: ConnectionPool) { let handle = Handle::current(); - tokio::task::spawn_blocking(move || { - println!("Testing FactoryDepsCache integration"); - test_postgres_storage_factory_deps_cache(&pool, &handle, true); - }) - .await - .unwrap(); + tokio::task::spawn_blocking(move || test_factory_deps_cache(&pool, handle)) + .await + .unwrap(); + } + + fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { + let connection = rt_handle.block_on(pool.access_storage()); + let caches = PostgresStorageCaches::new(1_024, 4 * 1_024 * 1_024); + let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), false) + .with_caches(caches.clone()); + assert_eq!(storage.pending_l1_batch_number, L1BatchNumber(0)); + + storage + .rt_handle + .block_on(prepare_postgres(&mut storage.connection)); + + let mut logs = gen_storage_logs(100..120); + let non_existing_key = logs[19].key; + logs.truncate(10); + + assert!(storage.is_write_initial(&logs[0].key)); + assert!(storage.is_write_initial(&non_existing_key)); + assert_eq!( + caches.negative_initial_writes.get(&logs[0].key), + Some(L1BatchNumber(0)) + ); + assert_eq!( + caches.negative_initial_writes.get(&non_existing_key), + Some(L1BatchNumber(0)) + ); + assert!(storage.is_write_initial(&logs[0].key)); + assert!(storage.is_write_initial(&non_existing_key)); + + storage.rt_handle.block_on(create_miniblock( + &mut storage.connection, + MiniblockNumber(1), + logs.clone(), + )); + storage.rt_handle.block_on(create_l1_batch( + &mut storage.connection, + L1BatchNumber(1), + &logs, + )); + + let mut storage = PostgresStorage::new( + storage.rt_handle, + storage.connection, + MiniblockNumber(1), + false, + ) + .with_caches(caches.clone()); + + assert!(storage.is_write_initial(&logs[0].key)); + // ^ Since we don't consider the latest L1 batch + assert!(storage.is_write_initial(&non_existing_key)); + + // Check that the cache entries have been updated + assert_eq!( + caches.initial_writes.get(&logs[0].key), + Some(L1BatchNumber(1)) + ); + assert_eq!(caches.negative_initial_writes.get(&logs[0].key), None); + assert_eq!( + caches.negative_initial_writes.get(&non_existing_key), + Some(L1BatchNumber(2)) + ); + assert!(storage.is_write_initial(&logs[0].key)); + assert!(storage.is_write_initial(&non_existing_key)); + + let mut storage = PostgresStorage::new( + storage.rt_handle, + storage.connection, + MiniblockNumber(1), + true, + ) + .with_caches(caches.clone()); + assert!(!storage.is_write_initial(&logs[0].key)); + assert!(storage.is_write_initial(&non_existing_key)); + + // Check that the cache entries are still as expected. 
+ assert_eq!( + caches.initial_writes.get(&logs[0].key), + Some(L1BatchNumber(1)) + ); + assert_eq!( + caches.negative_initial_writes.get(&non_existing_key), + Some(L1BatchNumber(2)) + ); + + let mut storage = PostgresStorage::new( + storage.rt_handle, + storage.connection, + MiniblockNumber(2), + false, + ) + .with_caches(caches); + + // Check that the cached value has been used + assert!(!storage.is_write_initial(&logs[0].key)); + assert!(storage.is_write_initial(&non_existing_key)); + } + + #[db_test] + async fn using_initial_writes_cache(pool: ConnectionPool) { + let handle = Handle::current(); + tokio::task::spawn_blocking(move || test_initial_writes_cache(&pool, handle)) + .await + .unwrap(); + } + + fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { + let mut caches = PostgresStorageCaches::new(1_024, 1_024); + let _ = + caches.configure_storage_values_cache(1_024 * 1_024, pool.clone(), rt_handle.clone()); + // We cannot use an update task since it requires having concurrent DB connections + // that don't work in tests. We'll update values cache manually instead. + let values_cache = caches.values.as_ref().unwrap().cache.clone(); + + let mut connection = rt_handle.block_on(pool.access_storage()); + rt_handle.block_on(prepare_postgres(&mut connection)); + + let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), false) + .with_caches(caches.clone()); + let existing_key = gen_storage_logs(0..20)[1].key; + let value = storage.read_value(&existing_key); + assert!(!value.is_zero()); + + // Check that the value is now cached. + let cached_value = values_cache.get(MiniblockNumber(0), &existing_key).unwrap(); + assert_eq!(cached_value, value); + + let non_existing_key = gen_storage_logs(100..120)[0].key; + let value = storage.read_value(&non_existing_key); + assert_eq!(value, StorageValue::zero()); + + let cached_value = values_cache + .get(MiniblockNumber(0), &non_existing_key) + .unwrap(); + assert_eq!(cached_value, StorageValue::zero()); + + let logs = vec![ + StorageLog::new_write_log(existing_key, H256::repeat_byte(1)), + StorageLog::new_write_log(non_existing_key, H256::repeat_byte(2)), + ]; + storage.rt_handle.block_on(create_miniblock( + &mut storage.connection, + MiniblockNumber(1), + logs, + )); + + let mut storage = PostgresStorage::new( + storage.rt_handle, + storage.connection, + MiniblockNumber(1), + true, + ) + .with_caches(caches); + + // Cached values should not be updated so far, and they should not be used + assert_eq!(storage.read_value(&existing_key), H256::repeat_byte(1)); + assert_eq!(storage.read_value(&non_existing_key), H256::repeat_byte(2)); + + assert!(values_cache + .get(MiniblockNumber(1), &existing_key) + .is_none()); + let cached_value = values_cache.get(MiniblockNumber(0), &existing_key).unwrap(); + assert_ne!(cached_value, H256::repeat_byte(1)); + assert!(values_cache + .get(MiniblockNumber(1), &non_existing_key) + .is_none()); + let cached_value = values_cache + .get(MiniblockNumber(0), &non_existing_key) + .unwrap(); + assert_ne!(cached_value, H256::repeat_byte(2)); + + values_cache.update( + MiniblockNumber(0), + MiniblockNumber(1), + &storage.rt_handle, + &mut storage.connection, + ); + assert_eq!(values_cache.0.read().unwrap().valid_for, MiniblockNumber(1)); + + assert_eq!(storage.read_value(&existing_key), H256::repeat_byte(1)); + assert_eq!(storage.read_value(&non_existing_key), H256::repeat_byte(2)); + // Check that the values are now cached. 
+ let cached_value = values_cache.get(MiniblockNumber(1), &existing_key).unwrap(); + assert_eq!(cached_value, H256::repeat_byte(1)); + let cached_value = values_cache + .get(MiniblockNumber(1), &non_existing_key) + .unwrap(); + assert_eq!(cached_value, H256::repeat_byte(2)); + } + + #[db_test] + async fn using_values_cache(pool: ConnectionPool) { + let handle = Handle::current(); + tokio::task::spawn_blocking(move || test_values_cache(&pool, handle)) + .await + .unwrap(); } } diff --git a/core/lib/state/src/rocksdb.rs b/core/lib/state/src/rocksdb.rs index 2212b8b291e7..09bdfa6a2f26 100644 --- a/core/lib/state/src/rocksdb.rs +++ b/core/lib/state/src/rocksdb.rs @@ -78,7 +78,7 @@ impl RocksdbStorage { /// in Postgres. pub async fn update_from_postgres(&mut self, conn: &mut StorageProcessor<'_>) { let stage_started_at: Instant = Instant::now(); - let latest_l1_batch_number = conn.blocks_dal().get_sealed_block_number().await; + let latest_l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await; vlog::debug!( "loading storage for l1 batch number {}", latest_l1_batch_number.0 diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs new file mode 100644 index 000000000000..73f59beb6406 --- /dev/null +++ b/core/lib/state/src/shadow_storage.rs @@ -0,0 +1,66 @@ +use crate::ReadStorage; +use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; + +/// [`ReadStorage`] implementation backed by 2 different backends: +/// source_storage -- backend that will return values for function calls and be the source of truth +/// to_check_storage -- secondary storage, which will verify it's own return values against source_storage +/// Note that if to_check_storage value is different than source value, execution continues and metrics/ logs are emitted. +#[derive(Debug)] +pub struct ShadowStorage<'a, 'b> { + source_storage: Box, + to_check_storage: Box, + l1_batch_number: L1BatchNumber, +} + +impl<'a, 'b> ShadowStorage<'a, 'b> { + /// Creates a new storage using the 2 underlying [`ReadStorage`]s, first as source, the second to be checked against the source. 
+        let cached_value = values_cache.get(MiniblockNumber(1), &existing_key).unwrap();
+        assert_eq!(cached_value, H256::repeat_byte(1));
+        let cached_value = values_cache
+            .get(MiniblockNumber(1), &non_existing_key)
+            .unwrap();
+        assert_eq!(cached_value, H256::repeat_byte(2));
+    }
+
+    #[db_test]
+    async fn using_values_cache(pool: ConnectionPool) {
+        let handle = Handle::current();
+        tokio::task::spawn_blocking(move || test_values_cache(&pool, handle))
+            .await
+            .unwrap();
     }
 }
diff --git a/core/lib/state/src/rocksdb.rs b/core/lib/state/src/rocksdb.rs
index 2212b8b291e7..09bdfa6a2f26 100644
--- a/core/lib/state/src/rocksdb.rs
+++ b/core/lib/state/src/rocksdb.rs
@@ -78,7 +78,7 @@ impl RocksdbStorage {
     /// in Postgres.
     pub async fn update_from_postgres(&mut self, conn: &mut StorageProcessor<'_>) {
         let stage_started_at: Instant = Instant::now();
-        let latest_l1_batch_number = conn.blocks_dal().get_sealed_block_number().await;
+        let latest_l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await;
         vlog::debug!(
             "loading storage for l1 batch number {}",
             latest_l1_batch_number.0
diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs
new file mode 100644
index 000000000000..73f59beb6406
--- /dev/null
+++ b/core/lib/state/src/shadow_storage.rs
@@ -0,0 +1,66 @@
+use crate::ReadStorage;
+use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256};
+
+/// [`ReadStorage`] implementation backed by two different backends:
+///
+/// - `source_storage` -- the backend that return values are taken from and that serves
+///   as the source of truth;
+/// - `to_check_storage` -- a secondary storage whose return values are verified against
+///   `source_storage`.
+///
+/// Note that if a `to_check_storage` value differs from the source value, execution continues;
+/// the mismatch is only reported via metrics and logs.
+#[derive(Debug)]
+pub struct ShadowStorage<'a, 'b> {
+    source_storage: Box<dyn ReadStorage + 'a>,
+    to_check_storage: Box<dyn ReadStorage + 'b>,
+    l1_batch_number: L1BatchNumber,
+}
let genesis_storage_logs = gen_storage_logs(0..20); @@ -70,6 +73,7 @@ pub(crate) async fn create_miniblock( l1_gas_price: 0, l2_fair_gas_price: 0, base_system_contracts_hashes: Default::default(), + protocol_version: Some(Default::default()), }; conn.blocks_dal().insert_miniblock(&miniblock_header).await; @@ -85,36 +89,24 @@ pub(crate) async fn create_l1_batch( l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { - let mut header = L1BatchHeader::new(l1_batch_number, 0, Address::default(), Default::default()); + let mut header = L1BatchHeader::new( + l1_batch_number, + 0, + Address::default(), + Default::default(), + Default::default(), + ); header.is_finished = true; conn.blocks_dal() - .insert_l1_batch(&header, BlockGasCount::default()) + .insert_l1_batch(&header, &[], BlockGasCount::default()) .await; conn.blocks_dal() .mark_miniblocks_as_executed_in_l1_batch(l1_batch_number) .await; - let log_queries: Vec<_> = logs_for_initial_writes - .iter() - .map(write_log_to_query) - .collect(); + let mut written_keys: Vec<_> = logs_for_initial_writes.iter().map(|log| log.key).collect(); + written_keys.sort_unstable(); conn.storage_logs_dedup_dal() - .insert_initial_writes(l1_batch_number, &log_queries) + .insert_initial_writes(l1_batch_number, &written_keys) .await; } - -fn write_log_to_query(log: &StorageLog) -> LogQuery { - LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: 0, - aux_byte: 0, - shard_id: 0, - address: *log.key.address(), - key: U256::from_big_endian(log.key.key().as_bytes()), - read_value: U256::zero(), - written_value: U256::from_big_endian(log.value.as_bytes()), - rw_flag: true, - rollback: false, - is_service: false, - } -} diff --git a/core/lib/storage/Cargo.toml b/core/lib/storage/Cargo.toml index 9c09c2db3827..6f0faebc2b6e 100644 --- a/core/lib/storage/Cargo.toml +++ b/core/lib/storage/Cargo.toml @@ -14,7 +14,7 @@ vlog = { path = "../../lib/vlog", version = "1.0" } metrics = "0.20" num_cpus = "1.13" -rocksdb = { version = "0.18.0", default-features = false, features = ["snappy"] } +rocksdb = { version = "0.21.0", default-features = false, features = ["snappy"] } [dev-dependencies] tempfile = "3.0.2" diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index 2c9b202ce399..5f34e8afb45f 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -1,15 +1,17 @@ use rocksdb::{ - properties, BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, DBIterator, IteratorMode, - Options, ReadOptions, WriteOptions, DB, + properties, BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, + IteratorMode, Options, PrefixRange, ReadOptions, WriteOptions, DB, }; -use std::collections::HashSet; -use std::fmt; -use std::marker::PhantomData; -use std::ops; -use std::path::Path; -use std::sync::{Condvar, Mutex, MutexGuard, PoisonError}; -use std::time::{Duration, Instant}; +use std::{ + collections::HashSet, + fmt, + marker::PhantomData, + ops, + path::Path, + sync::{Condvar, Mutex, MutexGuard, PoisonError}, + time::{Duration, Instant}, +}; use crate::metrics::{describe_metrics, RocksDBSizeStats, WriteMetrics}; @@ -60,6 +62,26 @@ impl WriteBatch<'_, CF> { } } +struct RocksDBCaches { + /// LRU block cache shared among all column families. 
+    shared: Option<Cache>,
+}
+
+impl fmt::Debug for RocksDBCaches {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter
+            .debug_struct("RocksDBCaches")
+            .finish_non_exhaustive()
+    }
+}
+
+impl RocksDBCaches {
+    fn new(capacity: Option<usize>) -> Self {
+        let shared = capacity.map(Cache::new_lru_cache);
+        Self { shared }
+    }
+}
+
 /// Thin wrapper around a RocksDB instance.
 #[derive(Debug)]
 pub struct RocksDB<CF> {
@@ -68,15 +90,27 @@ pub struct RocksDB<CF> {
     sizes_reported_at: Mutex<Option<Instant>>,
     _registry_entry: RegistryEntry,
     _cf: PhantomData<CF>,
+    // Importantly, `Cache`s must be dropped after `DB`, so we place them as the last field
+    // (fields in a struct are dropped in the declaration order).
+    _caches: RocksDBCaches,
 }
 
 impl<CF: NamedColumnFamily> RocksDB<CF> {
     const SIZE_REPORT_INTERVAL: Duration = Duration::from_secs(1);
 
     pub fn new<P: AsRef<Path>>(path: P, tune_options: bool) -> Self {
+        Self::with_cache(path, tune_options, None)
+    }
+
+    pub fn with_cache<P: AsRef<Path>>(
+        path: P,
+        tune_options: bool,
+        block_cache_capacity: Option<usize>,
+    ) -> Self {
         describe_metrics();
 
-        let options = Self::rocksdb_options(tune_options);
+        let caches = RocksDBCaches::new(block_cache_capacity);
+        let options = Self::rocksdb_options(tune_options, None);
         let existing_cfs = DB::list_cf(&options, path.as_ref()).unwrap_or_else(|err| {
             vlog::warn!(
                 "Failed getting column families for RocksDB `{}` at `{}`, assuming CFs are empty; {err}",
@@ -109,7 +143,15 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
 
         // Open obsolete CFs as well; RocksDB initialization will panic otherwise.
         let cfs = cf_names.into_iter().chain(obsolete_cfs).map(|cf_name| {
-            ColumnFamilyDescriptor::new(cf_name, Self::rocksdb_options(tune_options))
+            let mut block_based_options = BlockBasedOptions::default();
+            if tune_options {
+                block_based_options.set_bloom_filter(10.0, false);
+            }
+            if let Some(cache) = &caches.shared {
+                block_based_options.set_block_cache(cache);
+            }
+            let cf_options = Self::rocksdb_options(tune_options, Some(block_based_options));
+            ColumnFamilyDescriptor::new(cf_name, cf_options)
         });
         let db = DB::open_cf_descriptors(&options, path, cfs).expect("failed to init rocksdb");
 
@@ -119,6 +161,7 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
             sizes_reported_at: Mutex::new(None),
             _registry_entry: RegistryEntry::new(),
             _cf: PhantomData,
+            _caches: caches,
         }
     }
 
@@ -130,14 +173,17 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
         self
     }
 
-    fn rocksdb_options(tune_options: bool) -> Options {
+    fn rocksdb_options(
+        tune_options: bool,
+        block_based_options: Option<BlockBasedOptions>,
+    ) -> Options {
         let mut options = Options::default();
         options.create_missing_column_families(true);
         options.create_if_missing(true);
         if tune_options {
             options.increase_parallelism(num_cpus::get() as i32);
-            let mut block_based_options = BlockBasedOptions::default();
-            block_based_options.set_bloom_filter(10.0, false);
+        }
+        if let Some(block_based_options) = block_based_options {
             options.set_block_based_table_factory(&block_based_options);
         }
         options
@@ -169,11 +215,21 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
             .db
             .property_int_value_cf(cf, properties::SIZE_ALL_MEM_TABLES)
             .expect(ERROR_MSG)?;
+        let block_cache_size = self
+            .db
+            .property_int_value_cf(cf, properties::BLOCK_CACHE_USAGE)
+            .expect(ERROR_MSG)?;
+        let index_and_filters_size = self
+            .db
+            .property_int_value_cf(cf, properties::ESTIMATE_TABLE_READERS_MEM)
+            .expect(ERROR_MSG)?;
 
         Some(RocksDBSizeStats {
             estimated_live_data_size,
            total_sst_file_size,
            total_mem_table_size,
+            block_cache_size,
+            index_and_filters_size,
         })
    }
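// Standalone illustration of the cache plumbing above: one `rocksdb::Cache` handle is created
// up front and attached to each column family's `BlockBasedOptions`, so all CFs draw on a
// single LRU byte budget (rust-rocksdb 0.21 API, matching the dependency bump in this PR).
fn options_with_shared_block_cache(capacity_bytes: usize) -> (rocksdb::Cache, rocksdb::Options) {
    let cache = rocksdb::Cache::new_lru_cache(capacity_bytes);
    let mut block_based = rocksdb::BlockBasedOptions::default();
    block_based.set_block_cache(&cache); // stores a handle; the cache itself stays shared
    let mut options = rocksdb::Options::default();
    options.set_block_based_table_factory(&block_based);
    // NB: the `Cache` must outlive the `DB`, hence `RocksDBCaches` being the last struct field above.
    (cache, options)
}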
 
@@ -185,18 +241,13 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
         self.db.multi_get(keys)
     }
 
-    pub fn multi_get_cf<K, I>(
+    pub fn multi_get_cf(
         &self,
         cf: CF,
-        keys: I,
-    ) -> Vec<Result<Option<Vec<u8>>, rocksdb::Error>>
-    where
-        K: AsRef<[u8]>,
-        I: IntoIterator<Item = K>,
-    {
+        keys: impl Iterator<Item = impl AsRef<[u8]>>,
+    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, rocksdb::Error>> {
         let cf = self.column_family(cf);
-        let keys = keys.into_iter().map(|key| (cf, key));
-        self.db.multi_get_cf(keys)
+        self.db.batched_multi_get_cf(cf, keys, false)
     }
 
     pub fn new_write_batch(&self) -> WriteBatch<'_, CF> {
@@ -257,19 +308,29 @@ impl<CF: NamedColumnFamily> RocksDB<CF> {
 
     /// Iterates over key-value pairs in the specified column family `cf` in the lexical
     /// key order. The keys are filtered so that they start from the specified `prefix`.
-    pub fn prefix_iterator_cf(&self, cf: CF, prefix: &[u8]) -> DBIterator<'_> {
+    pub fn prefix_iterator_cf(
+        &self,
+        cf: CF,
+        prefix: &[u8],
+    ) -> impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + '_ {
         let cf = self.column_family(cf);
         let mut options = ReadOptions::default();
-        options.set_iterate_lower_bound(prefix);
-        if let Some(next_prefix) = next_prefix(prefix) {
-            options.set_iterate_upper_bound(next_prefix);
-        }
-        self.db.iterator_cf_opt(cf, options, IteratorMode::Start)
+        options.set_iterate_range(PrefixRange(prefix));
+        self.db
+            .iterator_cf_opt(cf, options, IteratorMode::Start)
+            .map(Result::unwrap)
+            .fuse()
+        // ^ The rocksdb docs say that a raw iterator (which is used by the returned ordinary iterator)
+        // can become invalid "when it reaches the end of its defined range, or when it encounters an error."
+        // We panic on RocksDB errors elsewhere and fuse the iterator to prevent polling after the end
+        // of the range. Thus, `unwrap()` should be safe.
     }
 }
 
 impl RocksDB<()> {
     /// Awaits termination of all running rocksdb instances.
+    ///
+    /// This method is blocking and should be wrapped in `spawn_blocking(_)` if run in the async context.
     pub fn await_rocksdb_termination() {
         let (lock, cvar) = &ROCKSDB_INSTANCE_COUNTER;
         let mut num_instances = lock.lock().unwrap();
@@ -313,35 +374,12 @@ impl Drop for RegistryEntry {
     }
 }
 
-fn next_prefix(prefix: &[u8]) -> Option<Vec<u8>> {
-    let non_max_byte_idx = prefix
-        .iter()
-        .enumerate()
-        .rev()
-        .find_map(|(idx, &byte)| (byte != u8::MAX).then_some(idx))?;
-    // ^ If the prefix contains only `0xff` bytes, there is no larger prefix.
-    let mut next_prefix = prefix[..=non_max_byte_idx].to_vec();
-    *next_prefix.last_mut().unwrap() += 1;
-    Some(next_prefix)
-}
-
 #[cfg(test)]
 mod tests {
     use tempfile::TempDir;
 
     use super::*;
 
-    #[test]
-    fn next_prefix_is_correct() {
-        assert_eq!(next_prefix(&[]), None);
-        assert_eq!(next_prefix(&[0xff]), None);
-        assert_eq!(next_prefix(&[0xff; 5]), None);
-
-        assert_eq!(next_prefix(&[0]).unwrap(), [1]);
-        assert_eq!(next_prefix(&[0, 1, 2, 3]).unwrap(), [0, 1, 2, 4]);
-        assert_eq!(next_prefix(&[0, 1, 2, 0xff, 0xff]).unwrap(), [0, 1, 3]);
-    }
-
     #[derive(Debug, Clone, Copy)]
     enum OldColumnFamilies {
         Default,
diff --git a/core/lib/storage/src/metrics.rs b/core/lib/storage/src/metrics.rs
index 265491fac247..ba7ae95f78f4 100644
--- a/core/lib/storage/src/metrics.rs
+++ b/core/lib/storage/src/metrics.rs
@@ -54,12 +54,18 @@ pub(crate) struct RocksDBSizeStats {
     pub total_sst_file_size: u64,
     /// Total size of all memtables in bytes.
     pub total_mem_table_size: u64,
+    /// Total size of the block cache in bytes.
+    pub block_cache_size: u64,
+    /// Total size of index and Bloom filter blocks in bytes.
+ pub index_and_filters_size: u64, } impl RocksDBSizeStats { const ESTIMATED_LIVE_DATA_SIZE: &'static str = "rocksdb.live_data_size"; const TOTAL_SST_FILE_SIZE: &'static str = "rocksdb.total_sst_size"; const TOTAL_MEM_TABLE_SIZE: &'static str = "rocksdb.total_mem_table_size"; + const BLOCK_CACHE_SIZE: &'static str = "rocksdb.block_cache_size"; + const INDEX_AND_FILTERS_SIZE: &'static str = "rocksdb.index_and_filters_size"; fn describe() { metrics::describe_gauge!( @@ -77,6 +83,16 @@ impl RocksDBSizeStats { Unit::Bytes, "Total size of all mem tables in the column family of a RocksDB instance" ); + metrics::describe_gauge!( + Self::BLOCK_CACHE_SIZE, + Unit::Bytes, + "Total size of block cache in the column family of a RocksDB instance" + ); + metrics::describe_gauge!( + Self::INDEX_AND_FILTERS_SIZE, + Unit::Bytes, + "Total size of index and Bloom filters in the column family of a RocksDB instance" + ); } pub fn report(self, db_name: &'static str, cf_name: &'static str) { @@ -98,5 +114,17 @@ impl RocksDBSizeStats { "db" => db_name, "cf" => cf_name ); + metrics::gauge!( + Self::BLOCK_CACHE_SIZE, + self.block_cache_size as f64, + "db" => db_name, + "cf" => cf_name + ); + metrics::gauge!( + Self::INDEX_AND_FILTERS_SIZE, + self.index_and_filters_size as f64, + "db" => db_name, + "cf" => cf_name + ); } } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 920df4e667b7..7fb7fa5e331e 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -15,14 +15,12 @@ zksync_config = { path = "../config", version = "1.0" } zksync_utils = { path = "../utils", version = "1.0" } zksync_basic_types = { path = "../basic_types", version = "1.0" } zksync_contracts = { path = "../contracts", version = "1.0" } -zksync_mini_merkle_tree = { path = "../mini_merkle_tree", version = "1.0"} +zksync_mini_merkle_tree = { path = "../mini_merkle_tree", version = "1.0" } # We need this import because we wanat DAL to be responsible for (de)serialization codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" } zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } -zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3"} -zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "v1.3.2" } +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3" } -bigdecimal = { version = "=0.2.0", features = ["serde"] } chrono = { version = "0.4", features = ["serde", "rustc-serialize"] } metrics = "0.20" num = { version = "0.3.1", features = ["serde"] } @@ -30,16 +28,20 @@ once_cell = "1.7" rlp = "0.5" serde = "1.0.90" serde_json = "1.0.0" -serde_with = "1" +serde_with = { version = "1", features = ["base64"] } strum = { version = "0.24", features = ["derive"] } thiserror = "1.0" +num_enum = "0.6" # Crypto stuff parity-crypto = { version = "0.9", features = ["publickey"] } blake2 = "0.10" +# `ethereum-types` version used in `parity-crypto` +ethereum_types_old = { package = "ethereum-types", version = "0.12.0" } + [dev-dependencies] hex = "0.4" -secp256k1 = {version = "0.21", features = ["recovery"] } +secp256k1 = { version = "0.27", features = ["recovery"] } tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } diff --git a/core/lib/types/src/aggregated_operations.rs b/core/lib/types/src/aggregated_operations.rs index 9acdaef13317..18e2825b4cb7 100644 --- 
a/core/lib/types/src/aggregated_operations.rs
+++ b/core/lib/types/src/aggregated_operations.rs
@@ -1,71 +1,97 @@
-use crate::commitment::BlockWithMetadata;
-use crate::U256;
 use codegen::serialize_proof;
-use serde::{Deserialize, Serialize};
+
+use std::{fmt, ops, str::FromStr};
+
 use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit;
 use zkevm_test_harness::bellman::bn256::Bn256;
 use zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof;
 use zkevm_test_harness::witness::oracle::VmWitnessOracle;
 use zksync_basic_types::{ethabi::Token, L1BatchNumber};
 
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
-pub struct BlocksCommitOperation {
-    pub last_committed_block: BlockWithMetadata,
-    pub blocks: Vec<BlockWithMetadata>,
+use crate::{commitment::L1BatchWithMetadata, U256};
+
+fn l1_batch_range_from_batches(
+    batches: &[L1BatchWithMetadata],
+) -> ops::RangeInclusive<L1BatchNumber> {
+    let start = batches
+        .first()
+        .map(|l1_batch| l1_batch.header.number)
+        .unwrap_or_default();
+    let end = batches
+        .last()
+        .map(|l1_batch| l1_batch.header.number)
+        .unwrap_or_default();
+    start..=end
+}
+
+#[derive(Debug, Clone)]
+pub struct L1BatchCommitOperation {
+    pub last_committed_l1_batch: L1BatchWithMetadata,
+    pub l1_batches: Vec<L1BatchWithMetadata>,
 }
 
-impl BlocksCommitOperation {
+impl L1BatchCommitOperation {
     pub fn get_eth_tx_args(&self) -> Vec<Token> {
-        let stored_block_info = self.last_committed_block.l1_header_data();
-        let blocks_to_commit = self
-            .blocks
+        let stored_batch_info = self.last_committed_l1_batch.l1_header_data();
+        let l1_batches_to_commit = self
+            .l1_batches
             .iter()
-            .map(|block| block.l1_commit_data())
+            .map(L1BatchWithMetadata::l1_commit_data)
             .collect();
 
-        vec![stored_block_info, Token::Array(blocks_to_commit)]
+        vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
     }
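// A subtlety of `l1_batch_range_from_batches()` above: both bounds fall back to
// `L1BatchNumber::default()` (batch 0), so an *empty* batch list yields `0..=0`, which is a
// non-empty range containing batch 0 rather than an empty range. Callers are expected to
// pass non-empty batch lists; a hypothetical guard returning `None` for empty input could
// look like this (`checked_l1_batch_range` is illustrative, not part of this PR):
fn checked_l1_batch_range(
    batches: &[L1BatchWithMetadata],
) -> Option<std::ops::RangeInclusive<L1BatchNumber>> {
    let first = batches.first()?.header.number;
    let last = batches.last()?.header.number;
    Some(first..=last)
}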
} = self; - ( - blocks.first().map(|b| b.header.number).unwrap_or_default(), - blocks.last().map(|b| b.header.number).unwrap_or_default(), - ) + pub fn l1_batch_range(&self) -> ops::RangeInclusive { + l1_batch_range_from_batches(&self.l1_batches) } } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct BlocksCreateProofOperation { - pub blocks: Vec, +#[derive(Debug, Clone)] +pub struct L1BatchCreateProofOperation { + pub l1_batches: Vec, pub proofs_to_pad: usize, } #[derive(Clone)] -pub struct BlockProofForL1 { +pub struct L1BatchProofForL1 { pub aggregation_result_coords: [[u8; 32]; 4], pub scheduler_proof: Proof>>, } -#[derive(Clone)] -pub struct BlocksProofOperation { - pub prev_block: BlockWithMetadata, - pub blocks: Vec, - pub proofs: Vec, +impl fmt::Debug for L1BatchProofForL1 { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("L1BatchProofForL1") + .field("aggregation_result_coords", &self.aggregation_result_coords) + .finish_non_exhaustive() + } +} + +#[derive(Debug, Clone)] +pub struct L1BatchProofOperation { + pub prev_l1_batch: L1BatchWithMetadata, + pub l1_batches: Vec, + pub proofs: Vec, pub should_verify: bool, } -impl BlocksProofOperation { +impl L1BatchProofOperation { pub fn get_eth_tx_args(&self) -> Vec { - let prev_block = self.prev_block.l1_header_data(); - let blocks_arg = Token::Array(self.blocks.iter().map(|b| b.l1_header_data()).collect()); + let prev_l1_batch = self.prev_l1_batch.l1_header_data(); + let batches_arg = self + .l1_batches + .iter() + .map(L1BatchWithMetadata::l1_header_data) + .collect(); + let batches_arg = Token::Array(batches_arg); if self.should_verify { // currently we only support submitting a single proof assert_eq!(self.proofs.len(), 1); - assert_eq!(self.blocks.len(), 1); + assert_eq!(self.l1_batches.len(), 1); - let BlockProofForL1 { + let L1BatchProofForL1 { aggregation_result_coords, scheduler_proof, } = self.proofs.first().unwrap(); @@ -82,150 +108,111 @@ impl BlocksProofOperation { Token::Array(proof.into_iter().map(Token::Uint).collect()), ]); - vec![prev_block, blocks_arg, proof_input] + vec![prev_l1_batch, batches_arg, proof_input] } else { vec![ - prev_block, - blocks_arg, + prev_l1_batch, + batches_arg, Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]), ] } } - pub fn block_range(&self) -> (L1BatchNumber, L1BatchNumber) { - let BlocksProofOperation { blocks, .. 
} = self; - ( - blocks.first().map(|c| c.header.number).unwrap_or_default(), - blocks.last().map(|c| c.header.number).unwrap_or_default(), - ) + pub fn l1_batch_range(&self) -> ops::RangeInclusive { + l1_batch_range_from_batches(&self.l1_batches) } } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct BlocksExecuteOperation { - pub blocks: Vec, +#[derive(Debug, Clone)] +pub struct L1BatchExecuteOperation { + pub l1_batches: Vec, } -impl BlocksExecuteOperation { - fn get_eth_tx_args_for_block(block: &BlockWithMetadata) -> Token { - block.l1_header_data() - } - +impl L1BatchExecuteOperation { pub fn get_eth_tx_args(&self) -> Vec { vec![Token::Array( - self.blocks + self.l1_batches .iter() - .map(BlocksExecuteOperation::get_eth_tx_args_for_block) + .map(L1BatchWithMetadata::l1_header_data) .collect(), )] } - pub fn block_range(&self) -> (L1BatchNumber, L1BatchNumber) { - let BlocksExecuteOperation { blocks } = self; - ( - blocks.first().map(|b| b.header.number).unwrap_or_default(), - blocks.last().map(|b| b.header.number).unwrap_or_default(), - ) + pub fn l1_batch_range(&self) -> ops::RangeInclusive { + l1_batch_range_from_batches(&self.l1_batches) } } -#[derive(Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum AggregatedActionType { - CommitBlocks, - PublishProofBlocksOnchain, - ExecuteBlocks, + Commit, + PublishProofOnchain, + Execute, } -impl std::string::ToString for AggregatedActionType { - fn to_string(&self) -> String { +impl AggregatedActionType { + pub fn as_str(self) -> &'static str { + // "Blocks" suffixes are there for legacy reasons match self { - AggregatedActionType::CommitBlocks => "CommitBlocks".to_owned(), - AggregatedActionType::PublishProofBlocksOnchain => { - "PublishProofBlocksOnchain".to_owned() - } - AggregatedActionType::ExecuteBlocks => "ExecuteBlocks".to_owned(), + Self::Commit => "CommitBlocks", + Self::PublishProofOnchain => "PublishProofBlocksOnchain", + Self::Execute => "ExecuteBlocks", } } } -impl std::str::FromStr for AggregatedActionType { - type Err = String; +impl fmt::Display for AggregatedActionType { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(self.as_str()) + } +} + +impl FromStr for AggregatedActionType { + type Err = &'static str; fn from_str(s: &str) -> Result { match s { - "CommitBlocks" => Ok(Self::CommitBlocks), - "PublishProofBlocksOnchain" => Ok(Self::PublishProofBlocksOnchain), - "ExecuteBlocks" => Ok(Self::ExecuteBlocks), - _ => Err("Incorrect aggregated action type".to_owned()), + "CommitBlocks" => Ok(Self::Commit), + "PublishProofBlocksOnchain" => Ok(Self::PublishProofOnchain), + "ExecuteBlocks" => Ok(Self::Execute), + _ => Err( + "Incorrect aggregated action type; expected one of `CommitBlocks`, `PublishProofBlocksOnchain`, \ + `ExecuteBlocks`", + ), } } } #[allow(clippy::large_enum_variant)] -#[derive(Clone)] +#[derive(Debug, Clone)] pub enum AggregatedOperation { - CommitBlocks(BlocksCommitOperation), - PublishProofBlocksOnchain(BlocksProofOperation), - ExecuteBlocks(BlocksExecuteOperation), + Commit(L1BatchCommitOperation), + PublishProofOnchain(L1BatchProofOperation), + Execute(L1BatchExecuteOperation), } impl AggregatedOperation { pub fn get_action_type(&self) -> AggregatedActionType { match self { - AggregatedOperation::CommitBlocks(..) => AggregatedActionType::CommitBlocks, - AggregatedOperation::PublishProofBlocksOnchain(..) 
=> { - AggregatedActionType::PublishProofBlocksOnchain - } - AggregatedOperation::ExecuteBlocks(..) => AggregatedActionType::ExecuteBlocks, + Self::Commit(_) => AggregatedActionType::Commit, + Self::PublishProofOnchain(_) => AggregatedActionType::PublishProofOnchain, + Self::Execute(_) => AggregatedActionType::Execute, } } - pub fn get_block_range(&self) -> (L1BatchNumber, L1BatchNumber) { + pub fn l1_batch_range(&self) -> ops::RangeInclusive { match self { - AggregatedOperation::CommitBlocks(op) => op.block_range(), - AggregatedOperation::PublishProofBlocksOnchain(op) => op.block_range(), - AggregatedOperation::ExecuteBlocks(op) => op.block_range(), + Self::Commit(op) => op.l1_batch_range(), + Self::PublishProofOnchain(op) => op.l1_batch_range(), + Self::Execute(op) => op.l1_batch_range(), } } pub fn get_action_caption(&self) -> &'static str { match self { - AggregatedOperation::CommitBlocks(_) => "commit", - AggregatedOperation::PublishProofBlocksOnchain(_) => "proof", - AggregatedOperation::ExecuteBlocks(_) => "execute", + Self::Commit(_) => "commit", + Self::PublishProofOnchain(_) => "proof", + Self::Execute(_) => "execute", } } - - pub fn is_commit(&self) -> bool { - matches!(self.get_action_type(), AggregatedActionType::CommitBlocks) - } - - pub fn is_execute(&self) -> bool { - matches!(self.get_action_type(), AggregatedActionType::ExecuteBlocks) - } - - pub fn is_publish_proofs(&self) -> bool { - matches!( - self.get_action_type(), - AggregatedActionType::PublishProofBlocksOnchain - ) - } -} - -impl From for AggregatedOperation { - fn from(other: BlocksCommitOperation) -> Self { - Self::CommitBlocks(other) - } -} - -impl From for AggregatedOperation { - fn from(other: BlocksProofOperation) -> Self { - Self::PublishProofBlocksOnchain(other) - } -} - -impl From for AggregatedOperation { - fn from(other: BlocksExecuteOperation) -> Self { - Self::ExecuteBlocks(other) - } } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 04869b573220..dd4bec61708b 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -1,22 +1,25 @@ +use chrono::{DateTime, Utc}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use strum::Display; + +use zksync_basic_types::{ + web3::types::{Bytes, H160, H256, H64, U256, U64}, + L1BatchNumber, +}; +use zksync_contracts::BaseSystemContractsHashes; -use crate::explorer_api::TransactionStatus; +use crate::protocol_version::L1VerifierConfig; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::vm_trace::{Call, CallType}; use crate::web3::types::{AccessList, Index, H2048}; -use crate::{Address, MiniblockNumber}; -use chrono::{DateTime, Utc}; -pub use zksync_basic_types::web3::{ - self, ethabi, - types::{Bytes, Work, H160, H256, H64, U256, U64}, -}; +use crate::{Address, MiniblockNumber, ProtocolVersionId}; pub mod en; /// Block Number -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Display)] pub enum BlockNumber { /// Alias for BlockNumber::Latest. Committed, @@ -91,7 +94,7 @@ impl<'de> Deserialize<'de> for BlockNumber { /// This is an utility structure that cannot be (de)serialized, it has to be created manually. /// The reason is because Web3 API provides multiple methods for referring block either by hash or number, /// and with such an ID it will be possible to avoid a lot of boilerplate. 
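Review note: `BlockNumber` and `BlockId` below gain a `strum::Display` derive (the crate is already a dependency with `features = ["derive"]`), and the new `extract_block_tag` relies on it to produce a bounded, low-cardinality set of tags. A minimal sketch of the derive's default behavior, with an illustrative enum rather than the real types:

```rust
use strum::Display; // derive re-export; requires strum's `derive` feature

#[derive(Display)]
enum Tag {
    Committed,
    Pending,
}

fn main() {
    // With strum's defaults, the variant name is rendered verbatim.
    assert_eq!(Tag::Committed.to_string(), "Committed");
    assert_eq!(Tag::Pending.to_string(), "Pending");
}
```

This keeps `extract_block_tag` cheap: number-based IDs stringify to a handful of variant names, while all hash-based IDs collapse to the literal `"hash"`.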
-#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize, Display)] #[serde(untagged)] pub enum BlockId { /// By Hash @@ -100,6 +103,16 @@ pub enum BlockId { Number(BlockNumber), } +impl BlockId { + /// Extract block's id variant name. + pub fn extract_block_tag(&self) -> String { + match self { + BlockId::Number(block_number) => block_number.to_string(), + BlockId::Hash(_) => "hash".to_string(), + } + } +} + /// Helper struct for EIP-1898. #[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -509,6 +522,15 @@ pub struct Transaction { pub l1_batch_tx_index: Option, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionStatus { + Pending, + Included, + Verified, + Failed, +} + #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct TransactionDetails { @@ -585,6 +607,20 @@ impl From for DebugCall { } } +#[derive(Default, Serialize, Deserialize, Clone, Debug)] +pub struct ProtocolVersion { + /// Protocol version ID + pub version_id: u16, + /// Timestamp at which upgrade should be performed + pub timestamp: u64, + /// Verifier configuration + pub verification_keys_hashes: L1VerifierConfig, + /// Hashes of base system contracts (bootloader and default account) + pub base_system_contracts: BaseSystemContractsHashes, + /// L2 Upgrade transaction hash + pub l2_system_upgrade_tx_hash: Option, +} + #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub enum SupportedTracers { @@ -603,3 +639,48 @@ pub struct TracerConfig { pub tracer: SupportedTracers, pub tracer_config: CallTracerConfig, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum BlockStatus { + Sealed, + Verified, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BlockDetailsBase { + pub timestamp: u64, + pub l1_tx_count: usize, + pub l2_tx_count: usize, + pub root_hash: Option, + pub status: BlockStatus, + pub commit_tx_hash: Option, + pub committed_at: Option>, + pub prove_tx_hash: Option, + pub proven_at: Option>, + pub execute_tx_hash: Option, + pub executed_at: Option>, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub base_system_contracts_hashes: BaseSystemContractsHashes, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BlockDetails { + pub number: MiniblockNumber, + pub l1_batch_number: L1BatchNumber, + #[serde(flatten)] + pub base: BlockDetailsBase, + pub operator_address: Address, + pub protocol_version: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct L1BatchDetails { + pub number: L1BatchNumber, + #[serde(flatten)] + pub base: BlockDetailsBase, +} diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 3e47f92a946b..170bfdaee5a1 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,13 +1,14 @@ use serde::{Deserialize, Serialize}; -use std::fmt::{Debug, Formatter}; -use std::ops::{Add, AddAssign}; + +use std::{fmt, ops}; + use zksync_basic_types::{H2048, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use crate::{ l2_to_l1_log::L2ToL1Log, priority_op_onchain_data::PriorityOpOnchainData, - pubdata_packing::pack_storage_log, web3::signing::keccak256, AccountTreeId, Address, - L1BatchNumber, 
MiniblockNumber, StorageKey, StorageLogKind, WitnessStorageLog, + web3::signing::keccak256, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, Transaction, }; /// Represents a successfully deployed smart contract. @@ -41,16 +42,14 @@ pub struct L1BatchHeader { pub l1_tx_count: u16, /// Total number of processed txs that was requested offchain pub l2_tx_count: u16, - /// The data of the processed priority operations hash which must be sent to the smart contract + /// The data of the processed priority operations hash which must be sent to the smart contract. pub priority_ops_onchain_data: Vec, - /// all L2 -> L1 logs in the block + /// All L2 -> L1 logs in the block. pub l2_to_l1_logs: Vec, - /// preimages of the hashes that were sent as value of L2 logs by special system L2 contract + /// Preimages of the hashes that were sent as value of L2 logs by special system L2 contract. pub l2_to_l1_messages: Vec>, /// Bloom filter for the event logs in the block. pub bloom: H2048, - /// Initial value of the bootloader's heap - pub initial_bootloader_contents: Vec<(usize, U256)>, /// Hashes of contracts used this block pub used_contract_hashes: Vec, /// The EIP1559 base_fee used in this block. @@ -60,6 +59,8 @@ pub struct L1BatchHeader { /// The L2 gas price that the operator agrees on. pub l2_fair_gas_price: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, + /// Version of protocol used for the L1 batch. + pub protocol_version: Option, } /// Holder for the miniblock metadata that is not available from transactions themselves. @@ -75,6 +76,15 @@ pub struct MiniblockHeader { pub l1_gas_price: u64, // L1 gas price assumed in the corresponding batch pub l2_fair_gas_price: u64, // L2 gas price assumed in the corresponding batch pub base_system_contracts_hashes: BaseSystemContractsHashes, + pub protocol_version: Option, +} + +/// Data needed to re-execute miniblock. +#[derive(Debug)] +pub struct MiniblockReexecuteData { + pub number: MiniblockNumber, + pub timestamp: u64, + pub txs: Vec, } impl L1BatchHeader { @@ -83,6 +93,7 @@ impl L1BatchHeader { timestamp: u64, fee_account_address: Address, base_system_contracts_hashes: BaseSystemContractsHashes, + protocol_version: ProtocolVersionId, ) -> L1BatchHeader { Self { number, @@ -95,12 +106,12 @@ impl L1BatchHeader { l2_to_l1_logs: vec![], l2_to_l1_messages: vec![], bloom: H2048::default(), - initial_bootloader_contents: vec![], used_contract_hashes: vec![], base_fee_per_gas: 0, l1_gas_price: 0, l2_fair_gas_price: 0, base_system_contracts_hashes, + protocol_version: Some(protocol_version), } } @@ -123,28 +134,6 @@ impl L1BatchHeader { } } -/// Utility structure that holds the block header together with its logs required to generate the witness -#[derive(Debug)] -pub struct WitnessBlockWithLogs { - pub header: L1BatchHeader, - pub storage_logs: Vec, -} - -impl WitnessBlockWithLogs { - /// Packs the logs into the byte sequence. - /// Used for the onchain data availability. 
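Review note on the `L1BatchHeader::new` change a few hunks up: threading `ProtocolVersionId` through the constructor means `protocol_version` can no longer be forgotten at creation time. A sketch of constructing a header under the new signature; `ProtocolVersionId::latest()` and the `Default` impl for `BaseSystemContractsHashes` are assumed helpers, not shown in this diff:

```rust
use zksync_contracts::BaseSystemContractsHashes;
use zksync_types::{block::L1BatchHeader, Address, L1BatchNumber, ProtocolVersionId};

fn main() {
    let header = L1BatchHeader::new(
        L1BatchNumber(1),
        1_700_000_000,                        // timestamp, illustrative
        Address::zero(),                      // fee account, illustrative
        BaseSystemContractsHashes::default(), // assumed `Default` impl
        ProtocolVersionId::latest(),          // assumed helper; any variant works
    );
    // The header now always records the protocol version it was built with.
    assert!(header.protocol_version.is_some());
}
```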
- pub fn compress_logs(&self, hash_fn: F) -> Vec - where - F: Fn(&StorageKey) -> Vec + Copy, - { - self.storage_logs - .iter() - .filter(|log| log.storage_log.kind == StorageLogKind::Write) - .flat_map(|l| pack_storage_log(&l.storage_log, hash_fn)) - .collect() - } -} - #[derive(Clone, Copy, Eq, PartialEq, Default)] pub struct BlockGasCount { pub commit: u32, @@ -152,31 +141,25 @@ pub struct BlockGasCount { pub execute: u32, } -impl Debug for BlockGasCount { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "c:{}/p:{}/e:{}", self.commit, self.prove, self.execute)?; - Ok(()) +impl fmt::Debug for BlockGasCount { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + formatter, + "c:{}/p:{}/e:{}", + self.commit, self.prove, self.execute + ) } } impl BlockGasCount { - pub fn has_greater_than(&self, bound: u32) -> bool { + pub fn any_field_greater_than(&self, bound: u32) -> bool { self.commit > bound || self.prove > bound || self.execute > bound } } -impl AddAssign for BlockGasCount { - fn add_assign(&mut self, other: Self) { - *self = Self { - commit: self.commit + other.commit, - prove: self.prove + other.prove, - execute: self.execute + other.execute, - }; - } -} +impl ops::Add for BlockGasCount { + type Output = Self; -impl Add for BlockGasCount { - type Output = BlockGasCount; fn add(self, rhs: Self) -> Self::Output { Self { commit: self.commit + rhs.commit, @@ -185,3 +168,13 @@ impl Add for BlockGasCount { } } } + +impl ops::AddAssign for BlockGasCount { + fn add_assign(&mut self, other: Self) { + *self = Self { + commit: self.commit + other.commit, + prove: self.prove + other.prove, + execute: self.execute + other.execute, + }; + } +} diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs index eaf8fdd0953e..940b4ecf273b 100644 --- a/core/lib/types/src/circuit.rs +++ b/core/lib/types/src/circuit.rs @@ -9,5 +9,6 @@ pub const SCHEDULER_UPPER_BOUND: u32 = (LEAF_SPLITTING_FACTOR * NODE_SPLITTING_F pub const LEAF_CIRCUIT_INDEX: u8 = 2; pub const NODE_CIRCUIT_INDEX: u8 = 1; +pub const SCHEDULER_CIRCUIT_INDEX: u8 = 0; pub const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 55a45d8e6f23..89207309f206 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -1,33 +1,34 @@ //! Data structures that have more metadata than their primary versions declared in this crate. -//! For example, block defined here has the `root_hash` field which is absent in the usual `Block`. +//! For example, L1 batch defined here has the `root_hash` field which is absent in `L1BatchHeader`. //! //! Existence of this module is caused by the execution model of zkSync: when executing transactions, //! we aim to avoid expensive operations like the state root hash recalculation. State root hash is not -//! required for the rollup to execute blocks, it's needed for the proof generation and the Ethereum +//! required for the rollup to execute L1 batches, it's needed for the proof generation and the Ethereum //! transactions, thus the calculations are done separately and asynchronously. 
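Review note on the `BlockGasCount` changes above: `any_field_greater_than` is a clearer name than `has_greater_than`, and placing `Add` before `AddAssign` reads well. A sketch of how the arithmetic composes when checking whether a batch still fits; the bound is illustrative, not the real seal-criterion value:

```rust
use zksync_types::block::BlockGasCount;

/// Sketch: would adding `tx_cost` keep every gas dimension under the bound?
fn fits_into_batch(total: BlockGasCount, tx_cost: BlockGasCount) -> bool {
    const GAS_BOUND: u32 = 4_000_000; // illustrative bound
    let projected = total + tx_cost; // field-wise addition via `ops::Add`
    // Seal if *any* of the commit/prove/execute costs would exceed the bound.
    !projected.any_field_greater_than(GAS_BOUND)
}
```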
-use std::collections::HashMap; -use std::convert::TryFrom; -use std::fmt::Debug; - use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, convert::TryFrom}; + use zksync_config::constants::ZKPORTER_IS_AVAILABLE; use zksync_mini_merkle_tree::MiniMerkleTree; -use crate::circuit::GEOMETRY_CONFIG; -use crate::ethabi::Token; -use crate::l2_to_l1_log::L2ToL1Log; -use crate::web3::signing::keccak256; -use crate::writes::{InitialStorageWrite, RepeatedStorageWrite}; -use crate::{block::L1BatchHeader, H256, KNOWN_CODES_STORAGE_ADDRESS, U256}; +use crate::{ + block::L1BatchHeader, + circuit::GEOMETRY_CONFIG, + ethabi::Token, + l2_to_l1_log::L2ToL1Log, + web3::signing::keccak256, + writes::{InitialStorageWrite, RepeatedStorageWrite}, + H256, KNOWN_CODES_STORAGE_ADDRESS, U256, +}; /// Type that can be serialized for commitment. pub trait SerializeCommitment { /// Size of the structure in bytes. const SERIALIZED_SIZE: usize; - /// The number of objects of this type that can be included in the block. - const LIMIT_PER_BLOCK: usize; + /// The number of objects of this type that can be included in a single L1 batch. + const LIMIT_PER_L1_BATCH: usize; /// Serializes this struct into the provided buffer, which is guaranteed to have byte length /// [`Self::SERIALIZED_SIZE`]. fn serialize_commitment(&self, buffer: &mut [u8]); @@ -48,9 +49,9 @@ pub(crate) fn serialize_commitments(values: &[I]) -> Vec input } -/// Precalculated data for the block that was used in commitment and L1 transaction -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct BlockMetadata { +/// Precalculated data for the L1 batch that was used in commitment and L1 transaction. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct L1BatchMetadata { pub root_hash: H256, pub rollup_last_leaf_index: u64, pub merkle_root_hash: H256, @@ -59,51 +60,53 @@ pub struct BlockMetadata { pub commitment: H256, pub l2_l1_messages_compressed: Vec, pub l2_l1_merkle_root: H256, - pub block_meta_params: BlockMetaParameters, + pub block_meta_params: L1BatchMetaParameters, pub aux_data_hash: H256, pub meta_parameters_hash: H256, pub pass_through_data_hash: H256, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct BlockWithMetadata { +pub struct L1BatchWithMetadata { pub header: L1BatchHeader, - pub metadata: BlockMetadata, + pub metadata: L1BatchMetadata, pub factory_deps: Vec>, } -impl BlockWithMetadata { +impl L1BatchWithMetadata { pub fn new( header: L1BatchHeader, - metadata: BlockMetadata, + metadata: L1BatchMetadata, unsorted_factory_deps: HashMap>, ) -> Self { Self { - factory_deps: Self::factory_deps_in_appearance_order(&header, &unsorted_factory_deps), + factory_deps: Self::factory_deps_in_appearance_order(&header, &unsorted_factory_deps) + .map(<[u8]>::to_vec) + .collect(), header, metadata, } } - /// Creates an array of factory deps in the order in which they appeared in a block - fn factory_deps_in_appearance_order( - header: &L1BatchHeader, - unsorted_factory_deps: &HashMap>, - ) -> Vec> { - let mut result = Vec::with_capacity(unsorted_factory_deps.len()); - - for log in &header.l2_to_l1_logs { + /// Iterates over factory deps in the order in which they appeared in this L1 batch. 
+ pub fn factory_deps_in_appearance_order<'a>( + header: &'a L1BatchHeader, + unsorted_factory_deps: &'a HashMap>, + ) -> impl Iterator + 'a { + header.l2_to_l1_logs.iter().filter_map(move |log| { if log.sender == KNOWN_CODES_STORAGE_ADDRESS { - result.push( - unsorted_factory_deps - .get(&log.key) - .unwrap_or_else(|| panic!("Failed to get bytecode that was marked as known on L2 block: bytecodehash: {:?}, block number {:?}", &log.key, header.number)) - .clone(), - ); + let bytecode = unsorted_factory_deps.get(&log.key).unwrap_or_else(|| { + panic!( + "Failed to get bytecode that was marked as known: bytecode_hash {:?}, \ + L1 batch number {:?}", + log.key, header.number + ); + }); + Some(bytecode.as_slice()) + } else { + None } - } - - result + }) } pub fn l1_header_data(&self) -> Token { @@ -164,7 +167,7 @@ impl BlockWithMetadata { impl SerializeCommitment for L2ToL1Log { const SERIALIZED_SIZE: usize = 88; - const LIMIT_PER_BLOCK: usize = GEOMETRY_CONFIG.limit_for_l1_messages_merklizer as usize; + const LIMIT_PER_L1_BATCH: usize = GEOMETRY_CONFIG.limit_for_l1_messages_merklizer as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { buffer[0] = self.shard_id; @@ -178,7 +181,8 @@ impl SerializeCommitment for L2ToL1Log { impl SerializeCommitment for InitialStorageWrite { const SERIALIZED_SIZE: usize = 64; - const LIMIT_PER_BLOCK: usize = GEOMETRY_CONFIG.limit_for_initial_writes_pubdata_hasher as usize; + const LIMIT_PER_L1_BATCH: usize = + GEOMETRY_CONFIG.limit_for_initial_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { self.key.to_little_endian(&mut buffer[0..32]); @@ -188,7 +192,7 @@ impl SerializeCommitment for InitialStorageWrite { impl SerializeCommitment for RepeatedStorageWrite { const SERIALIZED_SIZE: usize = 40; - const LIMIT_PER_BLOCK: usize = + const LIMIT_PER_L1_BATCH: usize = GEOMETRY_CONFIG.limit_for_repeated_writes_pubdata_hasher as usize; fn serialize_commitment(&self, buffer: &mut [u8]) { @@ -199,7 +203,7 @@ impl SerializeCommitment for RepeatedStorageWrite { /// Block Output produced by Virtual Machine #[derive(Debug, Clone)] -struct BlockAuxiliaryOutput { +struct L1BatchAuxiliaryOutput { // We use initial fields for debugging #[allow(dead_code)] l2_l1_logs: Vec, @@ -216,7 +220,7 @@ struct BlockAuxiliaryOutput { repeated_writes_hash: H256, } -impl BlockAuxiliaryOutput { +impl L1BatchAuxiliaryOutput { fn new( l2_l1_logs: Vec, initial_writes: Vec, @@ -235,7 +239,7 @@ impl BlockAuxiliaryOutput { .map(|chunk| <[u8; L2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); // ^ Skip first 4 bytes of the serialized logs (i.e., the number of logs). let l2_l1_logs_merkle_root = - MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_BLOCK).merkle_root(); + MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_L1_BATCH).merkle_root(); Self { l2_l1_logs_compressed, @@ -267,16 +271,15 @@ impl BlockAuxiliaryOutput { } } -/// Meta parameters for block. They are the same for each block per run, excluding timestamp. -/// We keep timestamp in seconds here for consistency with the crypto team -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct BlockMetaParameters { +/// Meta parameters for an L1 batch. They are the same for each L1 batch per run. 
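Returning `impl Iterator<Item = &[u8]>` from `factory_deps_in_appearance_order` (instead of the old owned `Vec<Vec<u8>>`) lets callers choose whether to allocate. A sketch of both call styles, assuming the signature above and a dep map keyed by bytecode hash (`H256`):

```rust
use std::collections::HashMap;
use zksync_types::{block::L1BatchHeader, commitment::L1BatchWithMetadata, H256};

// Borrowing style: inspect the deps without copying any bytecode.
fn total_dep_bytes(header: &L1BatchHeader, deps: &HashMap<H256, Vec<u8>>) -> usize {
    L1BatchWithMetadata::factory_deps_in_appearance_order(header, deps)
        .map(<[u8]>::len)
        .sum()
}

// Owning style, as `L1BatchWithMetadata::new` does when it must store the deps.
fn owned_deps(header: &L1BatchHeader, deps: &HashMap<H256, Vec<u8>>) -> Vec<Vec<u8>> {
    L1BatchWithMetadata::factory_deps_in_appearance_order(header, deps)
        .map(<[u8]>::to_vec)
        .collect()
}
```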
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, } -impl BlockMetaParameters { +impl L1BatchMetaParameters { pub fn to_bytes(&self) -> Vec { const SERIALIZED_SIZE: usize = 4 + 1 + 32 + 32; let mut result = Vec::with_capacity(SERIALIZED_SIZE); @@ -298,11 +301,11 @@ struct RootState { } #[derive(Debug, Clone, Serialize, Deserialize)] -struct BlockPassThroughData { +struct L1BatchPassThroughData { shared_states: Vec, } -impl BlockPassThroughData { +impl L1BatchPassThroughData { pub fn to_bytes(&self) -> Vec { // We assume that currently we have only two shared state: Rollup and ZkPorter where porter is always zero const SERIALIZED_SIZE: usize = 8 + 32 + 8 + 32; @@ -325,21 +328,21 @@ impl BlockPassThroughData { } #[derive(Debug, Clone)] -pub struct BlockCommitment { - pass_through_data: BlockPassThroughData, - auxiliary_output: BlockAuxiliaryOutput, - meta_parameters: BlockMetaParameters, +pub struct L1BatchCommitment { + pass_through_data: L1BatchPassThroughData, + auxiliary_output: L1BatchAuxiliaryOutput, + meta_parameters: L1BatchMetaParameters, } #[derive(Debug, Clone)] -pub struct BlockCommitmentHash { +pub struct L1BatchCommitmentHash { pub pass_through_data: H256, pub aux_output: H256, pub meta_parameters: H256, pub commitment: H256, } -impl BlockCommitment { +impl L1BatchCommitment { pub fn new( l2_to_l1_logs: Vec, rollup_last_leaf_index: u64, @@ -349,27 +352,27 @@ impl BlockCommitment { bootloader_code_hash: H256, default_aa_code_hash: H256, ) -> Self { - let meta_parameters = BlockMetaParameters { + let meta_parameters = L1BatchMetaParameters { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash, default_aa_code_hash, }; Self { - pass_through_data: BlockPassThroughData { + pass_through_data: L1BatchPassThroughData { shared_states: vec![ RootState { last_leaf_index: rollup_last_leaf_index, root_hash: rollup_root_hash, }, - // Despite the fact, that zk_porter is not available we have to add params about it. + // Despite the fact that zk_porter is not available we have to add params about it. 
RootState { last_leaf_index: 0, root_hash: H256::zero(), }, ], }, - auxiliary_output: BlockAuxiliaryOutput::new( + auxiliary_output: L1BatchAuxiliaryOutput::new( l2_to_l1_logs, initial_writes, repeated_writes, @@ -378,7 +381,7 @@ impl BlockCommitment { } } - pub fn meta_parameters(&self) -> BlockMetaParameters { + pub fn meta_parameters(&self) -> L1BatchMetaParameters { self.meta_parameters.clone() } @@ -410,7 +413,7 @@ impl BlockCommitment { self.auxiliary_output.repeated_writes_hash } - pub fn hash(&self) -> BlockCommitmentHash { + pub fn hash(&self) -> L1BatchCommitmentHash { let mut result = vec![]; let pass_through_data_hash = self.pass_through_data.hash(); result.extend_from_slice(pass_through_data_hash.as_bytes()); @@ -420,7 +423,7 @@ impl BlockCommitment { result.extend_from_slice(auxiliary_output_hash.as_bytes()); let hash = keccak256(&result); let commitment = H256::from_slice(&hash); - BlockCommitmentHash { + L1BatchCommitmentHash { pass_through_data: pass_through_data_hash, aux_output: auxiliary_output_hash, meta_parameters: metadata_hash, @@ -435,7 +438,7 @@ mod tests { use serde_with::serde_as; use crate::commitment::{ - BlockAuxiliaryOutput, BlockCommitment, BlockMetaParameters, BlockPassThroughData, + L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchMetaParameters, L1BatchPassThroughData, }; use crate::l2_to_l1_log::L2ToL1Log; use crate::writes::{InitialStorageWrite, RepeatedStorageWrite}; @@ -481,9 +484,9 @@ mod tests { #[derive(Debug, Serialize, Deserialize)] struct CommitmentTest { - pass_through_data: BlockPassThroughData, + pass_through_data: L1BatchPassThroughData, auxiliary_input: BlockAuxiliaryInput, - meta_parameters: BlockMetaParameters, + meta_parameters: L1BatchMetaParameters, expected_outputs: ExpectedOutput, } @@ -500,18 +503,20 @@ mod tests { .initial_writes .clone() .into_iter() - .map(|a| InitialStorageWrite { + .enumerate() + .map(|(index, a)| InitialStorageWrite { + index: index as u64 + 1, key: U256::from_dec_str(&a.key).unwrap(), value: a.value, }) .collect(); - let auxiliary_output = BlockAuxiliaryOutput::new( + let auxiliary_output = L1BatchAuxiliaryOutput::new( commitment_test.auxiliary_input.l2_l1_logs.clone(), initial_writes, commitment_test.auxiliary_input.repeated_writes.clone(), ); - let commitment = BlockCommitment { + let commitment = L1BatchCommitment { pass_through_data: commitment_test.pass_through_data, auxiliary_output, meta_parameters: commitment_test.meta_parameters, diff --git a/core/lib/types/src/contract_verification_api.rs b/core/lib/types/src/contract_verification_api.rs new file mode 100644 index 000000000000..a7feb5116f2f --- /dev/null +++ b/core/lib/types/src/contract_verification_api.rs @@ -0,0 +1,277 @@ +use std::{collections::HashMap, fmt}; + +use chrono::{DateTime, Utc}; +use serde::{ + de::{Deserializer, Error, MapAccess, Unexpected, Visitor}, + Deserialize, Serialize, +}; + +use crate::{Address, Bytes}; + +pub use crate::Execute as ExecuteData; + +#[derive(Debug, Clone, Serialize)] +#[serde(tag = "codeFormat", content = "sourceCode")] +pub enum SourceCodeData { + #[serde(rename = "solidity-single-file")] + SolSingleFile(String), + #[serde(rename = "solidity-standard-json-input")] + StandardJsonInput(serde_json::Map), + #[serde(rename = "vyper-multi-file")] + VyperMultiFile(HashMap), + #[serde(rename = "yul-single-file")] + YulSingleFile(String), +} + +impl SourceCodeData { + pub fn compiler_type(&self) -> CompilerType { + match self { + SourceCodeData::SolSingleFile(_) + | SourceCodeData::StandardJsonInput(_) + | 
SourceCodeData::YulSingleFile(_) => CompilerType::Solc, + SourceCodeData::VyperMultiFile(_) => CompilerType::Vyper, + } + } +} + +// Implementing Custom deserializer which deserializes `SourceCodeData` +// as `SingleFile` if `codeFormat` is not specified. +// Serde doesn't support this feature: https://github.com/serde-rs/serde/issues/2231 +impl<'de> Deserialize<'de> for SourceCodeData { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_map(SourceCodeVisitor) + } +} + +struct SourceCodeVisitor; + +impl<'de> Visitor<'de> for SourceCodeVisitor { + type Value = SourceCodeData; + fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("source code data") + } + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut source_code = None; + let mut r#type = None; + while let Some(key) = map.next_key::()? { + match &*key { + "sourceCode" => source_code = Some(map.next_value::()?), + "codeFormat" => r#type = Some(map.next_value::()?), + _ => continue, + } + } + let result = match r#type.as_deref() { + Some("solidity-single-file") | None => { + let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; + SourceCodeData::SolSingleFile( + value + .as_str() + .ok_or_else(|| { + A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) + })? + .to_string(), + ) + } + Some("yul-single-file") => { + let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; + SourceCodeData::YulSingleFile( + value + .as_str() + .ok_or_else(|| { + A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) + })? + .to_string(), + ) + } + Some("solidity-standard-json-input") => { + let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; + SourceCodeData::StandardJsonInput( + value + .as_object() + .ok_or_else(|| { + A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) + })? + .clone(), + ) + } + Some("vyper-multi-file") => { + let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; + let obj = value + .as_object() + .ok_or_else(|| { + A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) + })? 
+ .clone(); + let sources = serde_json::from_value(serde_json::Value::Object(obj)) + .map_err(|_| A::Error::custom("invalid object"))?; + SourceCodeData::VyperMultiFile(sources) + } + Some(x) => { + return Err(A::Error::unknown_variant( + x, + &[ + "solidity-single-file", + "solidity-standard-json-input", + "yul-single-file", + "vyper-multi-file", + ], + )) + } + }; + Ok(result) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct VerificationIncomingRequest { + pub contract_address: Address, + #[serde(flatten)] + pub source_code_data: SourceCodeData, + pub contract_name: String, + #[serde(flatten)] + pub compiler_versions: CompilerVersions, + pub optimization_used: bool, + pub optimizer_mode: Option, + #[serde(default)] + pub constructor_arguments: Bytes, + #[serde(default)] + pub is_system: bool, +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +pub enum CompilerType { + Solc, + Vyper, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum CompilerVersions { + #[serde(rename_all = "camelCase")] + Solc { + compiler_zksolc_version: String, + compiler_solc_version: String, + }, + #[serde(rename_all = "camelCase")] + Vyper { + compiler_zkvyper_version: String, + compiler_vyper_version: String, + }, +} + +impl CompilerVersions { + pub fn compiler_type(&self) -> CompilerType { + match self { + CompilerVersions::Solc { .. } => CompilerType::Solc, + CompilerVersions::Vyper { .. } => CompilerType::Vyper, + } + } + + pub fn zk_compiler_version(&self) -> String { + match self { + CompilerVersions::Solc { + compiler_zksolc_version, + .. + } => compiler_zksolc_version.clone(), + CompilerVersions::Vyper { + compiler_zkvyper_version, + .. + } => compiler_zkvyper_version.clone(), + } + } + + pub fn compiler_version(&self) -> String { + match self { + CompilerVersions::Solc { + compiler_solc_version, + .. + } => compiler_solc_version.clone(), + CompilerVersions::Vyper { + compiler_vyper_version, + .. 
+ } => compiler_vyper_version.clone(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct VerificationRequest { + pub id: usize, + #[serde(flatten)] + pub req: VerificationIncomingRequest, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CompilationArtifacts { + pub bytecode: Vec, + pub abi: serde_json::Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct VerificationInfo { + pub request: VerificationRequest, + pub artifacts: CompilationArtifacts, + pub verified_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct VerificationRequestStatus { + pub status: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub compilation_errors: Option>, +} + +#[derive(Debug)] +pub enum DeployContractCalldata { + Deploy(Vec), + Ignore, +} + +#[cfg(test)] +mod tests { + use super::SourceCodeData; + + #[test] + fn source_code_deserialization() { + let single_file_str = r#"{"codeFormat": "solidity-single-file", "sourceCode": "text"}"#; + let single_file_result = serde_json::from_str::(single_file_str); + assert!(matches!( + single_file_result, + Ok(SourceCodeData::SolSingleFile(_)) + )); + + let stand_json_input_str = + r#"{"codeFormat": "solidity-standard-json-input", "sourceCode": {}}"#; + let stand_json_input_result = serde_json::from_str::(stand_json_input_str); + assert!(matches!( + stand_json_input_result, + Ok(SourceCodeData::StandardJsonInput(_)) + )); + + let type_not_specified_str = r#"{"sourceCode": "text"}"#; + let type_not_specified_result = + serde_json::from_str::(type_not_specified_str); + assert!(matches!( + type_not_specified_result, + Ok(SourceCodeData::SolSingleFile(_)) + )); + + let type_not_specified_object_str = r#"{"sourceCode": {}}"#; + let type_not_specified_object_result = + serde_json::from_str::(type_not_specified_object_str); + assert!(type_not_specified_object_result.is_err()); + } +} diff --git a/core/lib/types/src/contracts.rs b/core/lib/types/src/contracts.rs new file mode 100644 index 000000000000..6b72375202a4 --- /dev/null +++ b/core/lib/types/src/contracts.rs @@ -0,0 +1,75 @@ +use std::mem; + +use crate::{ + ethabi::Token, + web3::contract::{tokens::Tokenizable, Error}, + Address, +}; + +/// Multicall3 contract aggregate method input vector struct. +pub struct Multicall3Call { + pub target: Address, + pub allow_failure: bool, + pub calldata: Vec, +} + +impl Tokenizable for Multicall3Call { + fn into_token(self) -> Token { + Token::Tuple(vec![ + self.target.into_token(), + self.allow_failure.into_token(), + self.calldata.into_token(), + ]) + } + fn from_token(token: Token) -> Result { + let Token::Tuple(mut result_token) = token else { + return Err(error(&[token], "Multicall3Call")); + }; + let [Token::Address(target), Token::Bool(allow_failure), Token::Bytes(calldata)] = + result_token.as_mut_slice() + else { + return Err(error(&result_token, "Multicall3Call")); + }; + + Ok(Multicall3Call { + target: *target, + allow_failure: *allow_failure, + calldata: mem::take(calldata), + }) + } +} + +/// Multicall3 contract call's output vector struct. 
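A round-trip sketch for the `Tokenizable` impl of `Multicall3Call` above; the dummy values and the `zksync_types::web3` import path (mirroring the `crate::web3` re-export this module uses) are assumptions:

```rust
use zksync_types::contracts::Multicall3Call; // module introduced in this diff
use zksync_types::web3::contract::tokens::Tokenizable;
use zksync_types::Address;

fn main() {
    let call = Multicall3Call {
        target: Address::repeat_byte(0x11),
        allow_failure: false,
        calldata: vec![0xde, 0xad],
    };
    // `into_token` packs the struct as a 3-tuple; `from_token` destructures it
    // back, taking the calldata without an extra copy via `mem::take`.
    let decoded = Multicall3Call::from_token(call.into_token()).expect("round trip");
    assert_eq!(decoded.calldata, vec![0xde, 0xad]);
    assert!(!decoded.allow_failure);
}
```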
+pub struct Multicall3Result { + pub success: bool, + pub return_data: Vec, +} + +impl Tokenizable for Multicall3Result { + fn from_token(token: Token) -> Result { + let Token::Tuple(mut result_token) = token else { + return Err(error(&[token], "Multicall3Result")); + }; + let [Token::Bool(success), Token::Bytes(return_data)] = result_token.as_mut_slice() else { + return Err(error(&result_token, "Multicall3Result")); + }; + + Ok(Multicall3Result { + success: *success, + return_data: mem::take(return_data), + }) + } + + fn into_token(self) -> Token { + Token::Tuple(vec![ + Token::Bool(self.success), + Token::Bytes(self.return_data), + ]) + } +} + +fn error(token: &[Token], result_struct_name: &str) -> Error { + Error::InvalidOutputType(format!( + "Expected `{result_struct_name}` token, got token: {token:?}" + )) +} diff --git a/core/lib/types/src/explorer_api.rs b/core/lib/types/src/explorer_api.rs deleted file mode 100644 index 546206766f9a..000000000000 --- a/core/lib/types/src/explorer_api.rs +++ /dev/null @@ -1,587 +0,0 @@ -use serde::de::{Deserializer, Error, MapAccess, Unexpected, Visitor}; -use std::{collections::HashMap, fmt}; -use zksync_contracts::BaseSystemContractsHashes; - -use bigdecimal::BigDecimal; -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; - -use crate::{api::Log, Address, Bytes, Execute, L1BatchNumber, MiniblockNumber, Nonce, H256, U256}; - -use serde_with::rust::display_fromstr::deserialize as deserialize_fromstr; - -pub use crate::Execute as ExecuteData; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] -#[serde(rename_all = "camelCase")] -pub enum PaginationDirection { - Newer, - Older, -} - -#[derive(Debug, Deserialize, Serialize, Clone, Copy)] -#[serde(rename_all = "camelCase")] -pub struct PaginationQuery { - // There is known problem with serde flatten and serde_urlencoded. - // It is described here https://github.com/nox/serde_urlencoded/issues/33 - // A workaround is described here https://docs.rs/serde_qs/0.9.1/serde_qs/index.html#flatten-workaround. - // It includes using of `deserialize_with` - #[serde(deserialize_with = "deserialize_fromstr")] - pub limit: usize, - #[serde(deserialize_with = "deserialize_fromstr", default)] - pub offset: usize, - pub direction: PaginationDirection, -} - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct BlocksQuery { - pub from: Option, - #[serde(flatten)] - pub pagination: PaginationQuery, -} - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct L1BatchesQuery { - pub from: Option, - #[serde(flatten)] - pub pagination: PaginationQuery, -} - -#[derive(Debug, Clone, Copy)] -pub struct TxPosition { - pub block_number: MiniblockNumber, - pub tx_index: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone, Copy)] -#[serde(rename_all = "camelCase")] -pub struct TransactionsQuery { - pub from_block_number: Option, - pub from_tx_index: Option, - pub block_number: Option, - pub l1_batch_number: Option, - pub address: Option
<Address>, - pub account_address: Option<Address>, - pub contract_address: Option<Address>
, - #[serde(flatten)] - pub pagination: PaginationQuery, -} - -impl TransactionsQuery { - pub fn tx_position(&self) -> Option { - self.from_block_number.map(|block_number| TxPosition { - block_number, - tx_index: self.from_tx_index, - }) - } -} - -#[derive(Debug, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionsResponse { - pub list: Vec, - pub total: usize, -} - -#[derive(Debug, Serialize, Deserialize, Clone, Copy)] -#[serde(rename_all = "camelCase")] -pub struct EventsQuery { - pub from_block_number: Option, - pub contract_address: Option
, - #[serde(flatten)] - pub pagination: PaginationQuery, -} - -#[derive(Debug, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct EventsResponse { - pub list: Vec, - pub total: usize, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "camelCase")] -pub enum TransactionData { - Execute(ExecuteData), -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum BlockStatus { - Sealed, - Verified, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum TransactionStatus { - Pending, - Included, - Verified, - Failed, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BlockPageItem { - pub number: MiniblockNumber, - pub l1_tx_count: usize, - pub l2_tx_count: usize, - pub hash: Option, - pub status: BlockStatus, - pub timestamp: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionResponse { - #[serde(flatten)] - pub tx: TransactionDetails, - pub logs: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionDetails { - pub transaction_hash: H256, - pub data: Execute, - pub is_l1_originated: bool, - pub status: TransactionStatus, - pub fee: U256, - pub nonce: Option, - pub block_number: Option, - pub l1_batch_number: Option, - pub block_hash: Option, - pub index_in_block: Option, - pub initiator_address: Address, - pub received_at: DateTime, - pub miniblock_timestamp: Option, - pub eth_commit_tx_hash: Option, - pub eth_prove_tx_hash: Option, - pub eth_execute_tx_hash: Option, - pub erc20_transfers: Vec, - /// It is `Some` only if the transaction calls `transfer` method of some ERC20 token. 
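Review note on this deletion: the duplicated field sets of the old `BlockDetails`/`L1BatchDetails` are replaced in `api/mod.rs` by `BlockDetailsBase` plus `#[serde(flatten)]`, which keeps the JSON wire format flat. A minimal, self-contained sketch (names and fields illustrative):

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Base {
    timestamp: u64,
    l1_tx_count: usize,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Details {
    number: u32,
    #[serde(flatten)]
    base: Base, // serialized at the top level, not as a nested object
}

fn main() {
    let details = Details {
        number: 1,
        base: Base { timestamp: 42, l1_tx_count: 7 },
    };
    let json = serde_json::to_string(&details).unwrap();
    assert_eq!(json, r#"{"number":1,"timestamp":42,"l1TxCount":7}"#);
}
```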
- #[serde(skip_serializing_if = "Option::is_none")] - pub transfer: Option, - pub balance_changes: Vec, - pub r#type: u32, -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Erc20TransferInfo { - pub token_info: ExplorerTokenInfo, - pub from: Address, - pub to: Address, - pub amount: U256, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum BalanceChangeType { - Transfer, - Deposit, - Withdrawal, - Fee, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BalanceChangeInfo { - pub token_info: ExplorerTokenInfo, - pub from: Address, - pub to: Address, - pub amount: U256, - pub r#type: BalanceChangeType, -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct ExplorerTokenInfo { - pub l1_address: Address, - pub l2_address: Address, - pub address: Address, - pub symbol: String, - pub name: String, - pub decimals: u8, - pub usd_price: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BalanceItem { - pub token_info: ExplorerTokenInfo, - pub balance: U256, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum AccountType { - EOA, - Contract, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct AccountDetails { - pub address: Address, - pub balances: HashMap, - pub sealed_nonce: Nonce, - pub verified_nonce: Nonce, - pub account_type: AccountType, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ContractDetails { - #[serde(flatten)] - pub info: ContractBasicInfo, - #[serde(flatten)] - pub stats: ContractStats, - pub balances: HashMap, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", tag = "type", content = "info")] -pub enum AddressDetails { - Account(AccountDetails), - Contract(ContractDetails), -} - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -#[serde(rename_all = "camelCase")] -pub struct ContractStats { - pub total_transactions: usize, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ContractBasicInfo { - pub address: Address, - pub bytecode: Bytes, - pub creator_address: Address, - pub creator_tx_hash: H256, - pub created_in_block_number: MiniblockNumber, - pub verification_info: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BlockDetails { - pub number: MiniblockNumber, - pub l1_batch_number: L1BatchNumber, - pub timestamp: u64, - pub l1_tx_count: usize, - pub l2_tx_count: usize, - pub root_hash: Option, - pub status: BlockStatus, - pub commit_tx_hash: Option, - pub committed_at: Option>, - pub prove_tx_hash: Option, - pub proven_at: Option>, - pub execute_tx_hash: Option, - pub executed_at: Option>, - pub l1_gas_price: u64, - pub l2_fair_gas_price: u64, - pub base_system_contracts_hashes: BaseSystemContractsHashes, - pub operator_address: Address, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct L1BatchDetails { - pub number: L1BatchNumber, - pub timestamp: u64, - pub l1_tx_count: usize, - pub l2_tx_count: usize, - pub root_hash: Option, - pub status: BlockStatus, - pub commit_tx_hash: Option, - pub committed_at: Option>, - pub prove_tx_hash: 
Option, - pub proven_at: Option>, - pub execute_tx_hash: Option, - pub executed_at: Option>, - pub l1_gas_price: u64, - pub l2_fair_gas_price: u64, - pub base_system_contracts_hashes: BaseSystemContractsHashes, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct L1BatchPageItem { - pub number: L1BatchNumber, - pub timestamp: u64, - pub l1_tx_count: usize, - pub l2_tx_count: usize, - pub root_hash: Option, - pub status: BlockStatus, -} - -#[derive(Debug, Clone, Serialize)] -#[serde(tag = "codeFormat", content = "sourceCode")] -pub enum SourceCodeData { - #[serde(rename = "solidity-single-file")] - SolSingleFile(String), - #[serde(rename = "solidity-standard-json-input")] - StandardJsonInput(serde_json::Map), - #[serde(rename = "vyper-multi-file")] - VyperMultiFile(HashMap), - #[serde(rename = "yul-single-file")] - YulSingleFile(String), -} - -impl SourceCodeData { - pub fn compiler_type(&self) -> CompilerType { - match self { - SourceCodeData::SolSingleFile(_) - | SourceCodeData::StandardJsonInput(_) - | SourceCodeData::YulSingleFile(_) => CompilerType::Solc, - SourceCodeData::VyperMultiFile(_) => CompilerType::Vyper, - } - } -} - -// Implementing Custom deserializer which deserializes `SourceCodeData` -// as `SingleFile` if `codeFormat` is not specified. -// Serde doesn't support this feature: https://github.com/serde-rs/serde/issues/2231 -impl<'de> Deserialize<'de> for SourceCodeData { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_map(SourceCodeVisitor) - } -} - -struct SourceCodeVisitor; - -impl<'de> Visitor<'de> for SourceCodeVisitor { - type Value = SourceCodeData; - fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("source code data") - } - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut source_code = None; - let mut r#type = None; - while let Some(key) = map.next_key::()? { - match &*key { - "sourceCode" => source_code = Some(map.next_value::()?), - "codeFormat" => r#type = Some(map.next_value::()?), - _ => continue, - } - } - let result = match r#type.as_deref() { - Some("solidity-single-file") | None => { - let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; - SourceCodeData::SolSingleFile( - value - .as_str() - .ok_or_else(|| { - A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) - })? - .to_string(), - ) - } - Some("yul-single-file") => { - let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; - SourceCodeData::YulSingleFile( - value - .as_str() - .ok_or_else(|| { - A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) - })? - .to_string(), - ) - } - Some("solidity-standard-json-input") => { - let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; - SourceCodeData::StandardJsonInput( - value - .as_object() - .ok_or_else(|| { - A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) - })? - .clone(), - ) - } - Some("vyper-multi-file") => { - let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; - let obj = value - .as_object() - .ok_or_else(|| { - A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) - })? 
- .clone(); - let sources = serde_json::from_value(serde_json::Value::Object(obj)) - .map_err(|_| A::Error::custom("invalid object"))?; - SourceCodeData::VyperMultiFile(sources) - } - Some(x) => { - return Err(A::Error::unknown_variant( - x, - &[ - "solidity-single-file", - "solidity-standard-json-input", - "yul-single-file", - "vyper-multi-file", - ], - )) - } - }; - Ok(result) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct VerificationIncomingRequest { - pub contract_address: Address, - #[serde(flatten)] - pub source_code_data: SourceCodeData, - pub contract_name: String, - #[serde(flatten)] - pub compiler_versions: CompilerVersions, - pub optimization_used: bool, - pub optimizer_mode: Option, - #[serde(default)] - pub constructor_arguments: Bytes, - #[serde(default)] - pub is_system: bool, -} - -#[derive(Debug, Eq, PartialEq, Clone, Copy)] -pub enum CompilerType { - Solc, - Vyper, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum CompilerVersions { - #[serde(rename_all = "camelCase")] - Solc { - compiler_zksolc_version: String, - compiler_solc_version: String, - }, - #[serde(rename_all = "camelCase")] - Vyper { - compiler_zkvyper_version: String, - compiler_vyper_version: String, - }, -} - -impl CompilerVersions { - pub fn compiler_type(&self) -> CompilerType { - match self { - CompilerVersions::Solc { .. } => CompilerType::Solc, - CompilerVersions::Vyper { .. } => CompilerType::Vyper, - } - } - - pub fn zk_compiler_version(&self) -> String { - match self { - CompilerVersions::Solc { - compiler_zksolc_version, - .. - } => compiler_zksolc_version.clone(), - CompilerVersions::Vyper { - compiler_zkvyper_version, - .. - } => compiler_zkvyper_version.clone(), - } - } - - pub fn compiler_version(&self) -> String { - match self { - CompilerVersions::Solc { - compiler_solc_version, - .. - } => compiler_solc_version.clone(), - CompilerVersions::Vyper { - compiler_vyper_version, - .. 
- } => compiler_vyper_version.clone(), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct VerificationRequest { - pub id: usize, - #[serde(flatten)] - pub req: VerificationIncomingRequest, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct CompilationArtifacts { - pub bytecode: Vec, - pub abi: serde_json::Value, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct VerificationInfo { - pub request: VerificationRequest, - pub artifacts: CompilationArtifacts, - pub verified_at: DateTime, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct VerificationRequestStatus { - pub status: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub error: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub compilation_errors: Option>, -} - -#[derive(Debug)] -pub enum DeployContractCalldata { - Deploy(Vec), - Ignore, -} - -#[cfg(test)] -mod tests { - use super::SourceCodeData; - - #[test] - fn source_code_deserialization() { - let single_file_str = r#"{"codeFormat": "solidity-single-file", "sourceCode": "text"}"#; - let single_file_result = serde_json::from_str::(single_file_str); - assert!(matches!( - single_file_result, - Ok(SourceCodeData::SolSingleFile(_)) - )); - - let stand_json_input_str = - r#"{"codeFormat": "solidity-standard-json-input", "sourceCode": {}}"#; - let stand_json_input_result = serde_json::from_str::(stand_json_input_str); - assert!(matches!( - stand_json_input_result, - Ok(SourceCodeData::StandardJsonInput(_)) - )); - - let type_not_specified_str = r#"{"sourceCode": "text"}"#; - let type_not_specified_result = - serde_json::from_str::(type_not_specified_str); - assert!(matches!( - type_not_specified_result, - Ok(SourceCodeData::SolSingleFile(_)) - )); - - let type_not_specified_object_str = r#"{"sourceCode": {}}"#; - let type_not_specified_object_result = - serde_json::from_str::(type_not_specified_object_str); - assert!(type_not_specified_object_result.is_err()); - } -} diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index 9c4fff9b11b5..970b8d95664e 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -63,7 +63,7 @@ pub fn encoding_len( const BASE_LEN: usize = 1 + 19 + 5; // All of the fields are encoded as `bytes`, so their encoding takes ceil(len, 32) slots. - // Factory deps are encoded as an array of bytes32. + // For factory deps we only provide hashes, which are encoded as an array of bytes32. 
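The sharpened comment above ("we only provide hashes") deserves a worked example: `bytes` fields are padded to whole 32-byte words, while each factory dep contributes exactly one word for its hash. The local `ceil_div` below is illustrative; the real helper lives in `zksync_utils`:

```rust
fn ceil_div(a: usize, b: usize) -> usize {
    (a + b - 1) / b // rounds up to whole words
}

fn main() {
    // A 100-byte `data` field is padded to ceil(100 / 32) = 4 words.
    assert_eq!(ceil_div(100, 32), 4);
    // An empty field still costs zero words...
    assert_eq!(ceil_div(0, 32), 0);
    // ...and each factory dep adds exactly one bytes32 word (its hash),
    // independent of how large the bytecode itself is.
}
```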
let dynamic_len = ceil_div(data_len, 32) + ceil_div(signature_len, 32) + ceil_div(paymaster_input_len, 32) diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 1dc35ec63562..5438dd7a767b 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -5,7 +5,7 @@ use std::convert::TryFrom; use zksync_basic_types::{ ethabi::{decode, ParamType, Token}, - Address, Log, PriorityOpId, H160, H256, U256, + Address, L1BlockNumber, Log, PriorityOpId, H160, H256, U256, }; use zksync_utils::u256_to_account_address; @@ -15,7 +15,7 @@ use crate::{ l2::TransactionType, priority_op_onchain_data::{PriorityOpOnchainData, PriorityOpOnchainMetadata}, tx::Execute, - ExecuteTransactionCommon, + ExecuteTransactionCommon, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, }; use super::Transaction; @@ -69,6 +69,10 @@ impl TryFrom for PriorityQueueType { } } +pub fn is_l1_tx_type(tx_type: u8) -> bool { + tx_type == PRIORITY_OPERATION_L2_TX_TYPE || tx_type == PROTOCOL_UPGRADE_TX_TYPE +} + #[derive(Default, Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct L1TxCommonData { @@ -152,7 +156,7 @@ impl From for Transaction { } impl TryFrom for L1Tx { - type Error = (); + type Error = &'static str; fn try_from(value: Transaction) -> Result { let Transaction { @@ -166,7 +170,10 @@ impl TryFrom for L1Tx { common_data, received_timestamp_ms, }), - ExecuteTransactionCommon::L2(_) => Err(()), + ExecuteTransactionCommon::L2(_) => Err("Cannot convert L2Tx to L1Tx"), + ExecuteTransactionCommon::ProtocolUpgrade(_) => { + Err("Cannot convert ProtocolUpgradeTx to L1Tx") + } } } } @@ -176,8 +183,8 @@ impl L1Tx { self.common_data.serial_id } - pub fn eth_block(&self) -> u64 { - self.common_data.eth_block + pub fn eth_block(&self) -> L1BlockNumber { + L1BlockNumber(self.common_data.eth_block as u32) } pub fn hash(&self) -> H256 { @@ -249,7 +256,7 @@ impl TryFrom for L1Tx { assert_eq!(transaction.len(), 16); let tx_type = transaction.remove(0).into_uint().unwrap(); - assert_eq!(tx_type.clone(), U256::from(255u8)); // L1TxType + assert_eq!(tx_type.clone(), U256::from(PRIORITY_OPERATION_L2_TX_TYPE)); let sender = transaction.remove(0).into_address().unwrap(); let contract_address = transaction.remove(0).into_address().unwrap(); diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 1a96c5da9a9f..5497a0fdc96b 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -8,7 +8,7 @@ use crate::{ api, tx::primitives::PackedEthSignature, tx::Execute, web3::types::U64, Address, Bytes, EIP712TypedStructure, Eip712Domain, ExecuteTransactionCommon, InputData, L2ChainId, Nonce, StructBuilder, Transaction, EIP_1559_TX_TYPE, EIP_712_TX_TYPE, H256, - PRIORITY_OPERATION_L2_TX_TYPE, U256, + PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use serde::{Deserialize, Serialize}; @@ -30,6 +30,7 @@ pub enum TransactionType { // Eip 712 transaction with additional fields specified for zksync EIP712Transaction = EIP_712_TX_TYPE as u32, PriorityOpTransaction = PRIORITY_OPERATION_L2_TX_TYPE as u32, + ProtocolUpgradeTransaction = PROTOCOL_UPGRADE_TX_TYPE as u32, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -375,7 +376,7 @@ impl From for api::Transaction { } impl TryFrom for L2Tx { - type Error = (); + type Error = &'static str; fn try_from(value: Transaction) -> Result { let Transaction { @@ -384,12 +385,15 @@ impl TryFrom for L2Tx { received_timestamp_ms, } = value; match common_data { - 
ExecuteTransactionCommon::L1(_) => Err(()), + ExecuteTransactionCommon::L1(_) => Err("Cannot convert L1Tx to L2Tx"), ExecuteTransactionCommon::L2(common_data) => Ok(L2Tx { execute, common_data, received_timestamp_ms, }), + ExecuteTransactionCommon::ProtocolUpgrade(_) => { + Err("Cannot convert ProtocolUpgradeTx to L2Tx") + } } } } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index c3145be1926c..0f8bb0d136ca 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -14,9 +14,11 @@ pub use crate::{Nonce, H256, U256, U64}; pub type SerialId = u64; use crate::l2::TransactionType; +use crate::protocol_version::ProtocolUpgradeTxCommonData; pub use event::{VmEvent, VmEventGroupKey}; pub use l1::L1TxCommonData; pub use l2::L2TxCommonData; +pub use protocol_version::{ProtocolUpgrade, ProtocolVersion, ProtocolVersionId}; pub use storage::*; pub use tx::primitives::*; pub use tx::Execute; @@ -28,14 +30,15 @@ pub mod aggregated_operations; pub mod block; pub mod circuit; pub mod commitment; +pub mod contract_verification_api; +pub mod contracts; pub mod event; -pub mod explorer_api; pub mod fee; pub mod l1; pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; -pub mod pubdata_packing; +pub mod protocol_version; pub mod storage; pub mod storage_writes_deduplicator; pub mod system_contracts; @@ -47,8 +50,11 @@ pub mod api; pub mod eth_sender; pub mod helpers; pub mod proofs; +pub mod prover_server_api; pub mod transaction_request; pub mod utils; +pub mod vk_transform; + /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. pub const EIP_712_TX_TYPE: u8 = 0x71; @@ -61,9 +67,12 @@ pub const EIP_2930_TX_TYPE: u8 = 0x01; /// Denotes the first byte of some legacy transaction, which type is unknown to the server. pub const LEGACY_TX_TYPE: u8 = 0x0; -/// Denotes the first byte of some legacy transaction, which type is unknown to the server. +/// Denotes the first byte of the priority transaction. pub const PRIORITY_OPERATION_L2_TX_TYPE: u8 = 0xff; +/// Denotes the first byte of the protocol upgrade transaction. 
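+/// (Illustrative cross-reference:) together with `PRIORITY_OPERATION_L2_TX_TYPE` (0xff), this is exactly the pair of markers recognized by the `is_l1_tx_type` helper added in `l1/mod.rs`.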
+pub const PROTOCOL_UPGRADE_TX_TYPE: u8 = 0xfe; + #[derive(Clone, Serialize, Deserialize)] pub struct Transaction { pub common_data: ExecuteTransactionCommon, @@ -95,6 +104,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(_) => None, ExecuteTransactionCommon::L2(tx) => Some(tx.nonce), + ExecuteTransactionCommon::ProtocolUpgrade(_) => None, } } @@ -106,6 +116,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(tx) => tx.tx_format(), ExecuteTransactionCommon::L2(tx) => tx.transaction_type, + ExecuteTransactionCommon::ProtocolUpgrade(tx) => tx.tx_format(), } } @@ -113,6 +124,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.hash(), ExecuteTransactionCommon::L2(data) => data.hash(), + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.hash(), } } @@ -121,6 +133,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.sender, ExecuteTransactionCommon::L2(data) => data.initiator_address, + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.sender, } } @@ -136,6 +149,7 @@ impl Transaction { paymaster } } + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.sender, } } @@ -143,6 +157,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.gas_limit, ExecuteTransactionCommon::L2(data) => data.fee.gas_limit, + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.gas_limit, } } @@ -150,6 +165,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.max_fee_per_gas, ExecuteTransactionCommon::L2(data) => data.fee.max_fee_per_gas, + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.max_fee_per_gas, } } @@ -157,6 +173,7 @@ impl Transaction { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.gas_per_pubdata_limit, ExecuteTransactionCommon::L2(data) => data.fee.gas_per_pubdata_limit, + ExecuteTransactionCommon::ProtocolUpgrade(data) => data.gas_per_pubdata_limit, } } @@ -175,6 +192,7 @@ impl Transaction { l2_common_data.signature.len(), l2_common_data.paymaster_params.paymaster_input.len(), ), + ExecuteTransactionCommon::ProtocolUpgrade(_) => (0, 0), }; encoding_len( @@ -201,6 +219,7 @@ pub struct InputData { pub enum ExecuteTransactionCommon { L1(L1TxCommonData), L2(L2TxCommonData), + ProtocolUpgrade(ProtocolUpgradeTxCommonData), } impl fmt::Display for ExecuteTransactionCommon { @@ -208,6 +227,9 @@ impl fmt::Display for ExecuteTransactionCommon { match self { ExecuteTransactionCommon::L1(data) => write!(f, "L1TxCommonData: {:?}", data), ExecuteTransactionCommon::L2(data) => write!(f, "L2TxCommonData: {:?}", data), + ExecuteTransactionCommon::ProtocolUpgrade(data) => { + write!(f, "ProtocolUpgradeTxCommonData: {:?}", data) + } } } } diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs index 39a71f645414..28d25900231d 100644 --- a/core/lib/types/src/proofs.rs +++ b/core/lib/types/src/proofs.rs @@ -1,5 +1,6 @@ use std::convert::{TryFrom, TryInto}; use std::fmt::Debug; +use std::net::IpAddr; use std::ops::Add; use std::str::FromStr; @@ -66,7 +67,7 @@ pub struct WitnessGeneratorJobMetadata { /// Represents the sequential number of the proof aggregation round. 
/// Mostly used to be stored in the `aggregation_round` column in the `prover_jobs` table -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub enum AggregationRound { BasicCircuits = 0, LeafAggregation = 1, @@ -428,6 +429,24 @@ pub struct StuckJobs { pub attempts: u64, } +#[derive(Debug, Clone)] +pub struct SocketAddress { + pub host: IpAddr, + pub port: u16, +} + +#[derive(Debug)] +pub enum GpuProverInstanceStatus { + // The instance is available for processing. + Available, + // The instance is running at full capacity. + Full, + // The instance is reserved by a synthesizer. + Reserved, + // The instance is not alive anymore. + Dead, +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs new file mode 100644 index 000000000000..403a2d7e2fb2 --- /dev/null +++ b/core/lib/types/src/protocol_version.rs @@ -0,0 +1,476 @@ +use crate::ethabi::{decode, encode, ParamType, Token}; +use crate::helpers::unix_timestamp_ms; +use crate::web3::contract::{tokens::Detokenize, Error}; +use crate::web3::signing::keccak256; +use crate::{ + Address, Execute, ExecuteTransactionCommon, Log, Transaction, TransactionType, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, +}; +use num_enum::TryFromPrimitive; +use serde::{Deserialize, Serialize}; +use std::convert::{TryFrom, TryInto}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_utils::u256_to_account_address; + +#[repr(u16)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TryFromPrimitive, +)] +pub enum ProtocolVersionId { + Version0 = 0, + Version1, + Version2, + Version3, + Version4, + Version5, + Version6, + Version7, + Version8, + Version9, + Version10, + Version11, + Version12, + Version13, +} + +impl ProtocolVersionId { + pub fn latest() -> Self { + Self::Version12 + } + + pub fn next() -> Self { + Self::Version13 + } +} + +impl Default for ProtocolVersionId { + fn default() -> Self { + Self::latest() + } +} + +impl TryFrom<U256> for ProtocolVersionId { + type Error = String; + + fn try_from(value: U256) -> Result<Self, Self::Error> { + if value > U256::from(u16::MAX) { + Err(format!("unknown protocol version ID: {}", value)) + } else { + (value.as_u32() as u16) + .try_into() + .map_err(|_| format!("unknown protocol version ID: {}", value)) + } + } +} + +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct VerifierParams { + pub recursion_node_level_vk_hash: H256, + pub recursion_leaf_level_vk_hash: H256, + pub recursion_circuits_set_vks_hash: H256, +} + +impl Detokenize for VerifierParams { + fn from_tokens(tokens: Vec<Token>) -> Result<Self, Error> { + if tokens.len() != 1 { + return Err(Error::Abi(crate::ethabi::Error::InvalidData)); + } + + let tokens = match tokens[0].clone() { + Token::Tuple(tokens) => tokens, + _ => return Err(Error::Abi(crate::ethabi::Error::InvalidData)), + }; + + let vks_vec: Vec<H256> = tokens + .into_iter() + .map(|token| H256::from_slice(&token.into_fixed_bytes().unwrap())) + .collect(); + Ok(VerifierParams { + recursion_node_level_vk_hash: vks_vec[0], + recursion_leaf_level_vk_hash: vks_vec[1], + recursion_circuits_set_vks_hash: vks_vec[2], + }) + } +} + +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct L1VerifierConfig { + pub params: VerifierParams, + pub recursion_scheduler_level_vk_hash: H256, +} + +/// Protocol upgrade proposal from L1.
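+/// Decoded from an L1 log whose data carries the ABI-encoded diamond-cut payload (see the `TryFrom<Log>` impl below).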
+/// Most of the fields are optional: if a field is `None`, +/// the corresponding parameter is not changed by the upgrade. +#[derive(Debug, Clone, Default)] +pub struct ProtocolUpgrade { + /// New protocol version ID. + pub id: ProtocolVersionId, + /// New bootloader code hash. + pub bootloader_code_hash: Option<H256>, + /// New default account code hash. + pub default_account_code_hash: Option<H256>, + /// New verifier params. + pub verifier_params: Option<VerifierParams>, + /// New verifier address. + pub verifier_address: Option<Address>,
+ /// Timestamp after which upgrade can be executed. + pub timestamp: u64, + /// L2 upgrade transaction. + pub tx: Option<ProtocolUpgradeTx>, +} + +impl TryFrom<Log> for ProtocolUpgrade { + type Error = crate::ethabi::Error; + + fn try_from(event: Log) -> Result<Self, Self::Error> { + let facet_cut_param_type = ParamType::Tuple(vec![ + ParamType::Address, + ParamType::Uint(8), + ParamType::Bool, + ParamType::Array(Box::new(ParamType::FixedBytes(4))), + ]); + let diamond_cut_data_param_type = ParamType::Tuple(vec![ + ParamType::Array(Box::new(facet_cut_param_type)), + ParamType::Address, + ParamType::Bytes, + ]); + let mut decoded = decode( + &[diamond_cut_data_param_type, ParamType::FixedBytes(32)], + &event.data.0, + )?; + + let init_calldata = match decoded.remove(0) { + Token::Tuple(tokens) => tokens[2].clone().into_bytes().unwrap(), + _ => unreachable!(), + }; + + let transaction_param_type = ParamType::Tuple(vec![ + ParamType::Uint(256), // txType + ParamType::Uint(256), // sender + ParamType::Uint(256), // to + ParamType::Uint(256), // gasLimit + ParamType::Uint(256), // gasPerPubdataLimit + ParamType::Uint(256), // maxFeePerGas + ParamType::Uint(256), // maxPriorityFeePerGas + ParamType::Uint(256), // paymaster + ParamType::Uint(256), // nonce (serial ID) + ParamType::Uint(256), // value + ParamType::FixedArray(Box::new(ParamType::Uint(256)), 4), // reserved + ParamType::Bytes, // calldata + ParamType::Bytes, // signature + ParamType::Array(Box::new(ParamType::Uint(256))), // factory deps + ParamType::Bytes, // paymaster input + ParamType::Bytes, // reservedDynamic + ]); + let verifier_params_type = ParamType::Tuple(vec![ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ]); + + let mut decoded = decode( + &[ParamType::Tuple(vec![ + transaction_param_type, // transaction data + ParamType::Array(Box::new(ParamType::Bytes)), // factory deps + ParamType::FixedBytes(32), // bootloader code hash + ParamType::FixedBytes(32), // default account code hash + ParamType::Address, // verifier address + verifier_params_type, // verifier params + ParamType::Bytes, // l1 custom data + ParamType::Bytes, // l1 post-upgrade custom data + ParamType::Uint(256), // timestamp + ParamType::Uint(256), // version id + ParamType::Address, // allow list address + ])], + &init_calldata[4..], + )?; + + let mut decoded = match decoded.remove(0) { + Token::Tuple(x) => x, + _ => unreachable!(), + }; + + let mut transaction = match decoded.remove(0) { + Token::Tuple(x) => x, + _ => unreachable!(), + }; + let factory_deps = decoded.remove(0).into_array().unwrap(); + + let tx = { + let canonical_tx_hash = H256(keccak256(&encode(&[Token::Tuple(transaction.clone())]))); + + assert_eq!(transaction.len(), 16); + + let tx_type = transaction.remove(0).into_uint().unwrap(); + if tx_type == PROTOCOL_UPGRADE_TX_TYPE.into() { + // There is an upgrade tx. Decoding it.
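+ // (Illustrative note:) the 16 tuple fields are popped in ABI order below; the asserts pin the fields that must stay zero or empty for a valid upgrade tx.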
+ let sender = transaction.remove(0).into_uint().unwrap(); + let sender = u256_to_account_address(&sender); + + let contract_address = transaction.remove(0).into_uint().unwrap(); + let contract_address = u256_to_account_address(&contract_address); + + let gas_limit = transaction.remove(0).into_uint().unwrap(); + + let gas_per_pubdata_limit = transaction.remove(0).into_uint().unwrap(); + + let max_fee_per_gas = transaction.remove(0).into_uint().unwrap(); + + let max_priority_fee_per_gas = transaction.remove(0).into_uint().unwrap(); + assert_eq!(max_priority_fee_per_gas, U256::zero()); + + let paymaster = transaction.remove(0).into_uint().unwrap(); + let paymaster = u256_to_account_address(&paymaster); + assert_eq!(paymaster, Address::zero()); + + let upgrade_id = transaction.remove(0).into_uint().unwrap(); + + let msg_value = transaction.remove(0).into_uint().unwrap(); + + let reserved = transaction + .remove(0) + .into_fixed_array() + .unwrap() + .into_iter() + .map(|token| token.into_uint().unwrap()) + .collect::>(); + assert_eq!(reserved.len(), 4); + + let to_mint = reserved[0]; + let refund_recipient = u256_to_account_address(&reserved[1]); + + // All other reserved fields should be zero + for item in reserved.iter().skip(2) { + assert_eq!(item, &U256::zero()); + } + + let calldata = transaction.remove(0).into_bytes().unwrap(); + + let signature = transaction.remove(0).into_bytes().unwrap(); + assert_eq!(signature.len(), 0); + + let _factory_deps_hashes = transaction.remove(0).into_array().unwrap(); + + let paymaster_input = transaction.remove(0).into_bytes().unwrap(); + assert_eq!(paymaster_input.len(), 0); + + let reserved_dynamic = transaction.remove(0).into_bytes().unwrap(); + assert_eq!(reserved_dynamic.len(), 0); + + let eth_hash = event + .transaction_hash + .expect("Event transaction hash is missing"); + let eth_block = event + .block_number + .expect("Event block number is missing") + .as_u64(); + + let common_data = ProtocolUpgradeTxCommonData { + canonical_tx_hash, + sender, + upgrade_id: (upgrade_id.as_u32() as u16).try_into().unwrap(), + to_mint, + refund_recipient, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + eth_hash, + eth_block, + }; + + let factory_deps = factory_deps + .into_iter() + .map(|t| t.into_bytes().unwrap()) + .collect(); + + let execute = Execute { + contract_address, + calldata: calldata.to_vec(), + factory_deps: Some(factory_deps), + value: msg_value, + }; + + Some(ProtocolUpgradeTx { + common_data, + execute, + received_timestamp_ms: unix_timestamp_ms(), + }) + } else if tx_type == U256::zero() { + // There is no upgrade tx. 
+ None + } else { + panic!("Unexpected tx type {} when decoding upgrade", tx_type); + } + }; + let bootloader_code_hash = H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap()); + let default_account_code_hash = + H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap()); + let verifier_address = decoded.remove(0).into_address().unwrap(); + let mut verifier_params = match decoded.remove(0) { + Token::Tuple(tx) => tx, + _ => unreachable!(), + }; + let recursion_node_level_vk_hash = + H256::from_slice(&verifier_params.remove(0).into_fixed_bytes().unwrap()); + let recursion_leaf_level_vk_hash = + H256::from_slice(&verifier_params.remove(0).into_fixed_bytes().unwrap()); + let recursion_circuits_set_vks_hash = + H256::from_slice(&verifier_params.remove(0).into_fixed_bytes().unwrap()); + + let _l1_custom_data = decoded.remove(0); + let _l1_post_upgrade_custom_data = decoded.remove(0); + let timestamp = decoded.remove(0).into_uint().unwrap(); + let version_id = decoded.remove(0).into_uint().unwrap(); + if version_id > u16::MAX.into() { + panic!("Version ID is too big, max expected is {}", u16::MAX); + } + + let _allow_list_address = decoded.remove(0).into_address().unwrap(); + + Ok(Self { + id: ProtocolVersionId::try_from(version_id.as_u32() as u16) + .expect("Version is not supported"), + bootloader_code_hash: (bootloader_code_hash != H256::zero()) + .then_some(bootloader_code_hash), + default_account_code_hash: (default_account_code_hash != H256::zero()) + .then_some(default_account_code_hash), + verifier_params: (recursion_node_level_vk_hash != H256::zero() + && recursion_leaf_level_vk_hash != H256::zero() + && recursion_circuits_set_vks_hash != H256::zero()) + .then_some(VerifierParams { + recursion_node_level_vk_hash, + recursion_leaf_level_vk_hash, + recursion_circuits_set_vks_hash, + }), + verifier_address: (verifier_address != Address::zero()).then_some(verifier_address), + timestamp: timestamp.as_u64(), + tx, + }) + } +} + +#[derive(Debug, Clone, Default)] +pub struct ProtocolVersion { + /// Protocol version ID + pub id: ProtocolVersionId, + /// Timestamp at which upgrade should be performed + pub timestamp: u64, + /// Verifier configuration + pub l1_verifier_config: L1VerifierConfig, + /// Hashes of base system contracts (bootloader and default account) + pub base_system_contracts_hashes: BaseSystemContractsHashes, + /// Verifier contract address on L1 + pub verifier_address: Address, + /// L2 Upgrade transaction. + pub tx: Option, +} + +impl ProtocolVersion { + /// Returns new protocol version parameters after applying provided upgrade. 
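+ /// (Illustrative semantics:) fields left as `None` in the upgrade keep this version's values; only `id`, `timestamp` and `tx` are always taken from the upgrade.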
+ pub fn apply_upgrade( + &self, + upgrade: ProtocolUpgrade, + new_scheduler_vk_hash: Option<H256>, + ) -> ProtocolVersion { + ProtocolVersion { + id: upgrade.id, + timestamp: upgrade.timestamp, + l1_verifier_config: L1VerifierConfig { + params: upgrade + .verifier_params + .unwrap_or(self.l1_verifier_config.params), + recursion_scheduler_level_vk_hash: new_scheduler_vk_hash + .unwrap_or(self.l1_verifier_config.recursion_scheduler_level_vk_hash), + }, + base_system_contracts_hashes: BaseSystemContractsHashes { + bootloader: upgrade + .bootloader_code_hash + .unwrap_or(self.base_system_contracts_hashes.bootloader), + default_aa: upgrade + .default_account_code_hash + .unwrap_or(self.base_system_contracts_hashes.default_aa), + }, + verifier_address: upgrade.verifier_address.unwrap_or(self.verifier_address), + tx: upgrade.tx, + } + } +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProtocolUpgradeTxCommonData { + /// Sender of the transaction. + pub sender: Address, + /// ID of the upgrade. + pub upgrade_id: ProtocolVersionId, + /// The maximal fee per gas to be used for an L1->L2 transaction. + pub max_fee_per_gas: U256, + /// The maximum amount of gas a transaction can spend at a gas price of 1. + pub gas_limit: U256, + /// The maximum amount of gas per 1 byte of pubdata. + pub gas_per_pubdata_limit: U256, + /// Hash of the corresponding Ethereum transaction. Size should be 32 bytes. + pub eth_hash: H256, + /// Block in which the Ethereum transaction was included. + pub eth_block: u64, + /// Tx hash of the transaction in the zkSync network. Calculated as the encoded transaction data hash. + pub canonical_tx_hash: H256, + /// The amount of ETH that should be minted with this transaction. + pub to_mint: U256, + /// The recipient of the transaction's refund. + pub refund_recipient: Address, +} + +impl ProtocolUpgradeTxCommonData { + pub fn hash(&self) -> H256 { + self.canonical_tx_hash + } + + pub fn tx_format(&self) -> TransactionType { + TransactionType::ProtocolUpgradeTransaction + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProtocolUpgradeTx { + pub execute: Execute, + pub common_data: ProtocolUpgradeTxCommonData, + pub received_timestamp_ms: u64, +} + +impl From<ProtocolUpgradeTx> for Transaction { + fn from(tx: ProtocolUpgradeTx) -> Self { + let ProtocolUpgradeTx { + execute, + common_data, + received_timestamp_ms, + } = tx; + Self { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(common_data), + execute, + received_timestamp_ms, + } + } +} + +impl TryFrom<Transaction> for ProtocolUpgradeTx { + type Error = &'static str; + + fn try_from(value: Transaction) -> Result<Self, Self::Error> { + let Transaction { + common_data, + execute, + received_timestamp_ms, + } = value; + match common_data { + ExecuteTransactionCommon::L1(_) => Err("Cannot convert L1Tx to ProtocolUpgradeTx"), + ExecuteTransactionCommon::L2(_) => Err("Cannot convert L2Tx to ProtocolUpgradeTx"), + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => Ok(ProtocolUpgradeTx { + execute, + common_data, + received_timestamp_ms, + }), + } + } +} diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/types/src/prover_server_api/mod.rs new file mode 100644 index 000000000000..237b1fbc9c51 --- /dev/null +++ b/core/lib/types/src/prover_server_api/mod.rs @@ -0,0 +1,41 @@ +use crate::proofs::PrepareBasicCircuitsJob; +use serde::{Deserialize, Serialize}; +use serde_with::base64::Base64; +use serde_with::serde_as; + +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, 
Serialize, Deserialize)] +pub struct ProofGenerationData { + pub l1_batch_number: L1BatchNumber, + pub data: PrepareBasicCircuitsJob, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ProofGenerationDataRequest {} + +#[derive(Debug, Serialize, Deserialize)] +pub enum ProofGenerationDataResponse { + Success(ProofGenerationData), + Error(String), +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SubmitProofRequest { + // We have to use `RawProof` until the SNARK proof wrapper is implemented: + // https://linear.app/matterlabs/issue/CRY-1/implement-snark-wrapper-for-boojum + pub proof: RawProof, +} + +#[serde_as] +#[derive(Debug, Serialize, Deserialize)] +pub struct RawProof { + #[serde_as(as = "Base64")] + pub proof: Vec<u8>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum SubmitProofResponse { + Success, + Error(String), +} diff --git a/core/lib/types/src/pubdata_packing.rs b/core/lib/types/src/pubdata_packing.rs deleted file mode 100644 index 3f3955a6231a..000000000000 --- a/core/lib/types/src/pubdata_packing.rs +++ /dev/null @@ -1,263 +0,0 @@ -//! Utilities to efficiently pack pubdata (aka storage access slots). -//! -//! Overall the idea is the following: -//! If we have a type with at most X bytes, and *most likely* some leading bytes will be -//! zeroes, we can implement a simple variable-length encoding, compressing the type -//! as `<length><value>`. In case of `uint32` it will allow us to represent the number -//! `0x00000022` as `0x0122`. The first byte represents the length: 1 byte. The second byte represents -//! the value itself. Knowledge about the type of the encoded value is implied: without such knowledge, -//! we wouldn't be able to parse pubdata at all. -//! -//! Drawbacks of such an approach are the following: -//! -//! - In case of no leading zeroes, we spend one more byte per value. This one is minor, because as long -//! as there are more values which *have* leading zeroes, we don't lose anything. -//! - We use complex access keys which may have two components: 32-bit writer ID + 256-bit key, and encoding this -//! pair may be actually longer than just using the hash (which represents the actual key in the tree). -//! To overcome this drawback, we will make the value `0xFF` of the length byte special: this value means that instead -//! of packed key fields we have just an unpacked final hash. At the time of packing we will generate both forms -//! and choose the shorter one. - -use zksync_basic_types::AccountTreeId; -use zksync_utils::h256_to_u256; - -use crate::{StorageKey, StorageLog, H256}; - -const ACCOUNT_TREE_ID_SIZE: usize = 21; -const U256_SIZE: usize = 32; - -const fn max_encoded_size(field_size: usize) -> usize { - field_size + 1 -} - -pub fn pack_smart_contract(account_id: AccountTreeId, bytecode: Vec<u8>) -> Vec<u8> { - let max_size = max_encoded_size(ACCOUNT_TREE_ID_SIZE) + bytecode.len(); - let mut packed = Vec::with_capacity(max_size); - - packed.append(&mut encode_account_tree_id(account_id)); - - packed -} - -pub const fn max_log_size() -> usize { - // Key is encoded as U168 + U256, value is U256. - max_encoded_size(ACCOUNT_TREE_ID_SIZE) + max_encoded_size(U256_SIZE) * 2 -} - -pub fn pack_storage_log<F>(log: &StorageLog, _hash_key: F) -> Vec<u8> -where - F: FnOnce(&StorageKey) -> Vec<u8>, -{ - pack_storage_log_packed_old(log) -} - -/// Does not pack anything; just encodes account address, storage key and storage value as bytes. -/// Encoding is exactly 20 + 32 + 32 bytes in size.
-pub fn pack_storage_log_unpacked(log: &StorageLog) -> Vec { - log.to_bytes() -} - -/// Packs address, storage key and storage value as 3 separate values. -/// Encoding is at most 21 + 33 + 33 bytes in size. -pub fn pack_storage_log_packed_old(log: &StorageLog) -> Vec { - let mut packed_log = Vec::with_capacity(max_log_size()); - - packed_log.append(&mut encode_key(&log.key, |key| { - key.key().to_fixed_bytes().to_vec() - })); - packed_log.append(&mut encode_h256(log.value)); - - packed_log -} - -/// Computes the hash of the (address, storage key) and packs the storage value. -/// Encoding is at most 32 + 33 bytes in size. -pub fn pack_storage_log_packed_new(log: &StorageLog) -> Vec { - let mut packed_log = Vec::with_capacity(max_log_size()); - - packed_log.extend_from_slice(&log.key.hashed_key().to_fixed_bytes()); - packed_log.append(&mut encode_h256(log.value)); - - packed_log -} - -fn encode_key(key: &StorageKey, hash_key: F) -> Vec -where - F: FnOnce(&StorageKey) -> Vec, -{ - let mut key_hash = Vec::with_capacity(max_encoded_size(U256_SIZE)); - key_hash.push(0xFFu8); - key_hash.append(&mut hash_key(key)); - - let encoded_storage_key = encode_h256(*key.key()); - - let mut storage_key_part = if encoded_storage_key.len() <= key_hash.len() { - encoded_storage_key - } else { - key_hash - }; - - let mut encoded_key = - Vec::with_capacity(max_encoded_size(U256_SIZE) + max_encoded_size(ACCOUNT_TREE_ID_SIZE)); - encoded_key.append(&mut encode_account_tree_id(*key.account())); - encoded_key.append(&mut storage_key_part); - - encoded_key -} - -fn encode_account_tree_id(val: AccountTreeId) -> Vec { - let mut result = vec![0; 21]; - result[0] = 20; - result[1..].copy_from_slice(&val.to_fixed_bytes()); - - result -} - -fn encode_h256(val: H256) -> Vec { - let val = h256_to_u256(val); - let leading_zero_bytes = (val.leading_zeros() / 8) as usize; - let result_vec_length = (1 + U256_SIZE) - leading_zero_bytes; - let val_len = result_vec_length - 1; - let mut result = vec![0; result_vec_length]; - - let mut val_bytes = [0u8; 32]; - val.to_big_endian(&mut val_bytes); - - result[0] = val_len as u8; - if val_len > 0 { - result[1..].copy_from_slice(&val_bytes[leading_zero_bytes..]); - } - - result -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{AccountTreeId, Address, U256}; - use zksync_utils::{u256_to_h256, u64_to_h256}; - - fn check_encoding(f: F, input: impl Into, output: &str) - where - F: Fn(T) -> Vec, - { - let output = hex::decode(output).unwrap(); - assert_eq!(f(input.into()), output); - } - - #[test] - fn u256_encoding() { - let test_vector = vec![ - (u64_to_h256(0x00_00_00_00_u64), "00"), - (u64_to_h256(0x00_00_00_01_u64), "0101"), - (u64_to_h256(0x00_00_00_FF_u64), "01FF"), - (u64_to_h256(0x00_00_01_00_u64), "020100"), - (u64_to_h256(0x10_01_00_00_u64), "0410010000"), - (u64_to_h256(0xFF_FF_FF_FF_u64), "04FFFFFFFF"), - ]; - - for (input, output) in test_vector { - check_encoding(encode_h256, input, output); - } - - let max = u256_to_h256(U256::max_value()); - check_encoding( - encode_h256, - max, - "20FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ); - - let one_leading_zero_bit = U256::max_value() >> 1; - assert_eq!(one_leading_zero_bit.leading_zeros(), 1); - let one_leading_zero_bit = u256_to_h256(one_leading_zero_bit); - check_encoding( - encode_h256, - one_leading_zero_bit, - "207FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ); - - let one_leading_zero_byte = U256::max_value() >> 8; - assert_eq!(one_leading_zero_byte.leading_zeros(), 
8); - let one_leading_zero_byte = u256_to_h256(one_leading_zero_byte); - check_encoding( - encode_h256, - one_leading_zero_byte, - "1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ); - } - - fn pseudo_hash(key: &StorageKey) -> Vec { - // Just return something 32-bit long. - key.key().to_fixed_bytes().to_vec() - } - - #[test] - fn key_encoding() { - // Raw key must be encoded in the compressed form, because hash will be longer. - let short_key = StorageKey::new( - AccountTreeId::new(Address::from_slice(&[0x0A; 20])), - u64_to_h256(0xDEAD_F00D_u64), - ); - // `140A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A` is encoding of `AccountTreeId`. - // 0x14 is number of bytes that should be decoded. - let expected_output = "140A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A04DEADF00D"; - - check_encoding( - |key| encode_key(key, pseudo_hash), - &short_key, - expected_output, - ); - } - - /// Compares multiple packing approaches we have. Does not assert anything. - /// If you see this test and know that the packing algorithm is already chosen and used in - /// production, please remvoe this test. Also, remove the similar tests in the `runtime_context` - /// module of `zksync_state` crate. - #[test] - fn pack_log_comparison() { - let log1 = StorageLog::new_write_log( - StorageKey::new(AccountTreeId::new(Address::random()), H256::random()), - H256::random(), - ); - let log2 = StorageLog::new_write_log( - StorageKey::new( - AccountTreeId::new(Address::repeat_byte(0x11)), - H256::repeat_byte(0x22), - ), - H256::repeat_byte(0x33), - ); - let log3 = StorageLog::new_write_log( - StorageKey::new( - AccountTreeId::new(Address::repeat_byte(0x11)), - H256::from_low_u64_be(0x01), - ), - H256::from_low_u64_be(0x02), - ); - let log4 = StorageLog::new_write_log( - StorageKey::new( - AccountTreeId::new(Address::repeat_byte(0x11)), - H256::repeat_byte(0x22), - ), - H256::from_low_u64_be(0x02), - ); - - let test_vector = &[ - (log1, "Random values"), - (log2, "32-byte key/value"), - (log3, "1-byte key/value"), - (log4, "32-byte key/1-byte value"), - ]; - - for (log, description) in test_vector { - let no_packing = pack_storage_log_unpacked(log); - let old_packing = pack_storage_log_packed_old(log); - let new_packing = pack_storage_log_packed_new(log); - - println!("Packing {}", description); - println!("No packing: {} bytes", no_packing.len()); - println!("Old packing: {} bytes", old_packing.len()); - println!("New packing: {} bytes", new_packing.len()); - println!("-----------------------"); - } - } -} diff --git a/core/lib/types/src/storage/log.rs b/core/lib/types/src/storage/log.rs index 7d08a7541b1b..bac739904624 100644 --- a/core/lib/types/src/storage/log.rs +++ b/core/lib/types/src/storage/log.rs @@ -1,9 +1,12 @@ use serde::{Deserialize, Serialize}; -use zk_evm::aux_structures::LogQuery; + +use std::mem; + +use zk_evm::aux_structures::{LogQuery, Timestamp}; use zksync_basic_types::AccountTreeId; use zksync_utils::u256_to_h256; -use super::{StorageKey, StorageValue, H256}; +use crate::{StorageKey, StorageValue, U256}; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub enum StorageLogKind { @@ -51,23 +54,30 @@ impl StorageLog { } } - /// Encodes the log key and value into a byte sequence. - pub fn to_bytes(&self) -> Vec { - // Concatenate account, key and value. 
- let mut output = self.key.account().to_fixed_bytes().to_vec(); - output.extend_from_slice(&self.key.key().to_fixed_bytes()); - output.extend_from_slice(self.value.as_fixed_bytes()); + /// Converts this log to a log query that could be used in tests. + pub fn to_test_log_query(&self) -> LogQuery { + let mut read_value = U256::zero(); + let mut written_value = U256::from_big_endian(self.value.as_bytes()); + if self.kind == StorageLogKind::Read { + mem::swap(&mut read_value, &mut written_value); + } - output + LogQuery { + timestamp: Timestamp(0), + tx_number_in_block: 0, + aux_byte: 0, + shard_id: 0, + address: *self.key.address(), + key: U256::from_big_endian(self.key.key().as_bytes()), + read_value, + written_value, + rw_flag: matches!(self.kind, StorageLogKind::Write), + rollback: false, + is_service: false, + } } } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct WitnessStorageLog { - pub storage_log: StorageLog, - pub previous_value: H256, -} - #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum StorageLogQueryType { Read, diff --git a/core/lib/types/src/storage/writes.rs b/core/lib/types/src/storage/writes.rs index f06ff3d5649f..9bdacebbbc3e 100644 --- a/core/lib/types/src/storage/writes.rs +++ b/core/lib/types/src/storage/writes.rs @@ -5,8 +5,9 @@ use zksync_basic_types::U256; /// In vm there are two types of writes Initial and Repeated. After the first write to the leaf, /// we assign an index to it and in the future we should use index instead of full key. /// It allows us to compress the data. -#[derive(Clone, Debug, Deserialize, Serialize, Default, Eq, PartialEq)] +#[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct InitialStorageWrite { + pub index: u64, pub key: U256, pub value: H256, } @@ -27,10 +28,12 @@ mod tests { fn calculate_hash_for_storage_writes() { let initial_writes = vec![ InitialStorageWrite { + index: 1, key: U256::from(1u32), value: H256::from([1; 32]), }, InitialStorageWrite { + index: 2, key: U256::from(2u32), value: H256::from([3; 32]), }, diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index e23c4f886466..7b2034abdcfd 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,6 +1,7 @@ use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_config::constants::{ - BOOTLOADER_UTILITIES_ADDRESS, BYTECODE_COMPRESSOR_ADDRESS, EVENT_WRITER_ADDRESS, + BOOTLOADER_UTILITIES_ADDRESS, BYTECODE_COMPRESSOR_ADDRESS, COMPLEX_UPGRADER_ADDRESS, + EVENT_WRITER_ADDRESS, }; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage}; @@ -113,6 +114,12 @@ static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { BYTECODE_COMPRESSOR_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "ComplexUpgrader", + COMPLEX_UPGRADER_ADDRESS, + ContractLanguage::Sol, + ), ] .map(|(path, name, address, contract_lang)| DeployedContract { account_id: AccountTreeId::new(address), diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 89fd70b0a4f5..7ccc19e9b148 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -537,7 +537,6 @@ impl TransactionRequest { pub fn from_bytes( bytes: &[u8], chain_id: u16, - max_tx_size: usize, ) -> Result<(Self, H256), SerializationTransactionError> { let rlp; let mut tx = match bytes.first() { @@ -653,8 +652,6 @@ impl TransactionRequest { H256(keccak256(bytes)) }; - check_tx_data(&tx, max_tx_size)?; - Ok((tx, hash)) } @@ -738,27 +735,25 @@ 
impl TransactionRequest { } } -impl TryFrom for L2Tx { - type Error = SerializationTransactionError; - - fn try_from(value: TransactionRequest) -> Result { +impl L2Tx { + pub fn from_request( + value: TransactionRequest, + max_tx_size: usize, + ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; - // Attempt to decode factory deps. - let factory_deps = value + + let raw_signature = value.get_signature().unwrap_or_default(); + // Destruct `eip712_meta` in one go to avoid cloning. + let (factory_deps, paymaster_params) = value .eip712_meta - .as_ref() - .and_then(|meta| meta.factory_deps.clone()); + .map(|eip712_meta| (eip712_meta.factory_deps, eip712_meta.paymaster_params)) + .unwrap_or_default(); + if let Some(deps) = factory_deps.as_ref() { validate_factory_deps(deps)?; } - let paymaster_params = value - .eip712_meta - .as_ref() - .and_then(|meta| meta.paymaster_params.clone()) - .unwrap_or_default(); - let mut tx = L2Tx::new( value .to @@ -769,7 +764,7 @@ impl TryFrom for L2Tx { value.from.unwrap_or_default(), value.value, factory_deps, - paymaster_params, + paymaster_params.unwrap_or_default(), ); tx.common_data.transaction_type = match value.transaction_type.map(|t| t.as_u64() as u8) { @@ -779,9 +774,24 @@ impl TryFrom for L2Tx { _ => TransactionType::LegacyTransaction, }; // For fee calculation we use the same structure, as a result, signature may not be provided - tx.set_raw_signature(value.get_signature().unwrap_or_default()); + tx.set_raw_signature(raw_signature); + + tx.check_encoded_size(max_tx_size)?; Ok(tx) } + + /// Ensures that encoded transaction size is not greater than `max_tx_size`. + fn check_encoded_size(&self, max_tx_size: usize) -> Result<(), SerializationTransactionError> { + // since abi_encoding_len returns 32-byte words multiplication on 32 is needed + let tx_size = self.abi_encoding_len() * 32; + if tx_size > max_tx_size { + return Err(SerializationTransactionError::OversizedData( + max_tx_size, + tx_size, + )); + }; + Ok(()) + } } impl From for CallRequest { @@ -811,43 +821,29 @@ impl From for CallRequest { } } -pub fn tx_req_from_call_req( - call_request: CallRequest, - max_tx_size: usize, -) -> Result { - let calldata = call_request.data.unwrap_or_default(); - - let transaction_request = TransactionRequest { - nonce: call_request.nonce.unwrap_or_default(), - from: call_request.from, - to: call_request.to, - value: call_request.value.unwrap_or_default(), - gas_price: call_request.gas_price.unwrap_or_default(), - gas: call_request.gas.unwrap_or_default(), - input: calldata, - transaction_type: call_request.transaction_type, - access_list: call_request.access_list, - eip712_meta: call_request.eip712_meta, - ..Default::default() - }; - check_tx_data(&transaction_request, max_tx_size)?; - Ok(transaction_request) -} - -pub fn l2_tx_from_call_req( - call_request: CallRequest, - max_tx_size: usize, -) -> Result { - let tx_request: TransactionRequest = tx_req_from_call_req(call_request, max_tx_size)?; - let l2_tx = tx_request.try_into()?; - Ok(l2_tx) +impl From for TransactionRequest { + fn from(call_request: CallRequest) -> Self { + TransactionRequest { + nonce: call_request.nonce.unwrap_or_default(), + from: call_request.from, + to: call_request.to, + value: call_request.value.unwrap_or_default(), + gas_price: call_request.gas_price.unwrap_or_default(), + gas: call_request.gas.unwrap_or_default(), + input: call_request.data.unwrap_or_default(), + transaction_type: call_request.transaction_type, + access_list: 
call_request.access_list, + eip712_meta: call_request.eip712_meta, + ..Default::default() + } + } } impl TryFrom for L1Tx { type Error = SerializationTransactionError; fn try_from(tx: CallRequest) -> Result { // L1 transactions have no limitations on the transaction size. - let tx: L2Tx = l2_tx_from_call_req(tx, USED_BOOTLOADER_MEMORY_BYTES)?; + let tx: L2Tx = L2Tx::from_request(tx.into(), USED_BOOTLOADER_MEMORY_BYTES)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -908,22 +904,6 @@ pub fn validate_factory_deps( Ok(()) } -fn check_tx_data( - tx_request: &TransactionRequest, - max_tx_size: usize, -) -> Result<(), SerializationTransactionError> { - let l2_tx: L2Tx = tx_request.clone().try_into()?; - // since abi_encoding_len returns 32-byte words multiplication on 32 is needed - let tx_size = l2_tx.abi_encoding_len() * 32; - if tx_size > max_tx_size { - return Err(SerializationTransactionError::OversizedData( - max_tx_size, - tx_size, - )); - }; - Ok(()) -} - #[cfg(test)] mod tests { use super::*; @@ -936,7 +916,6 @@ mod tests { #[tokio::test] async fn decode_real_tx() { - let random_tx_max_size = 1_000_000; // bytes let accounts = crate::web3::api::Accounts::new(TestTransport::default()); let pk = hex::decode("4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318") @@ -958,12 +937,8 @@ mod tests { access_list: None, }; let signed_tx = accounts.sign_transaction(tx.clone(), &key).await.unwrap(); - let (tx2, _) = TransactionRequest::from_bytes( - signed_tx.raw_transaction.0.as_slice(), - 270, - random_tx_max_size, - ) - .unwrap(); + let (tx2, _) = + TransactionRequest::from_bytes(signed_tx.raw_transaction.0.as_slice(), 270).unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price.unwrap(), tx2.gas_price); assert_eq!(tx.nonce.unwrap(), tx2.nonce); @@ -974,7 +949,6 @@ mod tests { #[test] fn decode_rlp() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -997,7 +971,7 @@ mod tests { let mut rlp = RlpStream::new(); tx.rlp(&mut rlp, 270, Some(&signature)); let data = rlp.out().to_vec(); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270, random_tx_max_size).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price, tx2.gas_price); assert_eq!(tx.nonce, tx2.nonce); @@ -1014,7 +988,6 @@ mod tests { #[test] fn decode_eip712_with_meta() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1054,14 +1027,13 @@ mod tests { tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270, random_tx_max_size).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); assert_eq!(tx, tx2); } #[test] fn check_recovered_public_key_eip712() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1091,15 +1063,13 @@ mod tests { let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); - let (decoded_tx, _) = - TransactionRequest::from_bytes(encoded_tx.as_slice(), 270, random_tx_max_size).unwrap(); + let (decoded_tx, 
_) = TransactionRequest::from_bytes(encoded_tx.as_slice(), 270).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } #[test] fn check_recovered_public_key_eip712_with_wrong_chain_id() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1132,8 +1102,7 @@ mod tests { let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); - let decoded_tx = - TransactionRequest::from_bytes(encoded_tx.as_slice(), 272, random_tx_max_size); + let decoded_tx = TransactionRequest::from_bytes(encoded_tx.as_slice(), 272); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(270))) @@ -1142,7 +1111,6 @@ mod tests { #[test] fn check_recovered_public_key_eip1559() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1174,15 +1142,13 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let (decoded_tx, _) = - TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size).unwrap(); + let (decoded_tx, _) = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } #[test] fn check_recovered_public_key_eip1559_with_wrong_chain_id() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1213,7 +1179,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(272))) @@ -1222,7 +1188,6 @@ mod tests { #[test] fn check_decode_eip1559_with_access_list() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1254,7 +1219,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + let res = TransactionRequest::from_bytes(data.as_slice(), 270); assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) @@ -1263,7 +1228,6 @@ mod tests { #[test] fn check_failed_to_decode_eip2930() { - let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1292,7 +1256,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_2930_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + let res = TransactionRequest::from_bytes(data.as_slice(), 270); assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) @@ -1308,7 +1272,8 @@ mod tests { value: U256::zero(), ..Default::default() }; - let execute_tx1: Result = tx1.try_into(); + let execute_tx1: Result = + L2Tx::from_request(tx1, usize::MAX); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1318,7 +1283,8 @@ mod tests { value: U256::zero(), ..Default::default() }; - let execute_tx2: Result = tx2.try_into(); + let 
execute_tx2: Result = + L2Tx::from_request(tx2, usize::MAX); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1334,7 +1300,8 @@ mod tests { gas_price: U256::MAX, ..Default::default() }; - let execute_tx1: Result = tx1.try_into(); + let execute_tx1: Result = + L2Tx::from_request(tx1, usize::MAX); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::TooHighGas( @@ -1349,7 +1316,8 @@ mod tests { max_priority_fee_per_gas: Some(U256::MAX), ..Default::default() }; - let execute_tx2: Result = tx2.try_into(); + let execute_tx2: Result = + L2Tx::from_request(tx2, usize::MAX); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooHighGas( @@ -1368,7 +1336,8 @@ mod tests { ..Default::default() }; - let execute_tx3: Result = tx3.try_into(); + let execute_tx3: Result = + L2Tx::from_request(tx3, usize::MAX); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::TooHighGas( @@ -1420,8 +1389,9 @@ mod tests { tx.v = Some(U64::from(signature.v())); tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); + let request = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); assert!(matches!( - TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size), Err(SerializationTransactionError::OversizedData(_, _)) )) } @@ -1446,7 +1416,7 @@ mod tests { }; let try_to_l2_tx: Result = - l2_tx_from_call_req(call_request, random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size); assert!(matches!( try_to_l2_tx, @@ -1470,18 +1440,21 @@ mod tests { access_list: None, eip712_meta: None, }; - let tx_request = tx_req_from_call_req( - call_request_with_nonce.clone(), + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), USED_BOOTLOADER_MEMORY_BYTES, ) .unwrap(); - assert_eq!(tx_request.nonce, U256::from(123u32)); + assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; - let tx_request = - tx_req_from_call_req(call_request_without_nonce, USED_BOOTLOADER_MEMORY_BYTES).unwrap(); - assert_eq!(tx_request.nonce, U256::from(0u32)); + let l2_tx = L2Tx::from_request( + call_request_without_nonce.into(), + USED_BOOTLOADER_MEMORY_BYTES, + ) + .unwrap(); + assert_eq!(l2_tx.nonce(), Nonce(0u32)); } } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c00c8ca00cf9..043dfaeac9b7 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -27,7 +27,7 @@ impl EIP712TypedStructure for Execute { fn build_structure(&self, builder: &mut BUILDER) { builder.add_member("to", &U256::from(self.contract_address.as_bytes())); builder.add_member("value", &self.value); - builder.add_member("data", &self.calldata().as_slice()); + builder.add_member("data", &self.calldata.as_slice()); // Factory deps are not included into the transaction signature, since they are parsed from the // transaction metadata. // Note that for the deploy transactions all the dependencies are implicitly included into the "calldataHash" @@ -36,8 +36,8 @@ impl EIP712TypedStructure for Execute { } impl Execute { - pub fn calldata(&self) -> Vec { - self.calldata.clone() + pub fn calldata(&self) -> &[u8] { + &self.calldata } /// Prepares calldata to invoke deployer contract. 
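// Caller-side sketch of the refactored flow above (illustrative; the 1 MB cap and
// chain id 270 mirror the tests). Decoding no longer enforces a size limit; the
// limit is applied when the request is converted into an executable transaction.
use zksync_types::l2::L2Tx;
use zksync_types::transaction_request::{SerializationTransactionError, TransactionRequest};

fn decode_and_convert(bytes: &[u8]) -> Result<L2Tx, SerializationTransactionError> {
    let (request, _hash) = TransactionRequest::from_bytes(bytes, 270)?;
    L2Tx::from_request(request, 1_000_000)
}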
diff --git a/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs b/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs index 1b967c04dedf..cc4906ef7e8f 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs @@ -1,7 +1,7 @@ use crate::tx::primitives::eip712_signature::typed_structure::{ EncodedStructureMember, StructMember, }; -use parity_crypto::Keccak256; +use crate::web3::signing::keccak256; use zksync_basic_types::{Address, H256, U256}; impl StructMember for String { @@ -13,7 +13,7 @@ impl StructMember for String { } fn encode_member_data(&self) -> H256 { - self.keccak256().into() + keccak256(self.as_bytes()).into() } } @@ -39,7 +39,7 @@ impl StructMember for &[u8] { } fn encode_member_data(&self) -> H256 { - self.keccak256().into() + keccak256(self).into() } } @@ -56,7 +56,7 @@ impl StructMember for &[H256] { .iter() .flat_map(|hash| hash.as_bytes().to_vec()) .collect(); - bytes.keccak256().into() + keccak256(&bytes).into() } } diff --git a/core/lib/types/src/tx/primitives/eip712_signature/tests.rs b/core/lib/types/src/tx/primitives/eip712_signature/tests.rs index ec2accc0b0b2..70ae415531c9 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/tests.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/tests.rs @@ -3,7 +3,7 @@ use crate::tx::primitives::eip712_signature::{ typed_structure::{EIP712TypedStructure, Eip712Domain}, }; use crate::tx::primitives::{eip712_signature::utils::get_eip712_json, PackedEthSignature}; -use parity_crypto::Keccak256; +use crate::web3::signing::keccak256; use serde::Serialize; use std::str::FromStr; use zksync_basic_types::{Address, H256, U256}; @@ -103,7 +103,7 @@ fn test_encode_eip712_typed_struct() { H256::from_str("3b98b16ad068d9d8854a6a416bd476de44a4933ec5104d7c786a422ab262ed14").unwrap() ); - let private_key = b"cow".keccak256().into(); + let private_key = keccak256(b"cow").into(); let address_owner = PackedEthSignature::address_from_private_key(&private_key).unwrap(); let signature = PackedEthSignature::sign_typed_data(&private_key, &domain, &message).unwrap(); diff --git a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs index 408d818e343d..5ad48995a5c7 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs @@ -1,4 +1,4 @@ -use parity_crypto::Keccak256; +use crate::web3::signing::keccak256; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -126,7 +126,7 @@ pub trait EIP712TypedStructure: Serialize { // hashStruct(s : 𝕊) = keccak256(keccak256(encodeType(typeOf(s))) ‖ encodeData(s)). 
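// (Illustrative:) for a struct type `Person(string name,address wallet)`, type_hash = keccak256("Person(string name,address wallet)") and hash_struct = keccak256(type_hash ‖ encode_data(person)).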
let type_hash = { let encode_type = self.encode_type(); - encode_type.keccak256() + keccak256(encode_type.as_bytes()) }; let encode_data = self.encode_data(); @@ -136,7 +136,7 @@ pub trait EIP712TypedStructure: Serialize { bytes.extend_from_slice(data.as_bytes()); } - bytes.keccak256().into() + keccak256(&bytes).into() } fn get_json_types(&self) -> Vec { diff --git a/core/lib/types/src/tx/primitives/packed_eth_signature.rs b/core/lib/types/src/tx/primitives/packed_eth_signature.rs index 15084e765837..57f065115e72 100644 --- a/core/lib/types/src/tx/primitives/packed_eth_signature.rs +++ b/core/lib/types/src/tx/primitives/packed_eth_signature.rs @@ -1,6 +1,7 @@ use crate::tx::primitives::eip712_signature::typed_structure::{ EIP712TypedStructure, Eip712Domain, }; +use ethereum_types_old::H256 as ParityCryptoH256; use parity_crypto::{ publickey::{ public_to_address, recover, sign, Error as ParityCryptoError, KeyPair, @@ -64,8 +65,11 @@ impl PackedEthSignature { private_key: &H256, signed_bytes: &H256, ) -> Result { - let secret_key = (*private_key).into(); - let signature = sign(&secret_key, signed_bytes)?; + let private_key = ParityCryptoH256::from_slice(&private_key.0); + let signed_bytes = ParityCryptoH256::from_slice(&signed_bytes.0); + + let secret_key = private_key.into(); + let signature = sign(&secret_key, &signed_bytes)?; Ok(PackedEthSignature(signature)) } @@ -76,8 +80,10 @@ impl PackedEthSignature { domain: &Eip712Domain, typed_struct: &impl EIP712TypedStructure, ) -> Result { - let secret_key = (*private_key).into(); - let signed_bytes = Self::typed_data_to_signed_bytes(domain, typed_struct); + let private_key = ParityCryptoH256::from_slice(&private_key.0); + let secret_key = private_key.into(); + let signed_bytes = + ParityCryptoH256::from(Self::typed_data_to_signed_bytes(domain, typed_struct).0); let signature = sign(&secret_key, &signed_bytes)?; Ok(PackedEthSignature(signature)) } @@ -104,17 +110,23 @@ impl PackedEthSignature { &self, signed_bytes: &H256, ) -> Result { - let public_key = recover(&self.0, signed_bytes)?; - Ok(public_to_address(&public_key)) + let signed_bytes = ParityCryptoH256::from_slice(&signed_bytes.0); + let public_key = recover(&self.0, &signed_bytes)?; + let address = public_to_address(&public_key); + Ok(Address::from(address.0)) } /// Get Ethereum address from private key. 
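/// (Illustrative background:) the resulting address is the last 20 bytes of keccak256 of the uncompressed secp256k1 public key.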
pub fn address_from_private_key(private_key: &H256) -> Result { - Ok(KeyPair::from_secret((*private_key).into())?.address()) + let private_key = ParityCryptoH256::from_slice(&private_key.0); + let address = KeyPair::from_secret(private_key.into())?.address(); + Ok(Address::from(address.0)) } pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self { - PackedEthSignature(ETHSignature::from_rsv(r, s, v)) + let r = ParityCryptoH256::from_slice(&r.0); + let s = ParityCryptoH256::from_slice(&s.0); + PackedEthSignature(ETHSignature::from_rsv(&r, &s, v)) } pub fn r(&self) -> &[u8] { diff --git a/core/lib/types/src/utils.rs b/core/lib/types/src/utils.rs index 76c3e488b95c..e90d77923b22 100644 --- a/core/lib/types/src/utils.rs +++ b/core/lib/types/src/utils.rs @@ -1,17 +1,11 @@ use crate::system_contracts::DEPLOYMENT_NONCE_INCREMENT; use crate::L2_ETH_TOKEN_ADDRESS; use crate::{web3::signing::keccak256, AccountTreeId, StorageKey, U256}; -use once_cell::sync::Lazy; -use parity_crypto::Keccak256; -use std::collections::HashMap; - -use std::mem; -use std::sync::Mutex; use std::time::Instant; use zksync_basic_types::{Address, H256}; -use zksync_utils::{address_to_h256, h256_to_u256, u256_to_h256}; +use zksync_utils::{address_to_h256, u256_to_h256}; /// Transforms the *full* account nonce into an *account* nonce. /// Full nonce is a composite one: it includes both account nonce (number of transactions @@ -30,8 +24,6 @@ pub fn nonces_to_full_nonce(tx_nonce: U256, deploy_nonce: U256) -> U256 { DEPLOYMENT_NONCE_INCREMENT * deploy_nonce + tx_nonce } -static CACHE: Lazy>> = Lazy::new(|| Mutex::new(HashMap::new())); - fn key_for_eth_balance(address: &Address) -> H256 { let address_h256 = address_to_h256(address); @@ -43,33 +35,20 @@ fn key_for_eth_balance(address: &Address) -> H256 { fn key_for_erc20_balance(address: &Address) -> H256 { let started_at = Instant::now(); let address_h256 = address_to_h256(address); - let address_u256 = h256_to_u256(address_h256); - let mut hash_map = CACHE.lock().unwrap(); - - metrics::gauge!( - "server.compute_storage_key_for_erc20_cache_size", - hash_map.len() as f64 - ); - metrics::gauge!( - "server.compute_storage_key_for_erc20_cache_size_bytes", - mem::size_of_val(&hash_map) as f64 - ); - - let hash = hash_map.entry(address_u256).or_insert_with(|| { - // 20 bytes address first gets aligned to 32 bytes with index of `balanceOf` storage slot - // of default ERC20 contract and to then to 64 bytes. - - let slot_index = H256::from_low_u64_be(51); - let bytes = [address_h256.as_bytes(), slot_index.as_bytes()].concat(); - keccak256(&bytes).into() - }); + // 20 bytes address first gets aligned to 32 bytes with index of `balanceOf` storage slot + // of default ERC20 contract and to then to 64 bytes. + let slot_index = H256::from_low_u64_be(51); + let mut bytes = [0_u8; 64]; + bytes[..32].copy_from_slice(address_h256.as_bytes()); + bytes[32..].copy_from_slice(slot_index.as_bytes()); + let hash = H256(keccak256(&bytes)); metrics::histogram!( "server.compute_storage_key_for_erc20_balance_latency", started_at.elapsed() ); - *hash + hash } /// Create a storage key to access the balance from supported token contract balances @@ -94,16 +73,16 @@ pub fn storage_key_for_eth_balance(address: &Address) -> StorageKey { /// Pre-calculated the address of the to-be-deployed contract (via CREATE, not CREATE2). 
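/// (Illustrative formula, mirroring the body below:) address = keccak256(keccak256("zksyncCreate") ‖ sender ‖ nonce)[12..], unlike Ethereum's RLP-based CREATE derivation.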
pub fn deployed_address_create(sender: Address, deploy_nonce: U256) -> Address { - let prefix_bytes = "zksyncCreate".as_bytes().keccak256(); + let prefix_bytes = keccak256("zksyncCreate".as_bytes()); let address_bytes = address_to_h256(&sender); let nonce_bytes = u256_to_h256(deploy_nonce); - let mut bytes = vec![]; - bytes.extend_from_slice(&prefix_bytes); - bytes.extend_from_slice(address_bytes.as_bytes()); - bytes.extend_from_slice(nonce_bytes.as_bytes()); + let mut bytes = [0u8; 96]; + bytes[..32].copy_from_slice(&prefix_bytes); + bytes[32..64].copy_from_slice(address_bytes.as_bytes()); + bytes[64..].copy_from_slice(nonce_bytes.as_bytes()); - Address::from_slice(&bytes.keccak256()[12..]) + Address::from_slice(&keccak256(&bytes)[12..]) } #[cfg(test)] diff --git a/core/lib/types/src/vk_transform.rs b/core/lib/types/src/vk_transform.rs new file mode 100644 index 000000000000..dfa022fb7c1b --- /dev/null +++ b/core/lib/types/src/vk_transform.rs @@ -0,0 +1,100 @@ +use crate::{ethabi::Token, H256}; +use std::str::FromStr; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{ + bn256::{Bn256, Fq, Fr, G1Affine}, + plonk::better_better_cs::setup::VerificationKey, + CurveAffine, PrimeField, + }, + ff::to_hex, + witness::{ + oracle::VmWitnessOracle, + recursive_aggregation::{compute_vk_encoding_and_committment, erase_vk_type}, + }, +}; + +/// Calculates commitment for vk from L1 verifier contract. +pub fn l1_vk_commitment(token: Token) -> H256 { + let vk = vk_from_token(token); + generate_vk_commitment(vk) +} + +pub fn generate_vk_commitment( + vk: VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>, +) -> H256 { + let (_, scheduler_vk_commitment) = compute_vk_encoding_and_committment(erase_vk_type(vk)); + let scheduler_commitment_hex = format!("0x{}", to_hex(&scheduler_vk_commitment)); + H256::from_str(&scheduler_commitment_hex).expect("invalid scheduler commitment") +} + +fn vk_from_token( + vk_token: Token, +) -> VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>> { + let tokens = unwrap_tuple(vk_token); + + // Sets only fields of `VerificationKey` struct that are needed for computing commitment.
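The vk-parsing code that follows leans on a single pattern: every ABI value arrives as an `ethabi::Token` and is unwrapped by shape, panicking on a mismatch because the layout is fixed by the verifier contract's ABI. A minimal sketch of that pattern, assuming only the `ethabi` crate; `Point` and `point_from_token` are illustrative names:

```rust
// Minimal version of the Token-decoding pattern used below, assuming
// `ethabi` (re-exported by the codebase as `zksync_types::ethabi`).
use ethabi::{Token, Uint};

struct Point { x: Uint, y: Uint }

// Mirrors `unwrap_tuple` in the hunk: panic on a shape mismatch.
fn unwrap_tuple(token: Token) -> Vec<Token> {
    if let Token::Tuple(tokens) = token {
        tokens
    } else {
        panic!("Tuple was expected, got: {}", token);
    }
}

fn point_from_token(token: Token) -> Point {
    let tokens = unwrap_tuple(token);
    Point {
        x: tokens[0].clone().into_uint().expect("x must be a uint"),
        y: tokens[1].clone().into_uint().expect("y must be a uint"),
    }
}

fn main() {
    let token = Token::Tuple(vec![Token::Uint(1.into()), Token::Uint(2.into())]);
    let p = point_from_token(token);
    assert_eq!(p.x, 1.into());
    assert_eq!(p.y, 2.into());
}
```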
+ let mut vk = VerificationKey::empty(); + vk.n = tokens[0].clone().into_uint().unwrap().as_usize(); + vk.num_inputs = tokens[1].clone().into_uint().unwrap().as_usize(); + vk.gate_selectors_commitments = tokens[3] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + vk.gate_setup_commitments = tokens[4] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + vk.permutation_commitments = tokens[5] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + vk.lookup_selector_commitment = Some(g1_affine_from_token(tokens[6].clone())); + vk.lookup_tables_commitments = tokens[7] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + vk.lookup_table_type_commitment = Some(g1_affine_from_token(tokens[8].clone())); + vk.non_residues = tokens[9] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(fr_from_token) + .collect(); + + vk +} + +fn g1_affine_from_token(token: Token) -> G1Affine { + let tokens = unwrap_tuple(token); + G1Affine::from_xy_unchecked( + Fq::from_str(&tokens[0].clone().into_uint().unwrap().to_string()).unwrap(), + Fq::from_str(&tokens[1].clone().into_uint().unwrap().to_string()).unwrap(), + ) +} + +fn fr_from_token(token: Token) -> Fr { + let tokens = unwrap_tuple(token); + Fr::from_str(&tokens[0].clone().into_uint().unwrap().to_string()).unwrap() +} + +fn unwrap_tuple(token: Token) -> Vec<Token> { + if let Token::Tuple(tokens) = token { + tokens + } else { + panic!("Tuple was expected, got: {}", token); + } +} diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index 362653e5ac8e..d846d7375692 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -15,7 +15,7 @@ vlog = { path = "../../lib/vlog", version = "1.0" } zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3"} num = { version = "0.3.1", features = ["serde"] } -bigdecimal = { version = "=0.2.0", features = ["serde"]} +bigdecimal = { version = "0.2.2", features = ["serde"]} serde = { version = "1.0", features = ["derive"] } tokio = { version = "1", features = ["time"] } anyhow = "1.0" diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs index 3cd3fa71974e..1281812bbb4a 100644 --- a/core/lib/utils/src/convert.rs +++ b/core/lib/utils/src/convert.rs @@ -109,16 +109,8 @@ pub fn le_chunks_to_words(chunks: Vec<[u8; 32]>) -> Vec<U256> { .collect() } -pub fn be_chunks_to_words(chunks: Vec<[u8; 32]>) -> Vec<U256> { - chunks - .into_iter() - .map(|el| U256::from_big_endian(&el)) - .collect() -} - -pub fn bytes_to_le_words(vec: Vec<u8>) -> Vec<U256> { - ensure_chunkable(&vec); - vec.chunks(32).map(U256::from_little_endian).collect() +pub fn be_chunks_to_h256_words(chunks: Vec<[u8; 32]>) -> Vec<H256> { + chunks.into_iter().map(|el| H256::from_slice(&el)).collect() } pub fn bytes_to_be_words(vec: Vec<u8>) -> Vec<U256> { diff --git a/core/lib/vm/Cargo.toml b/core/lib/vm/Cargo.toml index 9515d01d6fb0..275b1237dba6 100644 --- a/core/lib/vm/Cargo.toml +++ b/core/lib/vm/Cargo.toml @@ -11,13 +11,12 @@ categories = ["cryptography"] [dependencies] zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "v1.3.2" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3" } +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.3" } zksync_config = { path = "../config", version = "1.0" } zksync_types = { path = 
"../types", version = "1.0" } zksync_utils = { path = "../utils", version = "1.0" } -zksync_state = {path = "../state", version = "1.0" } -zksync_eth_signer = {path = "../eth_signer", version = "1.0"} +zksync_state = { path = "../state", version = "1.0" } zksync_contracts = { path = "../contracts" } vlog = { path = "../vlog", version = "1.0" } @@ -26,9 +25,10 @@ anyhow = "1.0" hex = "0.4" itertools = "0.10" metrics = "0.20" -ethabi = "16.0.0" +ethabi = "18.0.0" once_cell = "1.7" thiserror = "1.0" [dev-dependencies] -tokio = { version = "1", features = ["time"] } \ No newline at end of file +zksync_eth_signer = { path = "../eth_signer", version = "1.0" } +tokio = { version = "1", features = ["time"] } diff --git a/core/lib/vm/src/bootloader_state.rs b/core/lib/vm/src/bootloader_state.rs index 8aa81b289da8..2ecb845dfa64 100644 --- a/core/lib/vm/src/bootloader_state.rs +++ b/core/lib/vm/src/bootloader_state.rs @@ -69,7 +69,6 @@ impl BootloaderState { /// Returns the size of the transaction with given index. /// Panics if there is no such transaction. - /// Use it after #[allow(dead_code)] pub(crate) fn get_tx_size(&self, tx_index: usize) -> usize { self.tx_sizes[tx_index] diff --git a/core/lib/vm/src/events.rs b/core/lib/vm/src/events.rs index d9a2a10406bc..0d11d9102ea2 100644 --- a/core/lib/vm/src/events.rs +++ b/core/lib/vm/src/events.rs @@ -1,6 +1,6 @@ use zk_evm::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_words, h256_to_account_address, u256_to_h256}; +use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; #[derive(Clone)] pub struct SolidityLikeEvent { @@ -16,10 +16,7 @@ impl SolidityLikeEvent { VmEvent { location: (block_number, self.tx_number_in_block as u32), address: self.address, - indexed_topics: be_chunks_to_words(self.topics) - .into_iter() - .map(u256_to_h256) - .collect(), + indexed_topics: be_chunks_to_h256_words(self.topics), value: self.data, } } diff --git a/core/lib/vm/src/history_recorder.rs b/core/lib/vm/src/history_recorder.rs index f6fa0c6fb756..279e91fae568 100644 --- a/core/lib/vm/src/history_recorder.rs +++ b/core/lib/vm/src/history_recorder.rs @@ -1,8 +1,4 @@ -use std::{ - collections::HashMap, - fmt::Debug, - hash::{BuildHasherDefault, Hash, Hasher}, -}; +use std::{collections::HashMap, fmt::Debug, hash::Hash}; use zk_evm::{ aux_structures::Timestamp, @@ -51,7 +47,7 @@ pub trait HistoryMode: private::Sealed + Debug + Clone + Default { fn clone_history(history: &Self::History) -> Self::History where T::HistoryRecord: Clone; - fn mutate_history)>( + fn mutate_history)>( recorder: &mut HistoryRecorder, f: F, ); @@ -92,11 +88,11 @@ impl HistoryMode for HistoryEnabled { { history.clone() } - fn mutate_history)>( + fn mutate_history)>( recorder: &mut HistoryRecorder, f: F, ) { - f(&mut recorder.history) + f(&mut recorder.inner, &mut recorder.history) } fn borrow_history) -> R, R>( recorder: &HistoryRecorder, @@ -111,7 +107,7 @@ impl HistoryMode for HistoryDisabled { type History = (); fn clone_history(_: &Self::History) -> Self::History {} - fn mutate_history)>( + fn mutate_history)>( _: &mut HistoryRecorder, _: F, ) { @@ -186,7 +182,7 @@ impl HistoryRecorder { } /// If history exists, modify it using `f`. 
- pub fn mutate_history)>(&mut self, f: F) { + pub fn mutate_history)>(&mut self, f: F) { H::mutate_history(self, f); } @@ -202,7 +198,7 @@ impl HistoryRecorder { ) -> T::ReturnValue { let (reversed_item, return_value) = self.inner.apply_historic_record(item); - self.mutate_history(|history| { + self.mutate_history(|_, history| { let last_recorded_timestamp = history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); let timestamp = normalize_timestamp(timestamp); assert!( @@ -218,7 +214,7 @@ impl HistoryRecorder { /// Deletes all the history for its component, making /// its current state irreversible pub fn delete_history(&mut self) { - self.mutate_history(|h| h.clear()) + self.mutate_history(|_, h| h.clear()) } } @@ -512,31 +508,69 @@ impl AppDataFrameManagerWithHistory { } } -#[derive(Default)] -pub struct NoopHasher(u64); +const PRIMITIVE_VALUE_EMPTY: PrimitiveValue = PrimitiveValue::empty(); +const PAGE_SUBDIVISION_LEN: usize = 64; + +#[derive(Debug, Default, Clone)] +struct MemoryPage { + root: Vec>>, +} -impl Hasher for NoopHasher { - fn write_usize(&mut self, value: usize) { - self.0 = value as u64; +impl MemoryPage { + fn get(&self, slot: usize) -> &PrimitiveValue { + self.root + .get(slot / PAGE_SUBDIVISION_LEN) + .and_then(|inner| inner.as_ref()) + .map(|leaf| &leaf[slot % PAGE_SUBDIVISION_LEN]) + .unwrap_or(&PRIMITIVE_VALUE_EMPTY) } + fn set(&mut self, slot: usize, value: PrimitiveValue) -> PrimitiveValue { + let root_index = slot / PAGE_SUBDIVISION_LEN; + let leaf_index = slot % PAGE_SUBDIVISION_LEN; + + if self.root.len() <= root_index { + self.root.resize_with(root_index + 1, || None); + } + let node = &mut self.root[root_index]; - fn write(&mut self, _bytes: &[u8]) { - unreachable!("internal hasher only handles usize type"); + if let Some(leaf) = node { + let old = leaf[leaf_index]; + leaf[leaf_index] = value; + old + } else { + let mut leaf = [PrimitiveValue::empty(); PAGE_SUBDIVISION_LEN]; + leaf[leaf_index] = value; + self.root[root_index] = Some(Box::new(leaf)); + PrimitiveValue::empty() + } + } + + fn get_size(&self) -> usize { + self.root.iter().filter_map(|x| x.as_ref()).count() + * PAGE_SUBDIVISION_LEN + * std::mem::size_of::() } +} - fn finish(&self) -> u64 { - self.0 +impl PartialEq for MemoryPage { + fn eq(&self, other: &Self) -> bool { + for slot in 0..self.root.len().max(other.root.len()) * PAGE_SUBDIVISION_LEN { + if self.get(slot) != other.get(slot) { + return false; + } + } + true } } #[derive(Debug, Default, Clone)] pub struct MemoryWrapper { - pub memory: Vec>>, + memory: Vec, } impl PartialEq for MemoryWrapper { fn eq(&self, other: &Self) -> bool { - let empty_page = Default::default(); + let empty_page = MemoryPage::default(); let empty_pages = std::iter::repeat(&empty_page); self.memory .iter() @@ -559,7 +593,7 @@ impl MemoryWrapper { if self.memory.len() <= page { // We don't need to record such events in history // because all these vectors will be empty - self.memory.resize_with(page + 1, HashMap::default); + self.memory.resize_with(page + 1, MemoryPage::default); } } @@ -571,26 +605,23 @@ impl MemoryWrapper { if let Some(page) = self.memory.get(page_number as usize) { let mut result = vec![]; for i in range { - if let Some(word) = page.get(&(i as usize)) { - result.push(*word); - } else { - result.push(PrimitiveValue::empty()); - } + result.push(*page.get(i as usize)); } - result } else { vec![PrimitiveValue::empty(); range.len()] } } - const EMPTY: PrimitiveValue = PrimitiveValue::empty(); - pub fn read_slot(&self, page: usize, slot: usize) -> 
&PrimitiveValue { self.memory .get(page) - .and_then(|page| page.get(&slot)) - .unwrap_or(&Self::EMPTY) + .map(|page| page.get(slot)) + .unwrap_or(&PRIMITIVE_VALUE_EMPTY) + } + + pub fn get_size(&self) -> usize { + self.memory.iter().map(|page| page.get_size()).sum() } } @@ -610,20 +641,15 @@ impl WithHistory for MemoryWrapper { self.ensure_page_exists(page); let page_handle = self.memory.get_mut(page).unwrap(); - let prev_value = if set_value == PrimitiveValue::empty() { - page_handle.remove(&slot) - } else { - page_handle.insert(slot, set_value) - } - .unwrap_or(PrimitiveValue::empty()); + let prev_value = page_handle.set(slot, set_value); - let reserved_item = MemoryHistoryRecord { + let undo = MemoryHistoryRecord { page, slot, set_value: prev_value, }; - (reserved_item, prev_value) + (undo, prev_value) } } @@ -646,15 +672,27 @@ impl HistoryRecorder { } pub fn clear_page(&mut self, page: usize, timestamp: Timestamp) { - let slots_to_clear: Vec<_> = match self.inner.memory.get(page) { - None => return, - Some(x) => x.keys().copied().collect(), - }; - - // We manually clear the page to preserve correct history - for slot in slots_to_clear { - self.write_to_memory(page, slot, PrimitiveValue::empty(), timestamp); - } + self.mutate_history(|inner, history| { + if let Some(page_handle) = inner.memory.get(page) { + for (i, x) in page_handle.root.iter().enumerate() { + if let Some(slots) = x { + for (j, value) in slots.iter().enumerate() { + if *value != PrimitiveValue::empty() { + history.push(( + timestamp, + MemoryHistoryRecord { + page, + slot: PAGE_SUBDIVISION_LEN * i + j, + set_value: *value, + }, + )) + } + } + } + } + inner.memory[page] = MemoryPage::default(); + } + }); } } diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs index 336bca7eb114..c648fcacce1a 100644 --- a/core/lib/vm/src/lib.rs +++ b/core/lib/vm/src/lib.rs @@ -1,7 +1,7 @@ #![allow(clippy::derive_partial_eq_without_eq)] mod bootloader_state; -mod errors; +pub mod errors; pub mod event_sink; mod events; mod history_recorder; diff --git a/core/lib/vm/src/memory.rs b/core/lib/vm/src/memory.rs index 7c39027852aa..a54fa124985f 100644 --- a/core/lib/vm/src/memory.rs +++ b/core/lib/vm/src/memory.rs @@ -11,12 +11,23 @@ use crate::history_recorder::{ use crate::oracles::OracleWithHistory; use crate::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; -#[derive(Debug, Clone, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq)] pub struct SimpleMemory { pub memory: MemoryWithHistory, pub observable_pages: IntFrameManagerWithHistory, } +impl Default for SimpleMemory { + fn default() -> Self { + let mut memory: MemoryWithHistory = Default::default(); + memory.mutate_history(|_, h| h.reserve(607)); + Self { + memory, + observable_pages: Default::default(), + } + } +} + impl OracleWithHistory for SimpleMemory { fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { self.memory.rollback_to_timestamp(timestamp); @@ -107,13 +118,7 @@ impl SimpleMemory { pub fn get_size(&self) -> usize { // Hashmap memory overhead is neglected. 
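The size accounting here works because of the page layout introduced above: a page is a vector of optional boxed 64-slot leaves, so only regions that were actually written get allocated or counted. A std-only sketch of that layout, with `u64` standing in for `PrimitiveValue`:

```rust
// Std-only sketch of the `MemoryPage` layout from this diff: untouched
// regions cost nothing, and the size estimate counts materialized leaves.
const PAGE_SUBDIVISION_LEN: usize = 64;

#[derive(Default)]
struct Page {
    root: Vec<Option<Box<[u64; PAGE_SUBDIVISION_LEN]>>>,
}

impl Page {
    fn set(&mut self, slot: usize, value: u64) {
        let (leaf_idx, slot_idx) = (slot / PAGE_SUBDIVISION_LEN, slot % PAGE_SUBDIVISION_LEN);
        if self.root.len() <= leaf_idx {
            self.root.resize_with(leaf_idx + 1, || None);
        }
        self.root[leaf_idx]
            .get_or_insert_with(|| Box::new([0; PAGE_SUBDIVISION_LEN]))[slot_idx] = value;
    }

    // Mirrors `MemoryPage::get_size`: only allocated leaves are counted.
    fn get_size(&self) -> usize {
        self.root.iter().filter(|leaf| leaf.is_some()).count()
            * PAGE_SUBDIVISION_LEN
            * std::mem::size_of::<u64>()
    }
}

fn main() {
    let mut page = Page::default();
    page.set(5, 42);   // materializes leaf 0 only
    page.set(1000, 7); // materializes one more leaf far away
    assert_eq!(page.get_size(), 2 * PAGE_SUBDIVISION_LEN * 8);
}
```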
- let memory_size = self - .memory - .inner() - .memory - .iter() - .map(|page| page.len() * std::mem::size_of::<(usize, PrimitiveValue)>()) - .sum::(); + let memory_size = self.memory.inner().get_size(); let observable_pages_size = self.observable_pages.inner().get_size(); memory_size + observable_pages_size diff --git a/core/lib/vm/src/refunds.rs b/core/lib/vm/src/refunds.rs index a64cceea4b0a..f064bf0fff7d 100644 --- a/core/lib/vm/src/refunds.rs +++ b/core/lib/vm/src/refunds.rs @@ -73,6 +73,51 @@ impl VmInstance<'_, H> { _gas_spent_on_pubdata: u32, ) -> u32 { 0 + + // let pubdata_published = self.pubdata_published(from_timestamp); + // + // let total_gas_spent = gas_remaining_before - self.gas_remaining(); + // let gas_spent_on_computation = total_gas_spent.checked_sub(gas_spent_on_pubdata).unwrap_or_else(|| { + // vlog::error!("Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", gas_spent_on_pubdata, total_gas_spent); + // 0 + // }); + // let (_, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + // let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + // + // let actual_overhead = Self::actual_overhead_gas( + // self.state.local_state.current_ergs_per_pubdata_byte, + // self.bootloader_state.get_tx_size(current_tx_index), + // pubdata_published, + // gas_spent_on_computation, + // self.state + // .decommittment_processor + // .get_number_of_decommitment_requests_after_timestamp(from_timestamp), + // l2_to_l1_logs.len(), + // ); + // + // let predefined_overhead = self + // .state + // .memory + // .read_slot( + // BOOTLOADER_HEAP_PAGE as usize, + // TX_OVERHEAD_OFFSET + current_tx_index, + // ) + // .value + // .as_u32(); + // + // if actual_overhead <= predefined_overhead { + // predefined_overhead - actual_overhead + // } else { + // // This should never happen but potential mistakes at the early stage should not bring the server down. + // // + // // to make debugging easier. 
+ // vlog::error!( + // "Actual overhead is greater than predefined one, actual: {}, predefined: {}", + // actual_overhead, + // predefined_overhead + // ); + // 0 + // } } #[allow(dead_code)] @@ -85,6 +130,61 @@ impl VmInstance<'_, H> { _l2_l1_logs: usize, ) -> u32 { 0 + + // let overhead_for_block_gas = U256::from(crate::transaction_data::block_overhead_gas( + // gas_per_pubdata_byte_limit, + // )); + + // let encoded_len = U256::from(encoded_len); + // let pubdata_published = U256::from(pubdata_published); + // let gas_spent_on_computation = U256::from(gas_spent_on_computation); + // let number_of_decommitment_requests = U256::from(number_of_decommitment_requests); + // let l2_l1_logs = U256::from(l2_l1_logs); + + // let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); + + // let overhead_for_length = ceil_div_u256( + // encoded_len * overhead_for_block_gas, + // BOOTLOADER_TX_ENCODING_SPACE.into(), + // ); + + // let actual_overhead_for_pubdata = ceil_div_u256( + // pubdata_published * overhead_for_block_gas, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + + // let actual_gas_limit_overhead = ceil_div_u256( + // gas_spent_on_computation * overhead_for_block_gas, + // MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT.into(), + // ); + + // let code_decommitter_sorter_circuit_overhead = ceil_div_u256( + // number_of_decommitment_requests * overhead_for_block_gas, + // GEOMETRY_CONFIG.limit_for_code_decommitter_sorter.into(), + // ); + + // let l1_l2_logs_overhead = ceil_div_u256( + // l2_l1_logs * overhead_for_block_gas, + // std::cmp::min( + // GEOMETRY_CONFIG.limit_for_l1_messages_merklizer, + // GEOMETRY_CONFIG.limit_for_l1_messages_pudata_hasher, + // ) + // .into(), + // ); + + // let overhead = vec![ + // tx_slot_overhead, + // overhead_for_length, + // actual_overhead_for_pubdata, + // actual_gas_limit_overhead, + // code_decommitter_sorter_circuit_overhead, + // l1_l2_logs_overhead, + // ] + // .into_iter() + // .max() + // .unwrap(); + + // overhead.as_u32() } /// Returns the given transactions' gas limit - by reading it directly from the VM memory. diff --git a/core/lib/vm/src/test_utils.rs b/core/lib/vm/src/test_utils.rs index bd98537522ac..34fa3b69aabe 100644 --- a/core/lib/vm/src/test_utils.rs +++ b/core/lib/vm/src/test_utils.rs @@ -31,6 +31,7 @@ use crate::{ AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode, HistoryRecorder, }, memory::SimpleMemory, + vm::ZkSyncVmState, VmInstance, }; @@ -329,3 +330,18 @@ pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) - h256_to_account_address(&H256(hash)) } + +pub fn verify_required_storage( + state: &ZkSyncVmState<'_, H>, + required_values: Vec<(H256, StorageKey)>, +) { + for (required_value, key) in required_values { + let current_value = state.storage.storage.read_from_storage(&key); + + assert_eq!( + u256_to_h256(current_value), + required_value, + "Invalid value at key {key:?}" + ); + } +} diff --git a/core/lib/vm/src/tests/bootloader.rs b/core/lib/vm/src/tests/bootloader.rs index d3398e19c9c9..38add8f22272 100644 --- a/core/lib/vm/src/tests/bootloader.rs +++ b/core/lib/vm/src/tests/bootloader.rs @@ -2,7 +2,6 @@ //! Tests for the bootloader //! The description for each of the tests can be found in the corresponding `.yul` file. //! 
-use ethabi::Contract; use itertools::Itertools; use std::{ collections::{HashMap, HashSet}, @@ -15,8 +14,12 @@ use crate::{ history_recorder::HistoryMode, oracles::tracer::{StorageInvocationTracer, TransactionResultTracer}, test_utils::{ - get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, - mock_loadnext_test_call, + get_create_zksync_address, get_deploy_tx, get_error_tx, mock_loadnext_test_call, + verify_required_storage, + }, + tests::utils::{ + get_l1_deploy_tx, get_l1_execute_test_contract_tx_with_sender, read_error_contract, + read_long_return_data_contract, read_test_contract, }, transaction_data::TransactionData, utils::{ @@ -38,10 +41,6 @@ use crate::{ use zk_evm::{ aux_structures::Timestamp, block_properties::BlockProperties, zkevm_opcode_defs::FarCallOpcode, }; -use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, SystemContractCode, - PLAYGROUND_BLOCK_BOOTLOADER_CODE, -}; use zksync_state::{InMemoryStorage, ReadStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{ block::DeployedContract, @@ -61,9 +60,9 @@ use zksync_types::{ }, vm_trace::{Call, CallType}, AccountTreeId, Address, Eip712Domain, Execute, ExecuteTransactionCommon, L1TxCommonData, - L2ChainId, Nonce, PackedEthSignature, StorageKey, Transaction, BOOTLOADER_ADDRESS, H160, H256, - L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS, - U256, + L2ChainId, Nonce, PackedEthSignature, Transaction, BOOTLOADER_ADDRESS, H160, H256, + L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS, U256, }; use zksync_utils::{ bytecode::CompressedBytecodeInfo, @@ -71,6 +70,11 @@ use zksync_utils::{ {bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}, }; +use zksync_contracts::{ + get_loadnext_contract, load_contract, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE, +}; + +use super::utils::{read_many_owners_custom_account_contract, read_nonce_holder_tester}; /// Helper struct for tests, that takes care of setting the database and provides some functions to get and set balances. 
/// Example use: ///```ignore @@ -318,21 +322,6 @@ fn test_bootloader_out_of_gas() { assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); } -fn verify_required_storage( - state: &ZkSyncVmState<'_, H>, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - fn verify_required_memory( state: &ZkSyncVmState<'_, H>, required_values: Vec<(U256, u32, u32)>, @@ -1674,8 +1663,6 @@ pub fn get_l1_tx_with_custom_bytecode_hash( (bytes_to_be_words(tx_bytes), predefined_overhead) } -const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; - pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { let sender = H160::random(); get_l1_execute_test_contract_tx_with_sender( @@ -1702,7 +1689,7 @@ pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { sender, gas_limit: U256::from(100000000u32), - gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), ..Default::default() }), execute: Execute { @@ -1715,96 +1702,6 @@ pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) - } } -pub fn get_l1_execute_test_contract_tx_with_sender( - sender: Address, - deployed_address: Address, - with_panic: bool, - value: U256, - payable: bool, -) -> Transaction { - let execute = execute_test_contract(deployed_address, with_panic, value, payable); - - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), - to_mint: value, - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - } -} - -pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { - let execute = get_create_execute(code, calldata); - - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - } -} - -fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -fn read_long_return_data_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") -} - -fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -fn execute_test_contract( - address: Address, - with_panic: bool, - value: U256, - payable: bool, -) -> Execute { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", - ); - - let function = if payable { - 
test_contract - .function("incrementWithRevertPayable") - .unwrap() - } else { - test_contract.function("incrementWithRevert").unwrap() - }; - - let calldata = function - .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) - .expect("failed to encode parameters"); - - Execute { - contract_address: address, - calldata, - value, - factory_deps: None, - } -} - #[test] fn test_call_tracer() { let mut vm_test_env = VmTestEnv::default(); @@ -2143,9 +2040,9 @@ async fn test_require_eip712() { }; let txn = pk_signer.sign_transaction(raw_tx).await.unwrap(); - let (txn_request, hash) = TransactionRequest::from_bytes(&txn, chain_id, 100000).unwrap(); + let (txn_request, hash) = TransactionRequest::from_bytes(&txn, chain_id).unwrap(); - let mut l2_tx: L2Tx = txn_request.try_into().unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(txn_request, 100000).unwrap(); l2_tx.set_input(txn, hash); let transaction: Transaction = l2_tx.try_into().unwrap(); let transaction_data: TransactionData = transaction.try_into().unwrap(); @@ -2174,10 +2071,9 @@ async fn test_require_eip712() { let aa_txn = pk_signer.sign_transaction(aa_raw_tx).await.unwrap(); - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&aa_txn, 270, 100000).unwrap(); + let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&aa_txn, 270).unwrap(); - let mut l2_tx: L2Tx = aa_txn_request.try_into().unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); l2_tx.set_input(aa_txn, aa_hash); // Pretend that operator is malicious and sets the initiator to the AA account. l2_tx.common_data.initiator_address = account_address; @@ -2226,9 +2122,9 @@ async fn test_require_eip712() { let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(chain_id)); let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, chain_id, 100000).unwrap(); + TransactionRequest::from_bytes(&encoded_tx, chain_id).unwrap(); - let mut l2_tx: L2Tx = aa_txn_request.try_into().unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.try_into().unwrap(); diff --git a/core/lib/vm/src/tests/mod.rs b/core/lib/vm/src/tests/mod.rs index 3900135abeaa..a8c040fbdd65 100644 --- a/core/lib/vm/src/tests/mod.rs +++ b/core/lib/vm/src/tests/mod.rs @@ -1 +1,4 @@ mod bootloader; +mod upgrades; + +mod utils; diff --git a/core/lib/vm/src/tests/upgrades.rs b/core/lib/vm/src/tests/upgrades.rs new file mode 100644 index 000000000000..2abb154eb15c --- /dev/null +++ b/core/lib/vm/src/tests/upgrades.rs @@ -0,0 +1,377 @@ +use crate::{ + test_utils::verify_required_storage, + tests::utils::get_l1_deploy_tx, + utils::{create_test_block_params, BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT}, + vm::tx_has_failed, + vm_with_bootloader::{init_vm_inner, push_transaction_to_bootloader_memory}, + vm_with_bootloader::{BlockContextMode, TxExecutionMode}, + HistoryEnabled, OracleTools, TxRevertReason, +}; + +use zk_evm::aux_structures::Timestamp; + +use zksync_types::{ + ethabi::Contract, + tx::tx_execution_info::TxExecutionStatus, + Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, + {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, + {get_code_key, get_known_code_key, H160}, +}; + +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use 
zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; +use zksync_state::WriteStorage; + +use crate::tests::utils::create_storage_view; +use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; + +use super::utils::read_test_contract; + +/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: +/// - This transaction must be the only one in block +/// - If present, this transaction must be the first one in block +#[test] +fn test_protocol_upgrade_is_first() { + let mut storage_view = create_storage_view(); + let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); + let (block_context, block_properties) = create_test_block_params(); + + let bytecode_hash = hash_bytecode(&read_test_contract()); + + // Here we just use some random transaction of protocol upgrade type: + let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecodehash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let normal_l1_transaction = get_l1_deploy_tx(&read_test_contract(), &[]); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context.into(), Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + + let expected_error = TxRevertReason::UnexpectedVMBehavior( + "Assertion error: Protocol upgrade tx not first".to_string(), + ); + + // Test 1: there must be only one system transaction in block + vm.save_current_vm_as_snapshot(); + + push_transaction_to_bootloader_memory( + &mut vm, + &protocol_upgrade_transaction, + TxExecutionMode::VerifyExecute, + None, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &normal_l1_transaction, + TxExecutionMode::VerifyExecute, + None, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &protocol_upgrade_transaction, + TxExecutionMode::VerifyExecute, + None, + ); + + vm.execute_next_tx(u32::MAX, false).unwrap(); + vm.execute_next_tx(u32::MAX, false).unwrap(); + let res = vm.execute_next_tx(u32::MAX, false); + assert_eq!(res, Err(expected_error.clone())); + + // Test 2: the protocol upgrade tx must be the first one in block + vm.rollback_to_latest_snapshot(); + + push_transaction_to_bootloader_memory( + &mut vm, + &normal_l1_transaction, + TxExecutionMode::VerifyExecute, + None, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &protocol_upgrade_transaction, + TxExecutionMode::VerifyExecute, + None, + ); + + vm.execute_next_tx(u32::MAX, false).unwrap(); + let res = vm.execute_next_tx(u32::MAX, false); + assert_eq!(res, Err(expected_error)); +} + +/// In this test we try to test how force deployments could be done via protocol upgrade transactions. +#[test] +fn test_force_deploy_upgrade() { + let mut storage_view = create_storage_view(); + + let bytecode_hash = hash_bytecode(&read_test_contract()); + + let known_code_key = get_known_code_key(&bytecode_hash); + // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
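The test above builds its upgrade transaction with `get_forced_deploy_tx`, defined later in this file; the core of that helper is packing each `ForceDeployment` into an ABI tuple for the deployer's `forceDeployOnAddresses` call. A sketch of that packing, assuming only the `ethabi` crate; the tuple layout follows the struct fields in the order the test uses them:

```rust
// Sketch of the ForceDeployment -> calldata token packing performed by
// `get_forced_deploy_tx` later in this diff. Assumes only `ethabi`.
use ethabi::{Address, Token, Uint};

struct ForceDeployment {
    bytecode_hash: [u8; 32],
    address: Address,
    call_constructor: bool,
    value: Uint,
    input: Vec<u8>,
}

fn encode_deployments(deployments: &[ForceDeployment]) -> Token {
    Token::Array(
        deployments
            .iter()
            .map(|d| {
                Token::Tuple(vec![
                    Token::FixedBytes(d.bytecode_hash.to_vec()),
                    Token::Address(d.address),
                    Token::Bool(d.call_constructor),
                    Token::Uint(d.value),
                    Token::Bytes(d.input.clone()),
                ])
            })
            .collect(),
    )
}

fn main() {
    let deployment = ForceDeployment {
        bytecode_hash: [0u8; 32],
        address: Address::zero(),
        call_constructor: false,
        value: Uint::zero(),
        input: vec![],
    };
    println!("{}", encode_deployments(&[deployment]));
}
```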
+ storage_view.set_value(known_code_key, u256_to_h256(1.into())); + + let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); + let (block_context, block_properties) = create_test_block_params(); + + let address_to_deploy = H160::random(); + // Here we just use some random transaction of protocol upgrade type: + let transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecodehash to + address: address_to_deploy, + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context.into(), Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &transaction, + TxExecutionMode::VerifyExecute, + None, + ); + let result = vm.execute_next_tx(u32::MAX, false).unwrap(); + assert_eq!( + result.status, + TxExecutionStatus::Success, + "The force upgrade was not successful" + ); + assert!(!tx_has_failed(&vm.state, 0)); + + let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; + + // Verify that the bytecode has been set correctly + verify_required_storage(&vm.state, expected_slots); +} + +/// Here we show how the work with the complex upgrader could be done +#[test] +fn test_complex_upgrader() { + let mut storage_view = create_storage_view(); + + let bytecode_hash = hash_bytecode(&read_complex_upgrade()); + let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); + + // Let's assume that the bytecode for the implementation of the complex upgrade + // is already deployed in some address in userspace + let upgrade_impl = H160::random(); + let account_code_key = get_code_key(&upgrade_impl); + + storage_view.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage_view.set_value( + get_known_code_key(&msg_sender_test_hash), + u256_to_h256(1.into()), + ); + storage_view.set_value(account_code_key, bytecode_hash); + + let mut oracle_tools: OracleTools = + OracleTools::new(&mut storage_view, HistoryEnabled); + oracle_tools.decommittment_processor.populate( + vec![ + ( + h256_to_u256(bytecode_hash), + bytes_to_be_words(read_complex_upgrade()), + ), + ( + h256_to_u256(msg_sender_test_hash), + bytes_to_be_words(read_msg_sender_test()), + ), + ], + Timestamp(0), + ); + + let (block_context, block_properties) = create_test_block_params(); + + let address_to_deploy1 = H160::random(); + let address_to_deploy2 = H160::random(); + + let transaction = get_complex_upgrade_tx( + upgrade_impl, + address_to_deploy1, + address_to_deploy2, + bytecode_hash, + ); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context.into(), Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &transaction, + TxExecutionMode::VerifyExecute, + None, + ); + let result = vm.execute_next_tx(u32::MAX, false).unwrap(); + assert_eq!( + result.status, + TxExecutionStatus::Success, + "The force upgrade was not successful" + ); + assert!(!tx_has_failed(&vm.state, 0)); + + let expected_slots = vec![ + (bytecode_hash, 
get_code_key(&address_to_deploy1)), + (bytecode_hash, get_code_key(&address_to_deploy2)), + ]; + + // Verify that the bytecode has been set correctly + verify_required_storage(&vm.state, expected_slots); +} + +#[derive(Debug, Clone)] +struct ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash: H256, + // The address on which to deploy the bytecodehash to + address: Address, + // Whether to run the constructor on the force deployment + call_constructor: bool, + // The value with which to initialize a contract + value: U256, + // The constructor calldata + input: Vec, +} + +fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { + let deployer = deployer_contract(); + let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); + + let encoded_deployments: Vec<_> = deployment + .iter() + .map(|deployment| { + Token::Tuple(vec![ + Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), + Token::Address(deployment.address), + Token::Bool(deployment.call_constructor), + Token::Uint(deployment.value), + Token::Bytes(deployment.input.clone()), + ]) + }) + .collect(); + + let params = [Token::Array(encoded_deployments)]; + + let calldata = contract_function + .encode_input(¶ms) + .expect("failed to encode parameters"); + + let execute = Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata, + factory_deps: None, + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + } +} + +// Returns the transaction that performs a complex protocol upgrade. 
+// The first param is the address of the implementation of the complex upgrade +// in user-space, while the next 3 params are params of the implementation itself. +// For the explanation of the parameters, please refer to: +// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol +fn get_complex_upgrade_tx( + implementation_address: Address, + address1: Address, + address2: Address, + bytecode_hash: H256, +) -> Transaction { + let impl_contract = get_complex_upgrade_abi(); + let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); + let impl_calldata = impl_function + .encode_input(&[ + Token::Address(address1), + Token::Address(address2), + Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), + ]) + .unwrap(); + + let complex_upgrader = get_complex_upgrader_abi(); + let upgrade_function = complex_upgrader.function("upgrade").unwrap(); + let complex_upgrader_calldata = upgrade_function + .encode_input(&[ + Token::Address(implementation_address), + Token::Bytes(impl_calldata), + ]) + .unwrap(); + + let execute = Execute { + contract_address: COMPLEX_UPGRADER_ADDRESS, + calldata: complex_upgrader_calldata, + factory_deps: None, + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + } +} + +fn read_complex_upgrade() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +fn read_msg_sender_test() -> Vec<u8> { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") +} + +fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} + +fn get_complex_upgrader_abi() -> Contract { + load_sys_contract("ComplexUpgrader") +} diff --git a/core/lib/vm/src/tests/utils.rs b/core/lib/vm/src/tests/utils.rs new file mode 100644 index 000000000000..5b6d253bb9d3 --- /dev/null +++ b/core/lib/vm/src/tests/utils.rs @@ -0,0 +1,110 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! 
+use zksync_types::{ + ethabi::Contract, + Execute, L1TxCommonData, H160, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, + {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, U256}, +}; + +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_state::{InMemoryStorage, StorageView}; +use zksync_utils::bytecode::hash_bytecode; + +use crate::test_utils::get_create_execute; + +pub fn read_test_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +} + +pub fn read_long_return_data_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") +} + +pub fn read_nonce_holder_tester() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +} + +pub fn read_error_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ) +} + +pub fn read_many_owners_custom_account_contract() -> (Vec, Contract) { + let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; + (read_bytecode(path), load_contract(path)) +} + +pub fn get_l1_execute_test_contract_tx_with_sender( + sender: Address, + deployed_address: Address, + with_panic: bool, + value: U256, + payable: bool, +) -> Transaction { + let execute = execute_test_contract(deployed_address, with_panic, value, payable); + + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + to_mint: value, + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + } +} + +fn execute_test_contract( + address: Address, + with_panic: bool, + value: U256, + payable: bool, +) -> Execute { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", + ); + + let function = if payable { + test_contract + .function("incrementWithRevertPayable") + .unwrap() + } else { + test_contract.function("incrementWithRevert").unwrap() + }; + + let calldata = function + .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) + .expect("failed to encode parameters"); + + Execute { + contract_address: address, + calldata, + value, + factory_deps: None, + } +} + +pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { + let execute = get_create_execute(code, calldata); + + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: H160::random(), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + } +} + +pub fn create_storage_view() -> StorageView { + let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); + StorageView::new(raw_storage) +} diff --git a/core/lib/vm/src/transaction_data.rs b/core/lib/vm/src/transaction_data.rs index cfba24db4878..77a47957580e 100644 --- a/core/lib/vm/src/transaction_data.rs +++ b/core/lib/vm/src/transaction_data.rs @@ -1,7 +1,9 @@ use zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_types::ethabi::{encode, Address, Token}; use zksync_types::fee::encoding_len; -use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; +use 
zksync_types::{ + l1::is_l1_tx_type, l2::TransactionType, ExecuteTransactionCommon, Transaction, U256, +}; use zksync_types::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; use zksync_utils::{address_to_h256, ceil_div_u256}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; @@ -10,8 +12,6 @@ use crate::vm_with_bootloader::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, }; -pub(crate) const L1_TX_TYPE: u8 = 255; - // This structure represents the data that is used by // the Bootloader to describe the transaction. #[derive(Debug, Default, Clone)] @@ -82,7 +82,7 @@ impl From for TransactionData { ExecuteTransactionCommon::L1(common_data) => { let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); TransactionData { - tx_type: L1_TX_TYPE, + tx_type: common_data.tx_format() as u8, from: common_data.sender, to: execute_tx.execute.contract_address, gas_limit: common_data.gas_limit, @@ -108,6 +108,35 @@ impl From for TransactionData { reserved_dynamic: vec![], } } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.upgrade_id as u16), + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + } + } } } } @@ -162,7 +191,7 @@ impl TransactionData { pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { // It is enforced by the protocol that the L1 transactions always pay the exact amount of gas per pubdata // as was supplied in the transaction. 
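The hunks below replace comparisons against a single `L1_TX_TYPE` constant (removed above, where it was defined as `255`) with an `is_l1_tx_type` predicate, since protocol upgrade transactions are a second L1-originated type. A plausible shape for the predicate; `255` is grounded in the removed constant, while `254` for upgrades is an assumed value:

```rust
// A plausible shape for `zksync_types::l1::is_l1_tx_type`. `255` matches the
// `L1_TX_TYPE` constant removed by this diff; 254 for upgrades is assumed.
pub const PRIORITY_OPERATION_L2_TX_TYPE: u8 = 255; // plain L1 -> L2 transaction
pub const PROTOCOL_UPGRADE_TX_TYPE: u8 = 254; // assumed code for upgrade txs

pub fn is_l1_tx_type(tx_type: u8) -> bool {
    tx_type == PRIORITY_OPERATION_L2_TX_TYPE || tx_type == PROTOCOL_UPGRADE_TX_TYPE
}

fn main() {
    assert!(is_l1_tx_type(255));
    assert!(is_l1_tx_type(254));
    assert!(!is_l1_tx_type(113)); // e.g. an EIP-712 L2 transaction type
}
```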
- if self.tx_type == L1_TX_TYPE { + if is_l1_tx_type(self.tx_type) { self.pubdata_price_limit.as_u32() } else { block_gas_price_per_pubdata @@ -225,7 +254,7 @@ impl TransactionData { pub(crate) fn canonical_l1_tx_hash(&self) -> zksync_types::H256 { use zksync_types::web3::signing::keccak256; - if self.tx_type != L1_TX_TYPE { + if !is_l1_tx_type(self.tx_type) { panic!("Trying to get L1 tx hash for non-L1 tx"); } @@ -268,6 +297,12 @@ pub fn derive_overhead( // We use "ceil" here for formal reasons to allow easier approach for calculating the overhead in O(1) // let max_pubdata_in_tx = ceil_div_u256(gas_limit, gas_price_per_pubdata); + // The maximal potential overhead from pubdata + // let pubdata_overhead = ceil_div_u256( + // max_pubdata_in_tx * max_block_overhead, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + vec![ (coeficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) @@ -330,7 +365,7 @@ impl OverheadCoeficients { } pub fn from_tx_type(tx_type: u8) -> Self { - if tx_type == L1_TX_TYPE { + if is_l1_tx_type(tx_type) { Self::new_l1() } else { Self::new_l2() @@ -395,7 +430,7 @@ pub fn get_amortized_overhead( as u32 }; - // since the pubdata is not published. If decided to use the pubdata overhead, it needs to be updated. + // since the pubdata is not published. If decided to use the pubdata overhead, it needs to be updated. // 3. ceil(O3 * overhead_for_block_gas) >= overhead_gas // O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK = ceil(gas_limit / gas_per_pubdata_byte_limit) / MAX_PUBDATA_PER_BLOCK // >= (gas_limit / (gas_per_pubdata_byte_limit * MAX_PUBDATA_PER_BLOCK). Throwing off the `ceil`, while may provide marginally lower diff --git a/core/lib/vm/src/vm_with_bootloader.rs b/core/lib/vm/src/vm_with_bootloader.rs index e341b40390cb..bf5237e363e1 100644 --- a/core/lib/vm/src/vm_with_bootloader.rs +++ b/core/lib/vm/src/vm_with_bootloader.rs @@ -14,9 +14,9 @@ use zksync_config::constants::MAX_TXS_IN_BLOCK; use zksync_contracts::BaseSystemContracts; use zksync_types::{ - zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, - L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, - USED_BOOTLOADER_MEMORY_WORDS, + l1::is_l1_tx_type, zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, + BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, + U256, USED_BOOTLOADER_MEMORY_WORDS, }; use zksync_utils::{ address_to_u256, @@ -30,7 +30,7 @@ use itertools::Itertools; use crate::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, - transaction_data::{TransactionData, L1_TX_TYPE}, + transaction_data::TransactionData, utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, }, @@ -455,7 +455,7 @@ pub fn push_raw_transaction_to_bootloader_memory( .collect(); let compressed_bytecodes = explicit_compressed_bytecodes.unwrap_or_else(|| { - if tx.tx_type == L1_TX_TYPE { + if is_l1_tx_type(tx.tx_type) { // L1 transactions do not need compression return vec![]; } diff --git a/core/lib/web3_decl/Cargo.toml b/core/lib/web3_decl/Cargo.toml index 1ca4b3974c6b..2be268b4c22a 100644 --- a/core/lib/web3_decl/Cargo.toml +++ b/core/lib/web3_decl/Cargo.toml @@ -15,8 +15,8 @@ serde = "1.0" serde_json = "1.0" rlp = "0.5.0" thiserror = "1.0" -bigdecimal = { version = "=0.2.0", features = ["serde"] } -jsonrpsee = { version = "0.18.2", default-features = false, features = ["macros"] } +bigdecimal = { 
version = "0.2.2", features = ["serde"] } +jsonrpsee = { version = "0.19.0", default-features = false, features = ["macros"] } chrono = "0.4" zksync_types = { path = "../../lib/types", version = "1.0" } diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 68ac33c23357..f92f2a562392 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -3,7 +3,8 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; // Workspace uses use crate::types::{ - Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, + Block, Bytes, FeeHistory, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, + U256, U64, }; use zksync_types::{ @@ -162,4 +163,12 @@ pub trait EthNamespace { #[method(name = "mining")] async fn mining(&self) -> RpcResult; + + #[method(name = "feeHistory")] + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumber, + reward_percentiles: Vec, + ) -> RpcResult; } diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 5b008da2544f..e28342ad95aa 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -1,16 +1,19 @@ -use crate::types::Token; +use std::collections::HashMap; + use bigdecimal::BigDecimal; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use std::collections::HashMap; -use zksync_types::api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails}; -use zksync_types::transaction_request::CallRequest; + use zksync_types::{ - api::U64, - explorer_api::{BlockDetails, L1BatchDetails}, + api::{ + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + TransactionDetails, + }, fee::Fee, - Address, H256, U256, + transaction_request::CallRequest, + Address, L1BatchNumber, MiniblockNumber, H256, U256, U64, }; -use zksync_types::{L1BatchNumber, MiniblockNumber}; + +use crate::types::Token; #[cfg_attr( all(feature = "client", feature = "server"), @@ -98,4 +101,10 @@ pub trait ZksNamespace { #[method(name = "getL1GasPrice")] async fn get_l1_gas_price(&self) -> RpcResult; + + #[method(name = "getProtocolVersion")] + async fn get_protocol_version( + &self, + version_id: Option, + ) -> RpcResult>; } diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index b5538343b89c..b928424bb41b 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -20,8 +20,8 @@ pub use zksync_types::{ web3::{ ethabi, types::{ - Address, BlockHeader, Bytes, CallRequest, Index, SyncState, TraceFilter, Transaction, - Work, H160, H256, H64, U256, U64, + Address, BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, + Transaction, Work, H160, H256, H64, U256, U64, }, }, }; diff --git a/core/multivm_deps/README.md b/core/multivm_deps/README.md new file mode 100644 index 000000000000..a7a4a18c5169 --- /dev/null +++ b/core/multivm_deps/README.md @@ -0,0 +1,6 @@ +# MultiVM dependencies + +This folder contains the old versions of the VM we have used in the past. The `multivm` crate uses them to dynamically +switch the version we use to be able to sync from the genesis. This is a temporary measure until a "native" solution is +implemented (i.e., the `vm` crate would itself know the changes between versions, and thus we will have only the +functional diff between versions, not several fully-fledged VMs). 
diff --git a/core/multivm_deps/vm_m5/Cargo.toml b/core/multivm_deps/vm_m5/Cargo.toml new file mode 100644 index 000000000000..34fd8477b486 --- /dev/null +++ b/core/multivm_deps/vm_m5/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "vm_m5" +version = "0.1.0" +edition = "2018" +authors = ["The Matter Labs Team <hello@matterlabs.dev>"] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +zksync_crypto = { path = "../../lib/crypto", version = "1.0" } +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } +zksync_config = { path = "../../lib/config", version = "1.0" } +zksync_state = { path = "../../lib/state", version = "1.0" } +zksync_storage = { path = "../../lib/storage", version = "1.0" } + +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.1" } +zksync_contracts = { path = "../../lib/contracts" } + +hex = "0.4" +thiserror = "1.0" +itertools = "0.10" +once_cell = "1.7" +vlog = { path = "../../lib/vlog", version = "1.0" } +metrics = "0.20" + +tracing = "0.1" + +[dev-dependencies] +tempfile = "3.0.2" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" diff --git a/core/multivm_deps/vm_m5/src/bootloader_state.rs b/core/multivm_deps/vm_m5/src/bootloader_state.rs new file mode 100644 index 000000000000..40d53c047c7e --- /dev/null +++ b/core/multivm_deps/vm_m5/src/bootloader_state.rs @@ -0,0 +1,109 @@ +use crate::vm_with_bootloader::TX_DESCRIPTION_OFFSET; + +/// Intermediate bootloader-related VM state. +/// +/// Required to process transactions one by one (since we intercept the VM execution to execute +/// transactions and add new ones to the memory on the fly). +/// Think about it like a two-pointer scheme: one pointer (`free_tx_index`) tracks the end of the +/// initialized memory, while another (`tx_to_execute`) tracks our progress in this initialized memory. +/// This is required since it's possible to push several transactions to the bootloader memory and then +/// execute them one by one. +/// +/// Serves two purposes: +/// - Tracks where the next tx should be pushed to in the bootloader memory. +/// - Tracks which transaction should be executed next. +#[derive(Debug, Default, Clone)] +pub(crate) struct BootloaderState { + /// Memory offset (in words) for the next transaction data. + free_tx_offset: usize, + /// ID of the next transaction to be executed. + /// See the structure doc-comment for a better explanation of purpose. + tx_to_execute: usize, + /// Vector that contains sizes of all pushed transactions. + tx_sizes: Vec<usize>, +} + +impl BootloaderState { + /// Creates an empty bootloader state. + pub(crate) fn new() -> Self { + Self::default() + } + + /// Notifies the state about the fact that a new transaction was pushed into the memory. + pub(crate) fn add_tx_data(&mut self, tx_size: usize) { + self.free_tx_offset += tx_size; + self.tx_sizes.push(tx_size); + } + + /// Returns the next "free" transaction index. + pub(crate) fn free_tx_index(&self) -> usize { + self.tx_sizes.len() + } + + /// Returns the next index of transaction to execute. + pub(crate) fn tx_to_execute(&self) -> usize { + self.tx_to_execute + } + + /// Returns the memory offset for the new transaction. + pub(crate) fn free_tx_offset(&self) -> usize { + self.free_tx_offset + } + + /// Returns the ID of the next transaction to be executed and increments the local transaction counter. 
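The offset bookkeeping used by `get_tx_description_offset` just below is a prefix sum over the pushed transaction sizes. A std-only illustration; `TX_DESCRIPTION_OFFSET = 8` is a placeholder, not the real bootloader constant:

```rust
// The per-transaction memory offsets are a prefix sum over pushed sizes.
// `TX_DESCRIPTION_OFFSET = 8` is a placeholder, not the real constant.
const TX_DESCRIPTION_OFFSET: usize = 8;

fn description_offset(tx_sizes: &[usize], tx_index: usize) -> usize {
    // Mirrors `get_tx_description_offset`: base offset plus all earlier sizes.
    TX_DESCRIPTION_OFFSET + tx_sizes.iter().take(tx_index).sum::<usize>()
}

fn main() {
    let tx_sizes = [2, 4, 3];
    assert_eq!(description_offset(&tx_sizes, 0), 8);
    assert_eq!(description_offset(&tx_sizes, 2), 8 + 2 + 4);
}
```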
+ pub(crate) fn next_unexecuted_tx(&mut self) -> usize { + assert!( + self.tx_to_execute < self.tx_sizes.len(), + "Attempt to execute tx that was not pushed to memory. Tx ID: {}, txs in bootloader: {}", + self.tx_to_execute, + self.tx_sizes.len() + ); + + let old = self.tx_to_execute; + self.tx_to_execute += 1; + old + } + + /// Returns the size of the transaction with given index. + /// Panics if there is no such transaction. + #[allow(dead_code)] + pub(crate) fn get_tx_size(&self, tx_index: usize) -> usize { + self.tx_sizes[tx_index] + } + + pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize { + TX_DESCRIPTION_OFFSET + self.tx_sizes.iter().take(tx_index).sum::() + } +} + +#[cfg(test)] +mod tests { + use super::BootloaderState; + + #[test] + fn workflow() { + let mut state = BootloaderState::new(); + assert_eq!(state.free_tx_index(), 0); + assert_eq!(state.free_tx_offset(), 0); + + state.add_tx_data(2); + assert_eq!(state.free_tx_index(), 1); + assert_eq!(state.free_tx_offset(), 2); + + state.add_tx_data(4); + assert_eq!(state.free_tx_index(), 2); + assert_eq!(state.free_tx_offset(), 6); + + assert_eq!(state.next_unexecuted_tx(), 0); + assert_eq!(state.next_unexecuted_tx(), 1); + } + + #[test] + #[should_panic( + expected = "Attempt to execute tx that was not pushed to memory. Tx ID: 0, txs in bootloader: 0" + )] + fn get_not_pushed_tx() { + let mut state = BootloaderState::new(); + state.next_unexecuted_tx(); + } +} diff --git a/core/multivm_deps/vm_m5/src/errors/bootloader_error.rs b/core/multivm_deps/vm_m5/src/errors/bootloader_error.rs new file mode 100644 index 000000000000..bfbef44a42bd --- /dev/null +++ b/core/multivm_deps/vm_m5/src/errors/bootloader_error.rs @@ -0,0 +1,58 @@ +#[derive(Debug)] +pub(crate) enum BootloaderErrorCode { + EthCall, + AccountTxValidationFailed, + FailedToChargeFee, + FromIsNotAnAccount, + FailedToCheckAccount, + UnacceptableGasPrice, + PayForTxFailed, + PrePaymasterPreparationFailed, + PaymasterValidationFailed, + FailedToSendFeesToTheOperator, + FailedToSetPrevBlockHash, + UnacceptablePubdataPrice, + TxValidationError, + MaxPriorityFeeGreaterThanMaxFee, + BaseFeeGreaterThanMaxFeePerGas, + PaymasterReturnedInvalidContext, + PaymasterContextIsTooLong, + AssertionError, + FailedToMarkFactoryDeps, + TxValidationOutOfGas, + NotEnoughGasProvided, + AccountReturnedInvalidMagic, + PaymasterReturnedInvalidMagic, + Unknown, +} + +impl From for BootloaderErrorCode { + fn from(code: u8) -> BootloaderErrorCode { + match code { + 0 => BootloaderErrorCode::EthCall, + 1 => BootloaderErrorCode::AccountTxValidationFailed, + 2 => BootloaderErrorCode::FailedToChargeFee, + 3 => BootloaderErrorCode::FromIsNotAnAccount, + 4 => BootloaderErrorCode::FailedToCheckAccount, + 5 => BootloaderErrorCode::UnacceptableGasPrice, + 6 => BootloaderErrorCode::FailedToSetPrevBlockHash, + 7 => BootloaderErrorCode::PayForTxFailed, + 8 => BootloaderErrorCode::PrePaymasterPreparationFailed, + 9 => BootloaderErrorCode::PaymasterValidationFailed, + 10 => BootloaderErrorCode::FailedToSendFeesToTheOperator, + 11 => BootloaderErrorCode::UnacceptablePubdataPrice, + 12 => BootloaderErrorCode::TxValidationError, + 13 => BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee, + 14 => BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas, + 15 => BootloaderErrorCode::PaymasterReturnedInvalidContext, + 16 => BootloaderErrorCode::PaymasterContextIsTooLong, + 17 => BootloaderErrorCode::AssertionError, + 18 => BootloaderErrorCode::FailedToMarkFactoryDeps, + 19 => 
BootloaderErrorCode::TxValidationOutOfGas,
+            20 => BootloaderErrorCode::NotEnoughGasProvided,
+            21 => BootloaderErrorCode::AccountReturnedInvalidMagic,
+            22 => BootloaderErrorCode::PaymasterReturnedInvalidMagic,
+            _ => BootloaderErrorCode::Unknown,
+        }
+    }
+} diff --git a/core/multivm_deps/vm_m5/src/errors/mod.rs b/core/multivm_deps/vm_m5/src/errors/mod.rs new file mode 100644 index 000000000000..462330b41f98 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/errors/mod.rs @@ -0,0 +1,9 @@
+mod bootloader_error;
+mod tx_revert_reason;
+mod vm_revert_reason;
+
+pub(crate) use bootloader_error::BootloaderErrorCode;
+pub use tx_revert_reason::TxRevertReason;
+pub use vm_revert_reason::{
+    VmRevertReason, VmRevertReasonParsingError, VmRevertReasonParsingResult,
+}; diff --git a/core/multivm_deps/vm_m5/src/errors/tx_revert_reason.rs b/core/multivm_deps/vm_m5/src/errors/tx_revert_reason.rs new file mode 100644 index 000000000000..9259dd87a376 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/errors/tx_revert_reason.rs @@ -0,0 +1,207 @@
+use std::{convert::TryFrom, fmt::Display};
+
+use super::{BootloaderErrorCode, VmRevertReason};
+
+// Note that currently only EthCall transactions have a valid revert reason;
+// the same transaction executed in the bootloader will just have `InnerTxError`.
+// Reasons why a transaction executed inside the bootloader could fail.
+#[derive(Debug, Clone, PartialEq)]
+pub enum TxRevertReason {
+    // Can only be returned in EthCall execution mode (=ExecuteOnly)
+    EthCall(VmRevertReason),
+    // Returned when the execution of an L2 transaction has failed
+    TxReverted(VmRevertReason),
+    // Can only be returned in VerifyAndExecute
+    ValidationFailed(VmRevertReason),
+    PaymasterValidationFailed(VmRevertReason),
+    PrePaymasterPreparationFailed(VmRevertReason),
+    PayForTxFailed(VmRevertReason),
+    FailedToMarkFactoryDependencies(VmRevertReason),
+    FailedToChargeFee(VmRevertReason),
+    // Emitted when trying to call a transaction from an account that has not
+    // been deployed as an account (i.e. the `from` is just a contract).
+    // Can only be returned in VerifyAndExecute
+    FromIsNotAnAccount,
+    // Currently cannot be returned. Should be removed when refactoring errors.
+    InnerTxError,
+    Unknown(VmRevertReason),
+    // Temporarily used instead of panics to provide a better experience for developers:
+    // their transaction would simply be rejected and they'll be able to provide
+    // information about the cause to us.
+    UnexpectedVMBehavior(String),
+    // Bootloader is out of gas.
+    BootloaderOutOfGas,
+    // Transaction has too big a gas limit and will not be executed by the server.
+    TooBigGasLimit,
+    // The bootloader did not have enough gas to start the transaction in the first place
+    NotEnoughGasProvided,
+}
+
+impl TxRevertReason {
+    pub fn parse_error(bytes: &[u8]) -> Self {
+        // The first byte should correspond to the error code (cf. the `split_at(1)` below).
+        // If the error is shorter than that (i.e. empty), we will use a standardized bootloader error.
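+        // Assumed layout, inferred from the parsing below (an added note, not a protocol spec):
+        //   bytes[0]   is the bootloader error code (see `BootloaderErrorCode`);
+        //   bytes[1..] is an optional ABI-encoded revert reason, e.g. `revert("...")` yields the
+        //   `Error(string)` selector `0x08c379a0` followed by the ABI-encoded message.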
+ if bytes.is_empty() { + return Self::UnexpectedVMBehavior("Bootloader returned an empty error".to_string()); + } + + let (error_code, error_msg) = bytes.split_at(1); + let revert_reason = match VmRevertReason::try_from(error_msg) { + Ok(reason) => reason, + Err(_) => { + let function_selector = if error_msg.len() >= 4 { + error_msg[0..4].to_vec() + } else { + error_msg.to_vec() + }; + + let data = if error_msg.len() > 4 { + error_msg[4..].to_vec() + } else { + vec![] + }; + + VmRevertReason::Unknown { + function_selector, + data, + } + } + }; + + // `error_code` is a big-endian number, so we can safely take the first byte of it. + match BootloaderErrorCode::from(error_code[0]) { + BootloaderErrorCode::EthCall => Self::EthCall(revert_reason), + BootloaderErrorCode::AccountTxValidationFailed => Self::ValidationFailed(revert_reason), + BootloaderErrorCode::FailedToChargeFee => Self::FailedToChargeFee(revert_reason), + BootloaderErrorCode::FromIsNotAnAccount => Self::FromIsNotAnAccount, + BootloaderErrorCode::FailedToCheckAccount => Self::ValidationFailed(VmRevertReason::General { + msg: "Failed to check if `from` is an account. Most likely not enough gas provided".to_string() + }), + BootloaderErrorCode::UnacceptableGasPrice => Self::UnexpectedVMBehavior( + "The operator included transaction with an unacceptable gas price".to_owned(), + ), + BootloaderErrorCode::PrePaymasterPreparationFailed => { + Self::PrePaymasterPreparationFailed(revert_reason) + } + BootloaderErrorCode::PaymasterValidationFailed => { + Self::PaymasterValidationFailed(revert_reason) + } + BootloaderErrorCode::FailedToSendFeesToTheOperator => { + Self::UnexpectedVMBehavior("FailedToSendFeesToTheOperator".to_owned()) + } + BootloaderErrorCode::FailedToSetPrevBlockHash => { + panic!( + "The bootloader failed to set previous block hash. Reason: {}", + revert_reason + ) + } + BootloaderErrorCode::UnacceptablePubdataPrice => { + Self::UnexpectedVMBehavior("UnacceptablePubdataPrice".to_owned()) + } + // This is different from AccountTxValidationFailed error in a way that it means that + // the error was not produced by the account itself, but for some other unknown reason (most likely not enough gas) + BootloaderErrorCode::TxValidationError => Self::ValidationFailed(revert_reason), + // Note, that `InnerTxError` is derived only after the actual tx execution, so + // it is not parsed here. 
Unknown errors mean the bootloader failed for a reason
+            // that was not specified by the protocol:
+            BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee => {
+                Self::UnexpectedVMBehavior("Max priority fee greater than max fee".to_owned())
+            }
+            BootloaderErrorCode::PaymasterReturnedInvalidContext => {
+                Self::PaymasterValidationFailed(VmRevertReason::General {
+                    msg: String::from("Paymaster returned invalid context"),
+                })
+            }
+            BootloaderErrorCode::PaymasterContextIsTooLong => {
+                Self::PaymasterValidationFailed(VmRevertReason::General {
+                    msg: String::from("Paymaster returned context that is too long"),
+                })
+            }
+            BootloaderErrorCode::AssertionError => {
+                Self::UnexpectedVMBehavior(format!("Assertion error: {}", revert_reason))
+            }
+            BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas => Self::UnexpectedVMBehavior(
+                "Block.basefee is greater than max fee per gas".to_owned(),
+            ),
+            BootloaderErrorCode::PayForTxFailed => {
+                Self::PayForTxFailed(revert_reason)
+            },
+            BootloaderErrorCode::FailedToMarkFactoryDeps => {
+                let msg = if let VmRevertReason::General { msg } = revert_reason {
+                    msg
+                } else {
+                    String::from("Most likely not enough gas provided")
+                };
+                Self::FailedToMarkFactoryDependencies(VmRevertReason::General {
+                    msg
+                })
+            },
+            BootloaderErrorCode::TxValidationOutOfGas => {
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Not enough gas for transaction validation") })
+            },
+            BootloaderErrorCode::NotEnoughGasProvided => {
+                Self::NotEnoughGasProvided
+            },
+            BootloaderErrorCode::AccountReturnedInvalidMagic => {
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Account validation returned invalid magic value. Most often this means that the signature is incorrect") })
+            },
+            BootloaderErrorCode::PaymasterReturnedInvalidMagic => {
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Paymaster validation returned invalid magic value. Please refer to the documentation of the paymaster for more details") })
+            }
+            BootloaderErrorCode::Unknown => Self::UnexpectedVMBehavior(format!(
+                "Unsupported error code: {}. Revert reason: {}",
+                error_code[0], revert_reason
+            )),
+        }
+    }
+}
+
+impl Display for TxRevertReason {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match &self {
+            // EthCall reason is usually returned unchanged.
+            TxRevertReason::EthCall(reason) => write!(f, "{}", reason),
+            TxRevertReason::TxReverted(reason) => write!(f, "{}", reason),
+            TxRevertReason::ValidationFailed(reason) => {
+                write!(f, "Account validation error: {}", reason)
+            }
+            TxRevertReason::FailedToChargeFee(reason) => {
+                write!(f, "Failed to charge fee: {}", reason)
+            }
+            // Emitted when trying to call a transaction from an account that has not
+            // been deployed as an account (i.e. the `from` is just a contract).
+            TxRevertReason::FromIsNotAnAccount => write!(f, "Sender is not an account"),
+            TxRevertReason::InnerTxError => write!(f, "Bootloader-based tx failed"),
+            TxRevertReason::PaymasterValidationFailed(reason) => {
+                write!(f, "Paymaster validation error: {}", reason)
+            }
+            TxRevertReason::PrePaymasterPreparationFailed(reason) => {
+                write!(f, "Pre-paymaster preparation error: {}", reason)
+            }
+            TxRevertReason::Unknown(reason) => write!(f, "Unknown reason: {}", reason),
+            TxRevertReason::UnexpectedVMBehavior(problem) => {
+                write!(f,
+                    "Virtual machine entered an unexpected state. Please contact developers and provide transaction details \
+                    that caused this error.
Error description: {problem}" + ) + } + TxRevertReason::BootloaderOutOfGas => write!(f, "Bootloader out of gas"), + TxRevertReason::NotEnoughGasProvided => write!( + f, + "Bootloader did not have enough gas to start the transaction" + ), + TxRevertReason::FailedToMarkFactoryDependencies(reason) => { + write!(f, "Failed to mark factory dependencies: {}", reason) + } + TxRevertReason::PayForTxFailed(reason) => { + write!(f, "Failed to pay for the transaction: {}", reason) + } + TxRevertReason::TooBigGasLimit => { + write!( + f, + "Transaction has a too big ergs limit and will not be executed by the server" + ) + } + } + } +} diff --git a/core/multivm_deps/vm_m5/src/errors/vm_revert_reason.rs b/core/multivm_deps/vm_m5/src/errors/vm_revert_reason.rs new file mode 100644 index 000000000000..a38b99935e92 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/errors/vm_revert_reason.rs @@ -0,0 +1,230 @@ +use std::convert::TryFrom; +use std::fmt::{Debug, Display}; + +use zksync_types::U256; + +use crate::TxRevertReason; + +#[derive(Debug, thiserror::Error)] +pub enum VmRevertReasonParsingError { + #[error("Incorrect data offset. Data: {0:?}")] + IncorrectDataOffset(Vec), + #[error("Input is too short. Data: {0:?}")] + InputIsTooShort(Vec), + #[error("Incorrect string length. Data: {0:?}")] + IncorrectStringLength(Vec), +} + +/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +#[derive(Debug, Clone, PartialEq)] +pub enum VmRevertReason { + General { + msg: String, + }, + InnerTxError, + VmError, + Unknown { + function_selector: Vec, + data: Vec, + }, +} + +impl VmRevertReason { + const GENERAL_ERROR_SELECTOR: &'static [u8] = &[0x08, 0xc3, 0x79, 0xa0]; + + fn parse_general_error(bytes: &[u8]) -> Result { + if bytes.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + } + let data_offset = U256::from_big_endian(&bytes[0..32]).as_usize(); + + // Data offset couldn't be less than 32 because data offset size is 32 bytes + // and data offset bytes are part of the offset. Also data offset couldn't be greater than + // data length + if data_offset > bytes.len() || data_offset < 32 { + return Err(VmRevertReasonParsingError::IncorrectDataOffset( + bytes.to_vec(), + )); + }; + + let data = &bytes[data_offset..]; + + if data.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + }; + + let string_length = U256::from_big_endian(&data[0..32]).as_usize(); + + if string_length + 32 > data.len() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_vec(), + )); + }; + + Ok(Self::General { + msg: String::from_utf8_lossy(&data[32..32 + string_length]).to_string(), + }) + } +} + +impl TryFrom<&[u8]> for VmRevertReason { + type Error = VmRevertReasonParsingError; + + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() < 4 { + // Note, that when the method reverts with no data + // the selector is empty as well. + // For now, we only accept errors with either no data or + // the data with complete selectors. 
+ if !bytes.is_empty() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_owned(), + )); + } + + let result = VmRevertReason::Unknown { + function_selector: vec![], + data: bytes.to_vec(), + }; + + return Ok(result); + } + + let function_selector = &bytes[0..4]; + let error_data = &bytes[4..]; + match function_selector { + VmRevertReason::GENERAL_ERROR_SELECTOR => Self::parse_general_error(error_data), + _ => { + let result = VmRevertReason::Unknown { + function_selector: function_selector.to_vec(), + data: error_data.to_vec(), + }; + vlog::warn!("Unsupported error type: {}", result); + Ok(result) + } + } + } +} + +impl Display for VmRevertReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use VmRevertReason::{General, InnerTxError, Unknown, VmError}; + + match self { + General { msg } => write!(f, "{}", msg), + VmError => write!(f, "VM Error",), + InnerTxError => write!(f, "Bootloader-based tx failed"), + Unknown { + function_selector, + data, + } => write!( + f, + "Error function_selector = 0x{}, data = 0x{}", + hex::encode(function_selector), + hex::encode(data) + ), + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmRevertReasonParsingResult { + pub revert_reason: TxRevertReason, + pub original_data: Vec, +} + +impl VmRevertReasonParsingResult { + pub fn new(revert_reason: TxRevertReason, original_data: Vec) -> Self { + Self { + revert_reason, + original_data, + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use super::VmRevertReason; + + #[test] + fn revert_reason_parsing() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); + assert_eq!( + reason, + VmRevertReason::General { + msg: "ERC20: transfer amount exceeds balance".to_string() + } + ); + } + + #[test] + fn revert_reason_with_wrong_function_selector() { + let msg = vec![ + 8, 195, 121, 161, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); + assert!(matches!(reason, VmRevertReason::Unknown { .. 
})); + } + + #[test] + fn revert_reason_with_wrong_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_big_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_wrong_string_length() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } +} diff --git a/core/multivm_deps/vm_m5/src/event_sink.rs b/core/multivm_deps/vm_m5/src/event_sink.rs new file mode 100644 index 000000000000..e850fb4e2bd4 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/event_sink.rs @@ -0,0 +1,170 @@ +use crate::{oracles::OracleWithHistory, utils::collect_log_queries_after_timestamp}; +use std::collections::HashMap; +use zk_evm::{ + abstractions::EventSink, + aux_structures::{LogQuery, Timestamp}, + reference_impls::event_sink::{ApplicationData, EventMessage}, + zkevm_opcode_defs::system_params::{ + BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, + }, +}; + +use crate::history_recorder::AppDataFrameManagerWithHistory; + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct InMemoryEventSink { + pub frames_stack: AppDataFrameManagerWithHistory, +} + +impl OracleWithHistory for InMemoryEventSink { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.frames_stack.rollback_to_timestamp(timestamp); + } + + fn delete_history(&mut self) { + self.frames_stack.delete_history(); + } +} + +// as usual, if we rollback the current frame then we apply changes to storage immediately, +// otherwise we carry rollbacks to the parent's frames + +impl InMemoryEventSink { + pub fn flatten(&self) -> (Vec, Vec, Vec) { + assert_eq!( + self.frames_stack.inner().len(), + 1, + "there 
must exist an initial keeper frame" + ); + let full_history = self.frames_stack.inner().current_frame().clone(); + // we forget rollbacks as we have finished the execution and can just apply them + let ApplicationData { + forward, + rollbacks: _, + } = full_history; + let history = forward.clone(); + let (events, l1_messages) = Self::events_and_l1_messages_from_history(forward); + (history, events, l1_messages) + } + + pub fn get_log_queries(&self) -> usize { + let history = &self.frames_stack.inner().current_frame().forward; + history.len() + } + + pub fn get_events_and_l2_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec, Vec) { + let history = collect_log_queries_after_timestamp( + &self.frames_stack.inner().current_frame().forward, + from_timestamp, + ); + Self::events_and_l1_messages_from_history(history) + } + + fn events_and_l1_messages_from_history( + history: Vec, + ) -> (Vec, Vec) { + let mut tmp = HashMap::::with_capacity(history.len()); + + // note that we only use "forward" part and discard the rollbacks at the end, + // since if rollbacks of parents were not appended anywhere we just still keep them + for el in history.into_iter() { + // we are time ordered here in terms of rollbacks + if tmp.get(&el.timestamp.0).is_some() { + assert!(el.rollback); + tmp.remove(&el.timestamp.0); + } else { + assert!(!el.rollback); + tmp.insert(el.timestamp.0, el); + } + } + + // naturally sorted by timestamp + let mut keys: Vec<_> = tmp.keys().into_iter().cloned().collect(); + keys.sort_unstable(); + + let mut events = vec![]; + let mut l1_messages = vec![]; + + for k in keys.into_iter() { + let el = tmp.remove(&k).unwrap(); + let LogQuery { + shard_id, + is_service, + tx_number_in_block, + address, + key, + written_value, + aux_byte, + .. + } = el; + + let event = EventMessage { + shard_id, + is_first: is_service, + tx_number_in_block, + address, + key, + value: written_value, + }; + + if aux_byte == EVENT_AUX_BYTE { + events.push(event); + } else { + l1_messages.push(event); + } + } + + (events, l1_messages) + } +} + +impl EventSink for InMemoryEventSink { + // when we enter a new frame we should remember all our current applications and rollbacks + // when we exit the current frame then if we did panic we should concatenate all current + // forward and rollback cases + + fn add_partial_query(&mut self, _monotonic_cycle_counter: u32, mut query: LogQuery) { + assert!(query.rw_flag); + assert!(query.aux_byte == EVENT_AUX_BYTE || query.aux_byte == L1_MESSAGE_AUX_BYTE); + assert!(!query.rollback); + // just append to rollbacks and a full history + + self.frames_stack.push_forward(query, query.timestamp); + // we do not need it explicitly here, but let's be consistent with circuit counterpart + query.rollback = true; + self.frames_stack.push_rollback(query, query.timestamp); + } + + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp) + } + + fn finish_frame(&mut self, panicked: bool, timestamp: Timestamp) { + // if we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + let ApplicationData { forward, rollbacks } = self.frames_stack.drain_frame(timestamp); + if panicked { + for query in forward { + self.frames_stack.push_forward(query, timestamp); + } + for query in rollbacks.into_iter().rev().into_iter().filter(|q| { + // As of now, the bootloader only emits debug logs + // for events, so we keep them here for now. 
+ // They will be cleared on the server level. + q.address != *BOOTLOADER_FORMAL_ADDRESS || q.aux_byte != EVENT_AUX_BYTE + }) { + self.frames_stack.push_forward(query, timestamp); + } + } else { + for query in forward { + self.frames_stack.push_forward(query, timestamp); + } // we need to prepend rollbacks. No reverse here, as we do not care yet! + for query in rollbacks { + self.frames_stack.push_rollback(query, timestamp); + } + } + } +} diff --git a/core/multivm_deps/vm_m5/src/events.rs b/core/multivm_deps/vm_m5/src/events.rs new file mode 100644 index 000000000000..0d11d9102ea2 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/events.rs @@ -0,0 +1,146 @@ +use zk_evm::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; +use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; + +#[derive(Clone)] +pub struct SolidityLikeEvent { + pub shard_id: u8, + pub tx_number_in_block: u16, + pub address: Address, + pub topics: Vec<[u8; 32]>, + pub data: Vec, +} + +impl SolidityLikeEvent { + pub fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { + VmEvent { + location: (block_number, self.tx_number_in_block as u32), + address: self.address, + indexed_topics: be_chunks_to_h256_words(self.topics), + value: self.data, + } + } +} + +fn merge_events_inner(events: Vec) -> Vec { + let mut result = vec![]; + let mut current: Option<(usize, u32, SolidityLikeEvent)> = None; + + for message in events.into_iter() { + if !message.is_first { + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + + if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = + current.take() + { + if event.address != address + || event.shard_id != shard_id + || event.tx_number_in_block != tx_number_in_block + { + continue; + } + let mut data_0 = [0u8; 32]; + let mut data_1 = [0u8; 32]; + key.to_big_endian(&mut data_0); + value.to_big_endian(&mut data_1); + for el in [data_0, data_1].iter() { + if remaining_topics != 0 { + event.topics.push(*el); + remaining_topics -= 1; + } else if remaining_data_length != 0 { + if remaining_data_length >= 32 { + event.data.extend_from_slice(el); + remaining_data_length -= 32; + } else { + event.data.extend_from_slice(&el[..remaining_data_length]); + remaining_data_length = 0; + } + } + } + + if remaining_data_length != 0 || remaining_topics != 0 { + current = Some((remaining_data_length, remaining_topics, event)) + } else { + result.push(event); + } + } + } else { + // start new one. First take the old one only if it's well formed + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event); + } + } + + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + // split key as our internal marker. 
Ignore the higher bits.
+            let mut num_topics = key.0[0] as u32;
+            let mut data_length = (key.0[0] >> 32) as usize;
+            let mut buffer = [0u8; 32];
+            value.to_big_endian(&mut buffer);
+
+            let (topics, data) = if num_topics == 0 && data_length == 0 {
+                (vec![], vec![])
+            } else if num_topics == 0 {
+                data_length -= 32;
+                (vec![], buffer.to_vec())
+            } else {
+                num_topics -= 1;
+                (vec![buffer], vec![])
+            };
+
+            let new_event = SolidityLikeEvent {
+                shard_id,
+                tx_number_in_block,
+                address,
+                topics,
+                data,
+            };
+
+            current = Some((data_length, num_topics, new_event))
+        }
+    }
+
+    // add the last one
+    if let Some((remaining_data_length, remaining_topics, event)) = current.take() {
+        if remaining_data_length == 0 && remaining_topics == 0 {
+            result.push(event);
+        }
+    }
+
+    result
+}
+
+pub fn merge_events(events: Vec<EventMessage>) -> Vec<SolidityLikeEvent> {
+    let raw_events = merge_events_inner(events);
+
+    raw_events
+        .into_iter()
+        .filter(|e| e.address == EVENT_WRITER_ADDRESS)
+        .map(|event| {
+            // For events emitted through the event writer, the first topic is the actual address
+            // of the emitter, and the rest of the topics are the real topics
+            let address = h256_to_account_address(&H256(event.topics[0]));
+            let topics = event.topics.into_iter().skip(1).collect();
+
+            SolidityLikeEvent {
+                topics,
+                address,
+                ..event
+            }
+        })
+        .collect()
+} diff --git a/core/multivm_deps/vm_m5/src/glue.rs b/core/multivm_deps/vm_m5/src/glue.rs new file mode 100644 index 000000000000..5897f55f790c --- /dev/null +++ b/core/multivm_deps/vm_m5/src/glue.rs @@ -0,0 +1,92 @@
+pub trait GlueFrom<T>: Sized {
+    fn glue_from(value: T) -> Self;
+}
+
+/// See the description of the [`GlueFrom`] trait above.
+pub trait GlueInto<T>: Sized {
+    fn glue_into(self) -> T;
+}
+
+// Blanket `GlueInto` impl for any type that implements `GlueFrom`.
+impl<T, U> GlueInto<U> for T
+where
+    U: GlueFrom<T>,
+{
+    fn glue_into(self) -> U {
+        U::glue_from(self)
+    }
+}
+
+// Identity impl.
+impl<T> GlueFrom<T> for T {
+    fn glue_from(this: T) -> Self {
+        this
+    }
+}
+
+impl GlueFrom<zk_evm::aux_structures::Timestamp>
+    for zksync_types::zk_evm::aux_structures::Timestamp
+{
+    fn glue_from(timestamp: zk_evm::aux_structures::Timestamp) -> Self {
+        zksync_types::zk_evm::aux_structures::Timestamp(timestamp.0)
+    }
+}
+
+impl GlueFrom<zk_evm::aux_structures::LogQuery> for zksync_types::zk_evm::aux_structures::LogQuery {
+    fn glue_from(query: zk_evm::aux_structures::LogQuery) -> Self {
+        zksync_types::zk_evm::aux_structures::LogQuery {
+            address: query.address,
+            key: query.key,
+            written_value: query.written_value,
+            timestamp: query.timestamp.glue_into(),
+            shard_id: query.shard_id,
+            rollback: query.rollback,
+            tx_number_in_block: query.tx_number_in_block,
+            aux_byte: query.aux_byte,
+            read_value: query.read_value,
+            rw_flag: query.rw_flag,
+            is_service: query.is_service,
+        }
+    }
+}
+
+impl GlueFrom<zksync_types::zk_evm::aux_structures::Timestamp>
+    for zk_evm::aux_structures::Timestamp
+{
+    fn glue_from(timestamp: zksync_types::zk_evm::aux_structures::Timestamp) -> Self {
+        zk_evm::aux_structures::Timestamp(timestamp.0)
+    }
+}
+
+impl GlueFrom<zksync_types::zk_evm::aux_structures::LogQuery> for zk_evm::aux_structures::LogQuery {
+    fn glue_from(query: zksync_types::zk_evm::aux_structures::LogQuery) -> Self {
+        zk_evm::aux_structures::LogQuery {
+            address: query.address,
+            key: query.key,
+            written_value: query.written_value,
+            timestamp: query.timestamp.glue_into(),
+            shard_id: query.shard_id,
+            rollback: query.rollback,
+            tx_number_in_block: query.tx_number_in_block,
+            aux_byte: query.aux_byte,
+            read_value: query.read_value,
+            rw_flag: query.rw_flag,
+            is_service: query.is_service,
+        }
+    }
+}
+
+impl GlueFrom<zk_evm::reference_impls::event_sink::EventMessage>
+    for zksync_types::zk_evm::reference_impls::event_sink::EventMessage
+{
+    fn glue_from(event: zk_evm::reference_impls::event_sink::EventMessage) -> Self {
+        zksync_types::zk_evm::reference_impls::event_sink::EventMessage {
+            shard_id: event.shard_id,
+            is_first: event.is_first,
+            tx_number_in_block: event.tx_number_in_block,
+            address: event.address,
+            key: event.key,
+            value: event.value,
+        }
+    }
+} diff --git a/core/multivm_deps/vm_m5/src/history_recorder.rs b/core/multivm_deps/vm_m5/src/history_recorder.rs new file mode 100644 index 000000000000..1f673675957f --- /dev/null +++ b/core/multivm_deps/vm_m5/src/history_recorder.rs @@ -0,0 +1,635 @@
+use std::{
+    collections::HashMap,
+    hash::{BuildHasherDefault, Hash, Hasher},
+};
+
+use crate::storage::StoragePtr;
+
+use zk_evm::{
+    aux_structures::Timestamp,
+    reference_impls::event_sink::ApplicationData,
+    vm_state::PrimitiveValue,
+    zkevm_opcode_defs::{self},
+};
+
+use zksync_types::{StorageKey, U256};
+use zksync_utils::{h256_to_u256, u256_to_h256};
+
+pub type AppDataFrameManagerWithHistory<T> = FrameManagerWithHistory<ApplicationData<T>>;
+pub type MemoryWithHistory = HistoryRecorder<MemoryWrapper>;
+pub type FrameManagerWithHistory<T> = HistoryRecorder<FrameManager<T>>;
+pub type IntFrameManagerWithHistory<T> = FrameManagerWithHistory<Vec<T>>;
+
+// Within the same cycle, timestamps in the range timestamp..timestamp+TIME_DELTA_PER_CYCLE-1
+// can be used. This can sometimes violate monotonicity of the timestamp within the
+// same cycle, so it should be normalized.
+fn normalize_timestamp(timestamp: Timestamp) -> Timestamp {
+    let timestamp = timestamp.0;
+
+    // Making sure it is divisible by TIME_DELTA_PER_CYCLE
+    Timestamp(timestamp - timestamp % zkevm_opcode_defs::TIME_DELTA_PER_CYCLE)
+}
+
+/// Accepts a history item as its parameter and applies it.
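+///
+/// Illustrative contract (an added note): applying a record must return the record that undoes
+/// it. For `Vec<T>` below, `apply_historic_record(Push(x))` returns `(Pop, None)`, while
+/// `apply_historic_record(Pop)` returns `(Push(popped), Some(popped))`; `rollback_to_timestamp`
+/// simply replays these inverse records.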
+pub trait WithHistory { + type HistoryRecord; + type ReturnValue; + + // Applies an action and returns the action that would + // rollback its effect as well as some returned value + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue); +} + +/// A struct responsible for tracking history for +/// a component that is passed as a generic parameter to it (`inner`). +#[derive(Debug, PartialEq)] +pub struct HistoryRecorder { + inner: T, + history: Vec<(Timestamp, T::HistoryRecord)>, +} + +impl Clone for HistoryRecorder +where + T::HistoryRecord: Clone, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + history: self.history.clone(), + } + } +} + +impl HistoryRecorder { + pub fn from_inner(inner: T) -> Self { + Self { + inner, + history: vec![], + } + } + + pub fn inner(&self) -> &T { + &self.inner + } + + pub fn history(&self) -> &Vec<(Timestamp, T::HistoryRecord)> { + &self.history + } + + pub fn apply_historic_record( + &mut self, + item: T::HistoryRecord, + timestamp: Timestamp, + ) -> T::ReturnValue { + let timestamp = normalize_timestamp(timestamp); + let last_recorded_timestamp = self.history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); + assert!( + last_recorded_timestamp <= timestamp, + "Timestamps are not monotonic" + ); + + let (reversed_item, return_value) = self.inner.apply_historic_record(item); + self.history.push((timestamp, reversed_item)); + + return_value + } + + pub fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + loop { + let should_undo = self + .history + .last() + .map(|(item_timestamp, _)| *item_timestamp >= timestamp) + .unwrap_or(false); + if !should_undo { + break; + } + + let (_, item_to_apply) = self.history.pop().unwrap(); + self.inner.apply_historic_record(item_to_apply); + } + } + + /// Deletes all the history for its component, making + /// its current state irreversible + pub fn delete_history(&mut self) { + self.history.clear(); + } +} + +impl Default for HistoryRecorder { + fn default() -> Self { + Self::from_inner(T::default()) + } +} + +/// Frame manager is basically a wrapper +/// over a stack of items, which typically constitute +/// frames in oracles like StorageOracle, Memory, etc. 
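+///
+/// Illustrative flow (an added note): entering a VM frame applies
+/// `FrameManagerHistoryRecord::PushFrame`, whose recorded inverse is `PopFrame`; rolling back
+/// past that point replays the inverse, leaving the parent frame untouched.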
+#[derive(Debug, PartialEq, Clone)] +pub struct FrameManager { + frame_stack: Vec, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum FrameManagerHistoryRecord { + PushFrame, + PopFrame, + /// The operation should be handled by the current frame itself + InnerOperation(V), +} + +impl Default for FrameManager { + fn default() -> Self { + Self { + // We typically require at least the first frame to be there + // since the last user-provided frame might be reverted + frame_stack: vec![T::default()], + } + } +} + +impl WithHistory for FrameManager { + type HistoryRecord = FrameManagerHistoryRecord; + type ReturnValue = Option; + + fn apply_historic_record( + &mut self, + item: FrameManagerHistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + match item { + FrameManagerHistoryRecord::PopFrame => { + self.frame_stack.pop().unwrap(); + (FrameManagerHistoryRecord::PushFrame, None) + } + FrameManagerHistoryRecord::PushFrame => { + self.frame_stack.push(T::default()); + (FrameManagerHistoryRecord::PopFrame, None) + } + FrameManagerHistoryRecord::InnerOperation(record) => { + let (resulting_op, return_value) = self + .frame_stack + .last_mut() + .unwrap() + .apply_historic_record(record); + ( + FrameManagerHistoryRecord::InnerOperation(resulting_op), + Some(return_value), + ) + } + } + } +} + +impl FrameManager +where + T: WithHistory + Default, +{ + pub fn current_frame(&self) -> &T { + self.frame_stack + .last() + .expect("Frame stack should never be empty") + } + + pub fn len(&self) -> usize { + self.frame_stack.len() + } +} + +impl HistoryRecorder> { + /// Add a new frame. + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(FrameManagerHistoryRecord::PushFrame, timestamp); + } + + /// Remove the current frame. + pub fn pop_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(FrameManagerHistoryRecord::PopFrame, timestamp); + } +} + +impl HistoryRecorder>> { + /// Push an element to the forward queue + pub fn push_forward(&mut self, elem: T, timestamp: Timestamp) { + let forward_event = + ApplicationDataHistoryEvent::ForwardEvent(VectorHistoryEvent::Push(elem)); + let event = FrameManagerHistoryRecord::InnerOperation(forward_event); + + self.apply_historic_record(event, timestamp); + } + + /// Pop an element from the forward queue + pub fn pop_forward(&mut self, timestamp: Timestamp) -> T { + let forward_event = ApplicationDataHistoryEvent::ForwardEvent(VectorHistoryEvent::Pop); + let event = FrameManagerHistoryRecord::InnerOperation(forward_event); + + self.apply_historic_record(event, timestamp) + .flatten() + .unwrap() + } + + /// Push an element to the rollback queue + pub fn push_rollback(&mut self, elem: T, timestamp: Timestamp) { + let rollback_event = + ApplicationDataHistoryEvent::RollbacksEvent(VectorHistoryEvent::Push(elem)); + let event = FrameManagerHistoryRecord::InnerOperation(rollback_event); + + self.apply_historic_record(event, timestamp); + } + + /// Pop an element from the rollback queue + pub fn pop_rollback(&mut self, timestamp: Timestamp) -> T { + let rollback_event = ApplicationDataHistoryEvent::RollbacksEvent(VectorHistoryEvent::Pop); + let event = FrameManagerHistoryRecord::InnerOperation(rollback_event); + + self.apply_historic_record(event, timestamp) + .flatten() + .unwrap() + } + + /// Pops the current frame and returns its value + pub fn drain_frame(&mut self, timestamp: Timestamp) -> ApplicationData { + let mut forward = vec![]; + while !self.inner.current_frame().forward.is_empty() { + let popped_item = 
self.pop_forward(timestamp);
+            forward.push(popped_item);
+        }
+
+        let mut rollbacks = vec![];
+        while !self.inner.current_frame().rollbacks.is_empty() {
+            let popped_item = self.pop_rollback(timestamp);
+            rollbacks.push(popped_item);
+        }
+
+        self.pop_frame(timestamp);
+
+        // items are in reversed order:
+        ApplicationData {
+            forward: forward.into_iter().rev().collect(),
+            rollbacks: rollbacks.into_iter().rev().collect(),
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum VectorHistoryEvent<X> {
+    Push(X),
+    Pop,
+}
+
+impl<T: Copy> WithHistory for Vec<T> {
+    type HistoryRecord = VectorHistoryEvent<T>;
+    type ReturnValue = Option<T>;
+    fn apply_historic_record(
+        &mut self,
+        item: VectorHistoryEvent<T>,
+    ) -> (Self::HistoryRecord, Self::ReturnValue) {
+        match item {
+            VectorHistoryEvent::Pop => {
+                // Note, that here we assume that the users
+                // will check themselves whether this vector is empty
+                // prior to popping from it.
+                let popped_item = self.pop().unwrap();
+
+                (VectorHistoryEvent::Push(popped_item), Some(popped_item))
+            }
+            VectorHistoryEvent::Push(x) => {
+                self.push(x);
+
+                (VectorHistoryEvent::Pop, None)
+            }
+        }
+    }
+}
+
+impl<T: Copy> HistoryRecorder<Vec<T>> {
+    pub fn push(&mut self, elem: T, timestamp: Timestamp) {
+        self.apply_historic_record(VectorHistoryEvent::Push(elem), timestamp);
+    }
+
+    pub fn pop(&mut self, timestamp: Timestamp) -> T {
+        self.apply_historic_record(VectorHistoryEvent::Pop, timestamp)
+            .unwrap()
+    }
+
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+}
+
+impl<T: Copy> HistoryRecorder<FrameManager<Vec<T>>> {
+    /// Push an element to the current frame
+    pub fn push_to_frame(&mut self, elem: T, timestamp: Timestamp) {
+        self.apply_historic_record(
+            FrameManagerHistoryRecord::InnerOperation(VectorHistoryEvent::Push(elem)),
+            timestamp,
+        );
+    }
+
+    /// Pop an element from the current frame
+    pub fn pop_from_frame(&mut self, timestamp: Timestamp) -> T {
+        self.apply_historic_record(
+            FrameManagerHistoryRecord::InnerOperation(VectorHistoryEvent::Pop),
+            timestamp,
+        )
+        .flatten()
+        .unwrap()
+    }
+
+    /// Drains the top frame and returns its value
+    pub fn drain_frame(&mut self, timestamp: Timestamp) -> Vec<T> {
+        let mut items = vec![];
+        while !self.inner.current_frame().is_empty() {
+            let popped_item = self.pop_from_frame(timestamp);
+            items.push(popped_item);
+        }
+
+        self.pop_frame(timestamp);
+
+        // items are in reversed order:
+        items.into_iter().rev().collect()
+    }
+
+    /// Extends the top frame with a vector of items
+    pub fn extend_frame(&mut self, items: Vec<T>, timestamp: Timestamp) {
+        for item in items {
+            self.push_to_frame(item, timestamp);
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct HashMapHistoryEvent<K, V> {
+    pub key: K,
+    pub value: Option<V>,
+}
+
+impl<K: Copy + Eq + Hash, V: Clone> WithHistory for HashMap<K, V> {
+    type HistoryRecord = HashMapHistoryEvent<K, V>;
+    type ReturnValue = Option<V>;
+    fn apply_historic_record(
+        &mut self,
+        item: Self::HistoryRecord,
+    ) -> (Self::HistoryRecord, Self::ReturnValue) {
+        let HashMapHistoryEvent { key, value } = item;
+
+        let prev_value = match value {
+            Some(x) => self.insert(key, x),
+            None => self.remove(&key),
+        };
+
+        (
+            HashMapHistoryEvent {
+                key,
+                value: prev_value.clone(),
+            },
+            prev_value,
+        )
+    }
+}
+
+impl<K: Copy + Eq + Hash, V: Clone> HistoryRecorder<HashMap<K, V>> {
+    pub fn insert(&mut self, key: K, value: V, timestamp: Timestamp) -> Option<V> {
+        self.apply_historic_record(
+            HashMapHistoryEvent {
+                key,
+                value: Some(value),
+            },
+            timestamp,
+        )
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum ApplicationDataHistoryEvent<T> {
+    // The event about the
forward queue + ForwardEvent(VectorHistoryEvent), + // The event about the rollbacks queue + RollbacksEvent(VectorHistoryEvent), +} + +impl WithHistory for ApplicationData { + type HistoryRecord = ApplicationDataHistoryEvent; + type ReturnValue = Option; + + fn apply_historic_record( + &mut self, + item: ApplicationDataHistoryEvent, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + match item { + ApplicationDataHistoryEvent::ForwardEvent(e) => { + let (vec_event, result) = self.forward.apply_historic_record(e); + (ApplicationDataHistoryEvent::ForwardEvent(vec_event), result) + } + ApplicationDataHistoryEvent::RollbacksEvent(e) => { + let (vec_event, result) = self.rollbacks.apply_historic_record(e); + ( + ApplicationDataHistoryEvent::RollbacksEvent(vec_event), + result, + ) + } + } + } +} + +#[derive(Default)] +pub struct NoopHasher(u64); + +impl Hasher for NoopHasher { + fn write_usize(&mut self, value: usize) { + self.0 = value as u64; + } + + fn write(&mut self, _bytes: &[u8]) { + unreachable!("internal hasher only handles usize type"); + } + + fn finish(&self) -> u64 { + self.0 + } +} + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct MemoryWrapper { + pub memory: Vec>>, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MemoryHistoryRecord { + pub page: usize, + pub slot: usize, + pub set_value: Option, +} + +impl MemoryWrapper { + pub fn shrink_pages(&mut self) { + while self.memory.last().map(|h| h.is_empty()).unwrap_or(false) { + self.memory.pop(); + } + } + + pub fn ensure_page_exists(&mut self, page: usize) { + if self.memory.len() <= page { + // We don't need to record such events in history + // because all these vectors will be empty + self.memory.resize_with(page + 1, HashMap::default); + } + } + + pub fn dump_page_content_as_u256_words( + &self, + page_number: u32, + range: std::ops::Range, + ) -> Vec { + if let Some(page) = self.memory.get(page_number as usize) { + let mut result = vec![]; + for i in range { + if let Some(word) = page.get(&(i as usize)) { + result.push(*word); + } else { + result.push(PrimitiveValue::empty()); + } + } + + result + } else { + vec![PrimitiveValue::empty(); range.len()] + } + } +} + +impl WithHistory for MemoryWrapper { + type HistoryRecord = MemoryHistoryRecord; + type ReturnValue = Option; + + fn apply_historic_record( + &mut self, + item: MemoryHistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let MemoryHistoryRecord { + page, + slot, + set_value, + } = item; + + self.ensure_page_exists(page); + let page_handle = self.memory.get_mut(page).unwrap(); + let prev_value = match set_value { + Some(x) => page_handle.insert(slot, x), + None => page_handle.remove(&slot), + }; + self.shrink_pages(); + + let reserved_item = MemoryHistoryRecord { + page, + slot, + set_value: prev_value, + }; + + (reserved_item, prev_value) + } +} + +impl HistoryRecorder { + pub fn write_to_memory( + &mut self, + page: usize, + slot: usize, + value: Option, + timestamp: Timestamp, + ) -> Option { + self.apply_historic_record( + MemoryHistoryRecord { + page, + slot, + set_value: value, + }, + timestamp, + ) + } + + pub fn clear_page(&mut self, page: usize, timestamp: Timestamp) { + let slots_to_clear: Vec<_> = match self.inner.memory.get(page) { + None => return, + Some(x) => x.keys().copied().collect(), + }; + + // We manually clear the page to preserve correct history + for slot in slots_to_clear { + self.write_to_memory(page, slot, None, timestamp); + } + } +} + +#[derive(Debug)] + +pub struct StorageWrapper<'a> { + storage_ptr: 
StoragePtr<'a>, +} + +impl<'a> StorageWrapper<'a> { + pub fn new(storage_ptr: StoragePtr<'a>) -> Self { + Self { storage_ptr } + } + + pub fn get_ptr(&self) -> StoragePtr<'a> { + self.storage_ptr.clone() + } + + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + h256_to_u256(self.storage_ptr.borrow_mut().get_value(key)) + } +} + +#[derive(Debug, Clone)] +pub struct StorageHistoryRecord { + pub key: StorageKey, + pub value: U256, +} + +impl<'a> WithHistory for StorageWrapper<'a> { + type HistoryRecord = StorageHistoryRecord; + type ReturnValue = U256; + + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let prev_value = h256_to_u256( + self.storage_ptr + .borrow_mut() + .set_value(&item.key, u256_to_h256(item.value)), + ); + + let reverse_item = StorageHistoryRecord { + key: item.key, + value: prev_value, + }; + + (reverse_item, prev_value) + } +} + +impl<'a> HistoryRecorder> { + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + self.inner.read_from_storage(key) + } + + pub fn write_to_storage(&mut self, key: StorageKey, value: U256, timestamp: Timestamp) -> U256 { + self.apply_historic_record(StorageHistoryRecord { key, value }, timestamp) + } + + /// Returns a pointer to the storage. + /// Note, that any changes done to the storage via this pointer + /// will NOT be recorded as its history. + pub fn get_ptr(&self) -> StoragePtr<'a> { + self.inner.get_ptr() + } +} diff --git a/core/multivm_deps/vm_m5/src/lib.rs b/core/multivm_deps/vm_m5/src/lib.rs new file mode 100644 index 000000000000..bd6b9364eceb --- /dev/null +++ b/core/multivm_deps/vm_m5/src/lib.rs @@ -0,0 +1,39 @@ +#![allow(clippy::derive_partial_eq_without_eq)] + +mod bootloader_state; +pub mod errors; +pub mod event_sink; +mod events; +pub(crate) mod glue; +mod history_recorder; +pub mod memory; +mod oracle_tools; +pub mod oracles; +mod pubdata_utils; +mod refunds; +pub mod storage; +pub mod test_utils; +pub mod transaction_data; +pub mod utils; +pub mod vm; +pub mod vm_with_bootloader; + +#[cfg(test)] +mod tests; + +pub use crate::errors::TxRevertReason; +pub use crate::oracle_tools::OracleTools; +pub use crate::oracles::storage::StorageOracle; +pub use crate::vm::VmBlockResult; +pub use crate::vm::VmExecutionResult; +pub use crate::vm::VmInstance; +pub use zk_evm; +pub use zksync_types::vm_trace::VmExecutionTrace; + +pub type Word = zksync_types::U256; + +pub const MEMORY_SIZE: usize = 1 << 16; +pub const MAX_CALLS: usize = 65536; +pub const REGISTERS_COUNT: usize = 16; +pub const MAX_STACK_SIZE: usize = 256; +pub const MAX_CYCLES_FOR_TX: u32 = u32::MAX; diff --git a/core/multivm_deps/vm_m5/src/memory.rs b/core/multivm_deps/vm_m5/src/memory.rs new file mode 100644 index 000000000000..4591bf916847 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/memory.rs @@ -0,0 +1,300 @@ +use zk_evm::abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}; +use zk_evm::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; +use zk_evm::vm_state::PrimitiveValue; +use zk_evm::zkevm_opcode_defs::FatPointer; +use zksync_types::U256; + +use crate::history_recorder::{IntFrameManagerWithHistory, MemoryWithHistory}; +use crate::oracles::OracleWithHistory; +use crate::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct SimpleMemory { + pub memory: MemoryWithHistory, + + pub observable_pages: IntFrameManagerWithHistory, +} + +impl OracleWithHistory for SimpleMemory { + fn 
rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.memory.rollback_to_timestamp(timestamp); + self.observable_pages.rollback_to_timestamp(timestamp); + } + + fn delete_history(&mut self) { + self.memory.delete_history(); + self.observable_pages.delete_history(); + } +} + +impl SimpleMemory { + pub fn populate(&mut self, elements: Vec<(u32, Vec)>, timestamp: Timestamp) { + for (page, values) in elements.into_iter() { + // Resizing the pages array to fit the page. + let len = values.len(); + assert!(len <= MEMORY_CELLS_OTHER_PAGES); + + for (i, value) in values.into_iter().enumerate() { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + self.memory + .write_to_memory(page as usize, i, Some(value), timestamp); + } + } + } + + pub fn populate_page( + &mut self, + page: usize, + elements: Vec<(usize, U256)>, + timestamp: Timestamp, + ) { + elements.into_iter().for_each(|(offset, value)| { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + + self.memory + .write_to_memory(page, offset, Some(value), timestamp); + }); + } + + pub fn dump_page_content_as_u256_words( + &self, + page: u32, + range: std::ops::Range, + ) -> Vec { + self.memory + .inner() + .dump_page_content_as_u256_words(page, range) + .into_iter() + .map(|v| v.value) + .collect() + } + + pub fn read_slot(&self, page: usize, slot: usize) -> PrimitiveValue { + let slot = slot as u32; + let page = page as u32; + self.memory + .inner() + .dump_page_content_as_u256_words(page, slot..slot + 1)[0] + } + + // This method should be used with relatively small lengths, since + // we don't heavily optimize here for cases with long lengths + pub fn read_unaligned_bytes(&self, page: usize, start: usize, length: usize) -> Vec { + if length == 0 { + return vec![]; + } + + let end = start + length - 1; + + let mut current_word = start / 32; + let mut result = vec![]; + while current_word * 32 <= end { + let word_value = self.read_slot(page, current_word).value; + let word_value = { + let mut bytes: Vec = vec![0u8; 32]; + word_value.to_big_endian(&mut bytes); + bytes + }; + + result.extend(extract_needed_bytes_from_word( + word_value, + current_word, + start, + end, + )); + + current_word += 1; + } + + assert_eq!(result.len(), length); + + result + } +} + +impl Memory for SimpleMemory { + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + match query.location.memory_type { + MemoryType::Stack => {} + MemoryType::Heap | MemoryType::AuxHeap => { + // The following assertion works fine even when doing a read + // from heap through pointer, since `value_is_pointer` can only be set to + // `true` during memory writes. 
+                assert!(
+                    !query.value_is_pointer,
+                    "Pointers can only be stored on stack"
+                );
+            }
+            MemoryType::FatPointer => {
+                assert!(!query.rw_flag);
+                assert!(
+                    !query.value_is_pointer,
+                    "Pointers can only be stored on stack"
+                );
+            }
+            MemoryType::Code => {
+                unreachable!("code should be read through the specialized query");
+            }
+        }
+
+        let page = query.location.page.0 as usize;
+        let slot = query.location.index.0 as usize;
+
+        if query.rw_flag {
+            self.memory.write_to_memory(
+                page,
+                slot,
+                Some(PrimitiveValue {
+                    value: query.value,
+                    is_pointer: query.value_is_pointer,
+                }),
+                query.timestamp,
+            );
+        } else {
+            let current_value = self.read_slot(page, slot);
+            query.value = current_value.value;
+            query.value_is_pointer = current_value.is_pointer;
+        }
+
+        query
+    }
+
+    fn specialized_code_query(
+        &mut self,
+        _monotonic_cycle_counter: u32,
+        mut query: MemoryQuery,
+    ) -> MemoryQuery {
+        assert_eq!(query.location.memory_type, MemoryType::Code);
+        assert!(
+            !query.value_is_pointer,
+            "Pointers are not used for decommits"
+        );
+
+        let page = query.location.page.0 as usize;
+        let slot = query.location.index.0 as usize;
+
+        if query.rw_flag {
+            self.memory.write_to_memory(
+                page,
+                slot,
+                Some(PrimitiveValue {
+                    value: query.value,
+                    is_pointer: query.value_is_pointer,
+                }),
+                query.timestamp,
+            );
+        } else {
+            let current_value = self.read_slot(page, slot);
+            query.value = current_value.value;
+            query.value_is_pointer = current_value.is_pointer;
+        }
+
+        query
+    }
+
+    fn read_code_query(
+        &self,
+        _monotonic_cycle_counter: u32,
+        mut query: MemoryQuery,
+    ) -> MemoryQuery {
+        assert_eq!(query.location.memory_type, MemoryType::Code);
+        assert!(
+            !query.value_is_pointer,
+            "Pointers are not used for decommits"
+        );
+        assert!(!query.rw_flag, "Only read queries can be processed");
+
+        let page = query.location.page.0 as usize;
+        let slot = query.location.index.0 as usize;
+
+        let current_value = self.read_slot(page, slot);
+        query.value = current_value.value;
+        query.value_is_pointer = current_value.is_pointer;
+
+        query
+    }
+
+    fn start_global_frame(
+        &mut self,
+        _current_base_page: MemoryPage,
+        new_base_page: MemoryPage,
+        calldata_fat_pointer: FatPointer,
+        timestamp: Timestamp,
+    ) {
+        // Besides the calldata page, we also formally include the current stack
+        // page, heap page and aux heap page.
+        // The code page will always be left observable, so we don't include it here.
+        self.observable_pages.push_frame(timestamp);
+        self.observable_pages.extend_frame(
+            vec![
+                calldata_fat_pointer.memory_page,
+                stack_page_from_base(new_base_page).0,
+                heap_page_from_base(new_base_page).0,
+                aux_heap_page_from_base(new_base_page).0,
+            ],
+            timestamp,
+        );
+    }
+
+    fn finish_global_frame(
+        &mut self,
+        base_page: MemoryPage,
+        returndata_fat_pointer: FatPointer,
+        timestamp: Timestamp,
+    ) {
+        // Safe to unwrap here, since `finish_global_frame` is never called with an empty stack
+        let current_observable_pages = self.observable_pages.drain_frame(timestamp);
+        let returndata_page = returndata_fat_pointer.memory_page;
+
+        for page in current_observable_pages {
+            // If the page's number is greater than or equal to the base_page,
+            // it means that it was created by the internal calls of this contract.
+            // We need to add this check as the calldata pointer is also part of the
+            // observable pages.
+ if page >= base_page.0 && page != returndata_page { + self.memory.clear_page(page as usize, timestamp); + } + } + + // Push to the parent's frame + self.observable_pages + .push_to_frame(returndata_page, timestamp); + } +} + +// It is expected that there is some intersection between [word_number*32..word_number*32+31] and [start, end] +fn extract_needed_bytes_from_word( + word_value: Vec, + word_number: usize, + start: usize, + end: usize, +) -> Vec { + let word_start = word_number * 32; + let word_end = word_start + 31; // Note, that at word_start + 32 a new word already starts + + let intersection_left = std::cmp::max(word_start, start); + let intersection_right = std::cmp::min(word_end, end); + + if intersection_right < intersection_left { + vec![] + } else { + let start_bytes = intersection_left - word_start; + let to_take = intersection_right - intersection_left + 1; + + word_value + .into_iter() + .skip(start_bytes) + .take(to_take) + .collect() + } +} diff --git a/core/multivm_deps/vm_m5/src/oracle_tools.rs b/core/multivm_deps/vm_m5/src/oracle_tools.rs new file mode 100644 index 000000000000..3df462d554af --- /dev/null +++ b/core/multivm_deps/vm_m5/src/oracle_tools.rs @@ -0,0 +1,40 @@ +use crate::memory::SimpleMemory; +use crate::vm::MultiVMSubversion; +use std::cell::RefCell; + +use std::fmt::Debug; +use std::rc::Rc; + +use crate::event_sink::InMemoryEventSink; +use crate::oracles::decommitter::DecommitterOracle; +use crate::oracles::precompile::PrecompilesProcessorWithHistory; +use crate::oracles::storage::StorageOracle; +use crate::storage::{Storage, StoragePtr}; +use zk_evm::witness_trace::DummyTracer; + +#[derive(Debug)] +pub struct OracleTools<'a, const B: bool> { + pub storage: StorageOracle<'a>, + pub memory: SimpleMemory, + pub event_sink: InMemoryEventSink, + pub precompiles_processor: PrecompilesProcessorWithHistory, + pub decommittment_processor: DecommitterOracle<'a, B>, + pub witness_tracer: DummyTracer, + pub storage_view: StoragePtr<'a>, +} + +impl<'a> OracleTools<'a, false> { + pub fn new(storage_view: &'a mut dyn Storage, refund_state: MultiVMSubversion) -> Self { + let pointer: Rc> = Rc::new(RefCell::new(storage_view)); + + Self { + storage: StorageOracle::new(pointer.clone(), refund_state), + memory: SimpleMemory::default(), + event_sink: InMemoryEventSink::default(), + precompiles_processor: PrecompilesProcessorWithHistory::default(), + decommittment_processor: DecommitterOracle::new(pointer.clone()), + witness_tracer: DummyTracer {}, + storage_view: pointer, + } + } +} diff --git a/core/multivm_deps/vm_m5/src/oracles/decommitter.rs b/core/multivm_deps/vm_m5/src/oracles/decommitter.rs new file mode 100644 index 000000000000..a431c6f003bf --- /dev/null +++ b/core/multivm_deps/vm_m5/src/oracles/decommitter.rs @@ -0,0 +1,186 @@ +use std::collections::HashMap; + +use crate::history_recorder::HistoryRecorder; +use crate::storage::StoragePtr; + +use zk_evm::abstractions::MemoryType; +use zk_evm::aux_structures::Timestamp; +use zk_evm::{ + abstractions::{DecommittmentProcessor, Memory}, + aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, +}; +use zksync_types::U256; +use zksync_utils::bytecode::bytecode_len_in_words; +use zksync_utils::{bytes_to_be_words, u256_to_h256}; + +use super::OracleWithHistory; + +#[derive(Debug)] +pub struct DecommitterOracle<'a, const B: bool> { + /// Pointer that enables to read contract bytecodes from the database. 
+#[derive(Debug)]
+pub struct DecommitterOracle<'a, const B: bool> {
+    /// Pointer that enables reading contract bytecodes from the database.
+    storage: StoragePtr<'a>,
+    /// The cache of bytecodes that the bootloader "knows", but that are not necessarily in the database.
+    pub known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>>,
+    /// Stores pages of memory where certain code hashes have already been decommitted.
+    decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>>,
+    /// Stores the history of decommitment requests.
+    decommitment_requests: HistoryRecorder<Vec<()>>,
+}
+
+impl<'a, const B: bool> DecommitterOracle<'a, B> {
+    pub fn new(storage: StoragePtr<'a>) -> Self {
+        Self {
+            storage,
+            known_bytecodes: Default::default(),
+            decommitted_code_hashes: Default::default(),
+            decommitment_requests: Default::default(),
+        }
+    }
+
+    pub fn get_bytecode(&mut self, hash: U256, timestamp: Timestamp) -> Vec<U256> {
+        let entry = self.known_bytecodes.inner().get(&hash);
+
+        match entry {
+            Some(x) => x.clone(),
+            None => {
+                // It is ok to panic here: the decommitter is never called directly by
+                // the users, only by the VM, and the VM will never decommit a
+                // code hash whose preimage we didn't previously claim to know.
+                let value = self
+                    .storage
+                    .borrow_mut()
+                    .load_factory_dep(u256_to_h256(hash))
+                    .expect("Trying to decode a nonexistent hash");
+
+                let value = bytes_to_be_words(value);
+                self.known_bytecodes.insert(hash, value.clone(), timestamp);
+                value
+            }
+        }
+    }
+
+    pub fn populate(&mut self, bytecodes: Vec<(U256, Vec<U256>)>, timestamp: Timestamp) {
+        for (hash, bytecode) in bytecodes {
+            self.known_bytecodes.insert(hash, bytecode, timestamp);
+        }
+    }
+
+    pub fn get_used_bytecode_hashes(&self) -> Vec<U256> {
+        self.decommitted_code_hashes
+            .inner()
+            .iter()
+            .map(|item| *item.0)
+            .collect()
+    }
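The two counters below rely on the history being append-only and ordered by timestamp: scanning from the back and stopping at the first entry older than the cutoff visits exactly the events at or after it. A minimal sketch, with a plain slice standing in for the recorder's history (illustrative values):

    fn events_since(history: &[(u32, char)], cutoff: u32) -> usize {
        // The suffix satisfying `t >= cutoff` is exactly the set of events to count.
        history.iter().rev().take_while(|(t, _)| *t >= cutoff).count()
    }

    #[test]
    fn counts_the_suffix() {
        let history = [(1, 'a'), (3, 'b'), (7, 'c'), (9, 'd')];
        assert_eq!(events_since(&history, 5), 2); // 'c' and 'd'
    }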
+    pub fn get_decommitted_bytes_after_timestamp(&self, timestamp: Timestamp) -> usize {
+        // Note that we rely here on the fact that for each used bytecode
+        // there is one and only one corresponding event in its history.
+        self.decommitted_code_hashes
+            .history()
+            .iter()
+            .rev()
+            .take_while(|(t, _)| *t >= timestamp)
+            .count()
+    }
+
+    pub fn get_number_of_decommitment_requests_after_timestamp(
+        &self,
+        timestamp: Timestamp,
+    ) -> usize {
+        self.decommitment_requests
+            .history()
+            .iter()
+            .rev()
+            .take_while(|(t, _)| *t >= timestamp)
+            .count()
+    }
+
+    pub fn get_decommitted_code_hashes_with_history(&self) -> &HistoryRecorder<HashMap<U256, u32>> {
+        &self.decommitted_code_hashes
+    }
+
+    pub fn get_storage(&self) -> StoragePtr<'a> {
+        self.storage.clone()
+    }
+}
+
+impl<'a, const B: bool> OracleWithHistory for DecommitterOracle<'a, B> {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.decommitted_code_hashes
+            .rollback_to_timestamp(timestamp);
+        self.known_bytecodes.rollback_to_timestamp(timestamp);
+        self.decommitment_requests.rollback_to_timestamp(timestamp);
+    }
+
+    fn delete_history(&mut self) {
+        self.decommitted_code_hashes.delete_history();
+        self.known_bytecodes.delete_history();
+        self.decommitment_requests.delete_history();
+    }
+}
+
+impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> {
+    fn decommit_into_memory<M: Memory>(
+        &mut self,
+        monotonic_cycle_counter: u32,
+        mut partial_query: DecommittmentQuery,
+        memory: &mut M,
+    ) -> (DecommittmentQuery, Option<Vec<U256>>) {
+        self.decommitment_requests.push((), partial_query.timestamp);
+        if let Some(memory_page) = self
+            .decommitted_code_hashes
+            .inner()
+            .get(&partial_query.hash)
+            .copied()
+        {
+            partial_query.is_fresh = false;
+            partial_query.memory_page = MemoryPage(memory_page);
+            partial_query.decommitted_length =
+                bytecode_len_in_words(&u256_to_h256(partial_query.hash));
+
+            (partial_query, None)
+        } else {
+            // fresh one
+            let values = self.get_bytecode(partial_query.hash, partial_query.timestamp);
+            let page_to_use = partial_query.memory_page;
+            let timestamp = partial_query.timestamp;
+            partial_query.decommitted_length = values.len() as u16;
+            partial_query.is_fresh = true;
+
+            // write into memory
+            let mut tmp_q = MemoryQuery {
+                timestamp,
+                location: MemoryLocation {
+                    memory_type: MemoryType::Code,
+                    page: page_to_use,
+                    index: MemoryIndex(0),
+                },
+                value: U256::zero(),
+                value_is_pointer: false,
+                rw_flag: true,
+                is_pended: false,
+            };
+            self.decommitted_code_hashes
+                .insert(partial_query.hash, page_to_use.0, timestamp);
+
+            if B {
+                for (i, value) in values.iter().enumerate() {
+                    tmp_q.location.index = MemoryIndex(i as u32);
+                    tmp_q.value = *value;
+                    memory.specialized_code_query(monotonic_cycle_counter, tmp_q);
+                }
+
+                (partial_query, Some(values))
+            } else {
+                for (i, value) in values.into_iter().enumerate() {
+                    tmp_q.location.index = MemoryIndex(i as u32);
+                    tmp_q.value = value;
+                    memory.specialized_code_query(monotonic_cycle_counter, tmp_q);
+                }
+
+                (partial_query, None)
+            }
+        }
+    }
+}
diff --git a/core/multivm_deps/vm_m5/src/oracles/mod.rs b/core/multivm_deps/vm_m5/src/oracles/mod.rs
new file mode 100644
index 000000000000..5b9378729ed8
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/oracles/mod.rs
@@ -0,0 +1,19 @@
+use zk_evm::aux_structures::Timestamp;
+// We will discard RAM as soon as the execution of a tx ends, so
+// it is ok for now to use SimpleMemory
+pub use zk_evm::reference_impls::memory::SimpleMemory as RamOracle;
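Every oracle in this module follows the same rollback discipline, captured by the `OracleWithHistory` trait declared below: each mutation is recorded together with the timestamp at which it happened, so rolling back means undoing the recorded suffix. A toy sketch of the idea (a simplified stand-in, not the crate's actual `HistoryRecorder`):

    struct ToyRecorder {
        inner: Vec<u64>,
        history: Vec<(u32, u64)>, // (timestamp, pushed value)
    }

    impl ToyRecorder {
        fn push(&mut self, value: u64, timestamp: u32) {
            self.inner.push(value);
            self.history.push((timestamp, value));
        }

        fn rollback_to_timestamp(&mut self, timestamp: u32) {
            // Undo every event recorded at or after `timestamp`.
            while matches!(self.history.last(), Some((t, _)) if *t >= timestamp) {
                self.history.pop();
                self.inner.pop();
            }
        }

        fn delete_history(&mut self) {
            // Keeps the current state but forgets how to roll it back.
            self.history.clear();
        }
    }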
+// All the changes to the events in the DB will be applied after the tx is executed,
+// so for now it is fine.
+pub use zk_evm::reference_impls::event_sink::InMemoryEventSink as EventSinkOracle;
+
+pub use zk_evm::testing::simple_tracer::NoopTracer;
+
+pub mod decommitter;
+pub mod precompile;
+pub mod storage;
+pub mod tracer;
+
+pub trait OracleWithHistory {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp);
+    fn delete_history(&mut self);
+}
diff --git a/core/multivm_deps/vm_m5/src/oracles/precompile.rs b/core/multivm_deps/vm_m5/src/oracles/precompile.rs
new file mode 100644
index 000000000000..3374be5caa96
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/oracles/precompile.rs
@@ -0,0 +1,78 @@
+use zk_evm::{
+    abstractions::Memory,
+    abstractions::PrecompileCyclesWitness,
+    abstractions::PrecompilesProcessor,
+    aux_structures::{LogQuery, MemoryQuery, Timestamp},
+    precompiles::DefaultPrecompilesProcessor,
+};
+
+use crate::history_recorder::HistoryRecorder;
+
+use super::OracleWithHistory;
+
+/// A wrapper over `DefaultPrecompilesProcessor` that stores the queue
+/// of timestamps at which precompiles were called.
+/// The number of precompile calls per block is strictly limited, so
+/// saving the timestamps allows us to check the exact number
+/// of log queries that were used during the tx execution.
+#[derive(Debug, Clone)]
+pub struct PrecompilesProcessorWithHistory<const B: bool> {
+    pub timestamp_history: HistoryRecorder<Vec<Timestamp>>,
+    pub default_precompiles_processor: DefaultPrecompilesProcessor<B>,
+}
+
+impl<const B: bool> OracleWithHistory for PrecompilesProcessorWithHistory<B> {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.timestamp_history.rollback_to_timestamp(timestamp);
+    }
+
+    fn delete_history(&mut self) {
+        self.timestamp_history.delete_history();
+    }
+}
+
+impl<const B: bool> PrecompilesProcessorWithHistory<B> {
+    pub fn new() -> Self {
+        Self {
+            timestamp_history: Default::default(),
+            default_precompiles_processor: DefaultPrecompilesProcessor {},
+        }
+    }
+    pub fn get_timestamp_history(&self) -> &Vec<Timestamp> {
+        self.timestamp_history.inner()
+    }
+}
+
+impl<const B: bool> Default for PrecompilesProcessorWithHistory<B> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<const B: bool> PrecompilesProcessor for PrecompilesProcessorWithHistory<B> {
+    fn start_frame(&mut self) {
+        self.default_precompiles_processor.start_frame();
+    }
+    fn execute_precompile<M: Memory>(
+        &mut self,
+        monotonic_cycle_counter: u32,
+        query: LogQuery,
+        memory: &mut M,
+    ) -> Option<(Vec<MemoryQuery>, Vec<MemoryQuery>, PrecompileCyclesWitness)> {
+        // In the next line we use `query.timestamp` both as
+        // an operation in the history of the precompiles processor and as
+        // the time when this operation occurred.
+        // While slightly weird, it is done for consistency with other oracles
+        // where operations and timestamps have different types.
+        self.timestamp_history
+            .push(query.timestamp, query.timestamp);
+        self.default_precompiles_processor.execute_precompile(
+            monotonic_cycle_counter,
+            query,
+            memory,
+        )
+    }
+    fn finish_frame(&mut self, _panicked: bool) {
+        self.default_precompiles_processor.finish_frame(_panicked);
+    }
+}
diff --git a/core/multivm_deps/vm_m5/src/oracles/storage.rs b/core/multivm_deps/vm_m5/src/oracles/storage.rs
new file mode 100644
index 000000000000..cb9fb8f7b8ab
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/oracles/storage.rs
@@ -0,0 +1,304 @@
+use std::collections::HashMap;
+
+use crate::glue::GlueInto;
+use crate::storage::StoragePtr;
+
+use crate::history_recorder::{
+    AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper,
+};
+use crate::vm::MultiVMSubversion;
+
+use zk_evm::abstractions::RefundedAmounts;
+use zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES;
+use zk_evm::{
+    abstractions::{RefundType, Storage as VmStorageOracle},
+    aux_structures::{LogQuery, Timestamp},
+    reference_impls::event_sink::ApplicationData,
+};
+use zksync_types::utils::storage_key_for_eth_balance;
+use zksync_types::{
+    AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS,
+    U256,
+};
+use zksync_utils::u256_to_h256;
+
+use super::OracleWithHistory;
+
+// While the storage does not support different shards, it was decided to write the
+// code of the StorageOracle with the shard parameters in mind.
+pub fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey {
+    StorageKey::new(AccountTreeId::new(address), u256_to_h256(key))
+}
+
+pub fn storage_key_of_log(query: &LogQuery) -> StorageKey {
+    triplet_to_storage_key(query.shard_id, query.address, query.key)
+}
+
+#[derive(Debug)]
+pub struct StorageOracle<'a> {
+    // Access to the persistent storage. Please note that it
+    // is used only for read access. All the actual writes happen
+    // after the execution has ended.
+    pub storage: HistoryRecorder<StorageWrapper<'a>>,
+
+    pub frames_stack: AppDataFrameManagerWithHistory<StorageLogQuery>,
+
+    // The changes that have been paid for in previous transactions.
+    // It is a mapping from storage key to the number of *bytes* that were paid by the user
+    // to cover this slot.
+    pub paid_changes: HistoryRecorder<HashMap<StorageKey, u32>>,
+
+    pub refund_state: MultiVMSubversion,
+}
+
+impl<'a> OracleWithHistory for StorageOracle<'a> {
+    fn rollback_to_timestamp(&mut self, timestamp: Timestamp) {
+        self.frames_stack.rollback_to_timestamp(timestamp);
+        self.storage.rollback_to_timestamp(timestamp);
+        self.paid_changes.rollback_to_timestamp(timestamp);
+    }
+
+    fn delete_history(&mut self) {
+        self.frames_stack.delete_history();
+        self.storage.delete_history();
+        self.paid_changes.delete_history();
+    }
+}
+
+impl<'a> StorageOracle<'a> {
+    pub fn new(storage: StoragePtr<'a>, refund_state: MultiVMSubversion) -> Self {
+        Self {
+            storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)),
+            frames_stack: Default::default(),
+            paid_changes: Default::default(),
+            refund_state,
+        }
+    }
+
+    fn is_storage_key_free(&self, key: &StorageKey) -> bool {
+        match self.refund_state {
+            MultiVMSubversion::V1 => {
+                key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS
+            }
+            MultiVMSubversion::V2 => {
+                key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS
+                    || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS)
+            }
+        }
+    }
+
+    pub fn read_value(&mut self, mut query: LogQuery) -> LogQuery {
+        let key = triplet_to_storage_key(query.shard_id, query.address, query.key);
+        let current_value = self.storage.read_from_storage(&key);
+
+        query.read_value = current_value;
+
+        self.frames_stack.push_forward(
+            StorageLogQuery {
+                log_query: query.glue_into(),
+                log_type: StorageLogQueryType::Read,
+            },
+            query.timestamp,
+        );
+
+        query
+    }
+
+    pub fn write_value(&mut self, mut query: LogQuery) -> LogQuery {
+        let key = triplet_to_storage_key(query.shard_id, query.address, query.key);
+        let current_value =
+            self.storage
+                .write_to_storage(key, query.written_value, query.timestamp);
+
+        let log_query_type = if self.storage.get_ptr().borrow_mut().is_write_initial(&key) {
+            StorageLogQueryType::InitialWrite
+        } else {
+            StorageLogQueryType::RepeatedWrite
+        };
+
+        query.read_value = current_value;
+
+        let mut storage_log_query = StorageLogQuery {
+            log_query: query.glue_into(),
+            log_type: log_query_type,
+        };
+        self.frames_stack
+            .push_forward(storage_log_query, query.timestamp);
+        storage_log_query.log_query.rollback = true;
+        self.frames_stack
+            .push_rollback(storage_log_query, query.timestamp);
+        storage_log_query.log_query.rollback = false;
+
+        query
+    }
+
+    // Returns the amount that has already been paid for writes into the storage slot.
+    fn prepaid_for_write(&self, storage_key: &StorageKey) -> u32 {
+        self.paid_changes
+            .inner()
+            .get(storage_key)
+            .copied()
+            .unwrap_or_default()
+    }
+
+    pub(crate) fn base_price_for_write(&self, query: &LogQuery) -> u32 {
+        let storage_key = storage_key_of_log(query);
+
+        if self.is_storage_key_free(&storage_key) {
+            return 0;
+        }
+
+        let is_initial = self
+            .storage
+            .get_ptr()
+            .borrow_mut()
+            .is_write_initial(&storage_key);
+
+        get_pubdata_price_bytes(query, is_initial)
+    }
+
+    // Returns the price of the update in terms of pubdata bytes.
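Concretely, only the part of the base cost not yet covered earlier in the batch is charged. With illustrative numbers: if a slot's base write cost is 64 pubdata bytes and 64 were already prepaid by an earlier transaction, the update below costs 0; if only 32 were prepaid, it costs 32. A sketch of the same arithmetic:

    fn to_charge(base_cost: u32, already_paid: u32) -> u32 {
        base_cost.saturating_sub(already_paid)
    }

    #[test]
    fn prepaid_slots_are_cheaper() {
        assert_eq!(to_charge(64, 64), 0); // fully prepaid earlier in the batch
        assert_eq!(to_charge(64, 32), 32); // only the remainder is charged
    }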
+ fn value_update_price(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + let base_cost = self.base_price_for_write(query); + + let already_paid = self.prepaid_for_write(&storage_key); + + if base_cost <= already_paid { + // Some other transaction has already paid for this slot, no need to pay anything + 0u32 + } else { + base_cost - already_paid + } + } +} + +impl<'a> VmStorageOracle for StorageOracle<'a> { + // Perform a storage read/write access by taking an partially filled query + // and returning filled query and cold/warm marker for pricing purposes + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + query: LogQuery, + ) -> LogQuery { + // vlog::trace!( + // "execute partial query cyc {:?} addr {:?} key {:?}, rw {:?}, wr {:?}, tx {:?}", + // _monotonic_cycle_counter, + // query.address, + // query.key, + // query.rw_flag, + // query.written_value, + // query.tx_number_in_block + // ); + assert!(!query.rollback); + if query.rw_flag { + // The number of bytes that have been compensated by the user to perform this write + let storage_key = storage_key_of_log(&query); + + // It is considered that the user has paid for the whole base price for the writes + let to_pay_by_user = self.base_price_for_write(&query); + let prepaid = self.prepaid_for_write(&storage_key); + + if to_pay_by_user > prepaid { + self.paid_changes.apply_historic_record( + HashMapHistoryEvent { + key: storage_key, + value: Some(to_pay_by_user), + }, + query.timestamp, + ); + } + self.write_value(query) + } else { + self.read_value(query) + } + } + + // We can return the size of the refund before each storage query. + // Note, that while the `RefundType` allows to provide refunds both in + // `ergs` and `pubdata`, only refunds in pubdata will be compensated for the users + fn estimate_refunds_for_write( + &mut self, // to avoid any hacks inside, like prefetch + _monotonic_cycle_counter: u32, + partial_query: &LogQuery, + ) -> RefundType { + let price_to_pay = self.value_update_price(partial_query); + + RefundType::RepeatedWrite(RefundedAmounts { + ergs: 0, + // `INITIAL_STORAGE_WRITE_PUBDATA_BYTES` is the default amount of pubdata bytes the user pays for. + pubdata_bytes: (INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32) - price_to_pay, + }) + } + + // Indicate a start of execution frame for rollback purposes + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp); + } + + // Indicate that execution frame went out from the scope, so we can + // log the history and either rollback immediately or keep records to rollback later + fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) { + // If we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + let current_frame = self.frames_stack.drain_frame(timestamp); + let ApplicationData { forward, rollbacks } = current_frame; + + if panicked { + // perform actual rollback + for query in rollbacks.iter().rev() { + let read_value = match query.log_type { + StorageLogQueryType::Read => { + // Having Read logs in rollback is not possible + vlog::warn!("Read log in rollback queue {:?}", query); + continue; + } + StorageLogQueryType::InitialWrite | StorageLogQueryType::RepeatedWrite => { + query.log_query.read_value + } + }; + + let LogQuery { written_value, .. 
} = query.log_query.glue_into();
+                let key = triplet_to_storage_key(
+                    query.log_query.shard_id,
+                    query.log_query.address,
+                    query.log_query.key,
+                );
+                let current_value = self.storage.write_to_storage(
+                    key,
+                    // NOTE: since it is a rollback query,
+                    // the `read_value` is being set.
+                    read_value,
+                    timestamp,
+                );
+
+                // Additional validation that the current value was correct.
+                // Unwrap is safe because the return value from `write_inner` is the previous value in this leaf.
+                // It is impossible to set a leaf value to `None`.
+                assert_eq!(current_value, written_value);
+            }
+
+            for query in forward {
+                self.frames_stack.push_forward(query, timestamp)
+            }
+            for query in rollbacks.into_iter().rev() {
+                self.frames_stack.push_forward(query, timestamp)
+            }
+        } else {
+            for query in forward {
+                self.frames_stack.push_forward(query, timestamp)
+            }
+            for query in rollbacks {
+                self.frames_stack.push_rollback(query, timestamp)
+            }
+        }
+    }
+}
+
+fn get_pubdata_price_bytes(_query: &LogQuery, is_initial: bool) -> u32 {
+    if is_initial {
+        zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32
+    } else {
+        zk_evm::zkevm_opcode_defs::system_params::REPEATED_STORAGE_WRITE_PUBDATA_BYTES as u32
+    }
+}
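The merge rule in `finish_frame` above can be summarized with plain vectors (a simplified sketch that ignores the actual storage writes): on panic the child's rollbacks are replayed and the whole frame is appended to the parent's forward log, so the parent records a net no-op; on success the child's forward entries join the parent's forward log and its rollbacks are stacked onto the parent's rollbacks for a possible later revert.

    struct Frame {
        forward: Vec<&'static str>,
        rollbacks: Vec<&'static str>,
    }

    fn merge(parent: &mut Frame, child: Frame, panicked: bool) {
        if panicked {
            parent.forward.extend(child.forward);
            // Compensating entries are appended most recent first.
            parent.forward.extend(child.rollbacks.into_iter().rev());
        } else {
            parent.forward.extend(child.forward);
            parent.rollbacks.extend(child.rollbacks);
        }
    }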
diff --git a/core/multivm_deps/vm_m5/src/oracles/tracer.rs b/core/multivm_deps/vm_m5/src/oracles/tracer.rs
new file mode 100644
index 000000000000..4104c8100be3
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/oracles/tracer.rs
@@ -0,0 +1,850 @@
+use std::{
+    collections::HashSet,
+    fmt::{self, Display},
+};
+
+use crate::{
+    errors::VmRevertReasonParsingResult,
+    memory::SimpleMemory,
+    storage::StoragePtr,
+    utils::{aux_heap_page_from_base, heap_page_from_base},
+    vm::{get_vm_hook_params, VM_HOOK_POSITION},
+    vm_with_bootloader::BOOTLOADER_HEAP_PAGE,
+};
+// use zk_evm::testing::memory::SimpleMemory;
+use zk_evm::{
+    abstractions::{
+        AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData,
+    },
+    aux_structures::MemoryPage,
+    vm_state::{ErrorFlags, VmLocalState},
+    witness_trace::{DummyTracer, VmWitnessTracer},
+    zkevm_opcode_defs::{
+        decoding::VmEncodingMode, ContextOpcode, FarCallABI, FarCallForwardPageType, FatPointer,
+        LogOpcode, Opcode, RetOpcode, UMAOpcode,
+    },
+};
+use zksync_types::{
+    get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey,
+    ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, H256,
+    KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
+    L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, U256,
+};
+use zksync_utils::{
+    be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256,
+};
+
+pub trait ExecutionEndTracer: Tracer {
+    // Returns whether the vm execution should stop.
+    fn should_stop_execution(&self) -> bool;
+}
+
+pub trait PendingRefundTracer: Tracer {
+    // Some(x) means that the bootloader has asked the operator to provide the refund for the
+    // transaction, where `x` is the refund that the bootloader has suggested on its own.
+    fn requested_refund(&self) -> Option<u32> {
+        None
+    }
+
+    // Set the current request for refund as fulfilled.
+    fn set_refund_as_done(&mut self) {}
+}
+
+pub trait PubdataSpentTracer: Tracer {
+    // Returns how much gas was spent on pubdata.
+    fn gas_spent_on_pubdata(&self, _vm_local_state: &VmLocalState) -> u32 {
+        0
+    }
+}
+
+#[derive(Debug, Clone, Default)]
+pub(crate) struct TransactionResultTracer {
+    pub(crate) revert_reason: Option<Vec<u8>>,
+}
+
+impl<const N: usize, E: VmEncodingMode<N>> VmWitnessTracer<N, E> for TransactionResultTracer {}
+
+impl Tracer for TransactionResultTracer {
+    type SupportedMemory = SimpleMemory;
+    const CALL_BEFORE_EXECUTION: bool = true;
+
+    fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {}
+    fn after_decoding(
+        &mut self,
+        _state: VmLocalStateData<'_>,
+        _data: AfterDecodingData,
+        _memory: &Self::SupportedMemory,
+    ) {
+    }
+    fn before_execution(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: BeforeExecutionData,
+        memory: &Self::SupportedMemory,
+    ) {
+        let hook = VmHook::from_opcode_memory(&state, &data);
+        print_debug_if_needed(&hook, &state, memory);
+
+        if matches!(hook, VmHook::ExecutionResult) {
+            let vm_hook_params = get_vm_hook_params(memory);
+
+            let success = vm_hook_params[0];
+            let returndata_ptr = FatPointer::from_u256(vm_hook_params[1]);
+            let returndata = read_pointer(memory, returndata_ptr);
+
+            if success == U256::zero() {
+                self.revert_reason = Some(returndata);
+            } else {
+                self.revert_reason = None;
+            }
+        }
+    }
+    fn after_execution(
+        &mut self,
+        _state: VmLocalStateData<'_>,
+        _data: AfterExecutionData,
+        _memory: &Self::SupportedMemory,
+    ) {
+    }
+}
+
+impl ExecutionEndTracer for TransactionResultTracer {
+    fn should_stop_execution(&self) -> bool {
+        // This tracer will not prevent the execution from going forward
+        // until the end of the block.
+        false
+    }
+}
+
+impl PendingRefundTracer for TransactionResultTracer {}
+impl PubdataSpentTracer for TransactionResultTracer {}
+
+#[derive(Debug, Clone, Eq, PartialEq, Copy)]
+pub enum ValidationTracerMode {
+    // Should be activated when the transaction is being validated by the user.
+    UserTxValidation,
+    // Should be activated when the transaction is being validated by the paymaster.
+    PaymasterTxValidation,
+    // A state in which there are no restrictions on the execution.
+    NoValidation,
+}
+
+#[derive(Debug, Clone)]
+pub enum ViolatedValidationRule {
+    TouchedUnallowedStorageSlots(Address, U256),
+    CalledContractWithNoCode(Address),
+    TouchedUnallowedContext,
+}
+
+pub enum ValidationError {
+    FailedTx(VmRevertReasonParsingResult),
+    VioalatedRule(ViolatedValidationRule),
+}
+
+impl Display for ViolatedValidationRule {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!(
+                f,
+                "Touched unallowed storage slots: address {}, key: {}",
+                hex::encode(contract),
+                hex::encode(u256_to_h256(*key))
+            ),
+            ViolatedValidationRule::CalledContractWithNoCode(contract) => {
+                write!(f, "Called contract with no code: {}", hex::encode(contract))
+            }
+            ViolatedValidationRule::TouchedUnallowedContext => {
+                write!(f, "Touched unallowed context")
+            }
+        }
+    }
+}
+
+impl Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::FailedTx(revert_reason) => {
+                write!(f, "Validation revert: {}", revert_reason.revert_reason)
+            }
+            Self::VioalatedRule(rule) => {
+                write!(f, "Violated validation rules: {}", rule)
+            }
+        }
+    }
+}
+
+fn touches_allowed_context(address: Address, key: U256) -> bool {
+    // Context is not touched at all
+    if address != SYSTEM_CONTEXT_ADDRESS {
+        return false;
+    }
+
+    // Only chain_id is allowed to be touched.
+    key == U256::from(0u32)
+}
+
+fn is_constant_code_hash(address: Address, key: U256, storage: StoragePtr<'_>) -> bool {
+    if address != ACCOUNT_CODE_STORAGE_ADDRESS {
+        // Not a code hash
+        return false;
+    }
+
+    let value = storage.borrow_mut().get_value(&StorageKey::new(
+        AccountTreeId::new(address),
+        u256_to_h256(key),
+    ));
+
+    value != H256::zero()
+}
+
+fn valid_eth_token_call(address: Address, msg_sender: Address) -> bool {
+    let is_valid_caller = msg_sender == MSG_VALUE_SIMULATOR_ADDRESS
+        || msg_sender == CONTRACT_DEPLOYER_ADDRESS
+        || msg_sender == BOOTLOADER_ADDRESS;
+    address == L2_ETH_TOKEN_ADDRESS && is_valid_caller
+}
+
+/// Tracer that is used to ensure that the validation adheres to all the rules,
+/// to prevent DDoS attacks on the server.
+#[derive(Clone)]
+pub struct ValidationTracer<'a> {
+    // A copy of it should be used in the Storage oracle
+    pub storage: StoragePtr<'a>,
+    pub validation_mode: ValidationTracerMode,
+    pub auxilary_allowed_slots: HashSet<H256>,
+    pub validation_error: Option<ViolatedValidationRule>,
+
+    user_address: Address,
+    paymaster_address: Address,
+    should_stop_execution: bool,
+    trusted_slots: HashSet<(Address, U256)>,
+    trusted_addresses: HashSet<Address>,
+    trusted_address_slots: HashSet<(Address, U256)>,
+}
+
+impl fmt::Debug for ValidationTracer<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ValidationTracer")
+            .field("storage", &"StoragePtr")
+            .field("validation_mode", &self.validation_mode)
+            .field("auxilary_allowed_slots", &self.auxilary_allowed_slots)
+            .field("validation_error", &self.validation_error)
+            .field("user_address", &self.user_address)
+            .field("paymaster_address", &self.paymaster_address)
+            .field("should_stop_execution", &self.should_stop_execution)
+            .field("trusted_slots", &self.trusted_slots)
+            .field("trusted_addresses", &self.trusted_addresses)
+            .field("trusted_address_slots", &self.trusted_address_slots)
+            .finish()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct ValidationTracerParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e. the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted and whose values are new trusted addresses.
+    /// They are needed to work correctly with beacon proxies, where the address
+    /// of the implementation is stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct NewTrustedValidationItems {
+    pub new_allowed_slots: Vec<H256>,
+    pub new_trusted_addresses: Vec<Address>,
+}
+
+type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>;
+
+impl<'a> ValidationTracer<'a> {
+    pub fn new(storage: StoragePtr<'a>, params: ValidationTracerParams) -> Self {
+        ValidationTracer {
+            storage,
+            validation_error: None,
+            validation_mode: ValidationTracerMode::NoValidation,
+            auxilary_allowed_slots: Default::default(),
+
+            should_stop_execution: false,
+            user_address: params.user_address,
+            paymaster_address: params.paymaster_address,
+            trusted_slots: params.trusted_slots,
+            trusted_addresses: params.trusted_addresses,
+            trusted_address_slots: params.trusted_address_slots,
+        }
+    }
+
+    fn process_validation_round_result(&mut self, result: ValidationRoundResult) {
+        match result {
+            Ok(NewTrustedValidationItems {
+                new_allowed_slots,
+                new_trusted_addresses,
+            }) => {
+                self.auxilary_allowed_slots.extend(new_allowed_slots);
+                self.trusted_addresses.extend(new_trusted_addresses);
+            }
+            Err(err) => {
+                self.validation_error = Some(err);
+            }
+        }
+    }
+
+    // Checks whether such a storage access is acceptable.
+    fn is_allowed_storage_read(&self, address: Address, key: U256, msg_sender: Address) -> bool {
+        // If there are no restrictions, all storage reads are valid.
+        // We also don't support the paymaster validation for now.
+        if matches!(
+            self.validation_mode,
+            ValidationTracerMode::NoValidation | ValidationTracerMode::PaymasterTxValidation
+        ) {
+            return true;
+        }
+
+        // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior
+        // of transferring ETH in a way that is safe for the DDoS protection rules.
+        if valid_eth_token_call(address, msg_sender) {
+            return true;
+        }
+
+        if self.trusted_slots.contains(&(address, key))
+            || self.trusted_addresses.contains(&address)
+            || self.trusted_address_slots.contains(&(address, key))
+        {
+            return true;
+        }
+
+        if touches_allowed_context(address, key) {
+            return true;
+        }
+
+        // The user is allowed to touch their own slots or slots semantically related to them.
+        let valid_users_slot = address == self.user_address
+            || u256_to_account_address(&key) == self.user_address
+            || self.auxilary_allowed_slots.contains(&u256_to_h256(key));
+        if valid_users_slot {
+            return true;
+        }
+
+        if is_constant_code_hash(address, key, self.storage.clone()) {
+            return true;
+        }
+
+        false
+    }
+
+    // Used to remember user-related fields (their balance/allowance/etc).
+    // Note that it assumes that the length of the calldata is 64 bytes.
+    fn slot_to_add_from_keccak_call(
+        &self,
+        calldata: &[u8],
+        validated_address: Address,
+    ) -> Option<H256> {
+        assert_eq!(calldata.len(), 64);
+
+        let (potential_address_bytes, potential_position_bytes) = calldata.split_at(32);
+        let potential_address = be_bytes_to_safe_address(potential_address_bytes);
+
+        // If the validated_address is equal to the potential_address,
+        // then it is a request that could be used for a mapping of kind mapping(address => ...).
+        //
+        // If the potential_position_bytes were already allowed before, then this keccak might be
+        // used for an ERC-20 allowance or any other mapping(address => mapping(...)).
+        if potential_address == Some(validated_address)
+            || self
+                .auxilary_allowed_slots
+                .contains(&H256::from_slice(potential_position_bytes))
+        {
+            // This is a request that could be used for a mapping of kind mapping(address => ...)
+
+            // We could theoretically wait for the slot number to be returned by the
+            // keccak256 precompile itself, but this would complicate the code even further,
+            // so let's calculate it here.
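This mirrors Solidity's storage layout for mappings: the slot of `m[a]` for a `mapping(address => ...) m` at position `p` is `keccak256(pad32(a) ++ pad32(p))`, which is exactly the 64-byte keccak input that this tracer watches for. A sketch of the same computation (an illustrative helper, using the `keccak256` already imported in this file):

    fn mapping_entry_slot(address_padded: [u8; 32], mapping_position: [u8; 32]) -> [u8; 32] {
        let mut calldata = [0u8; 64];
        calldata[..32].copy_from_slice(&address_padded);
        calldata[32..].copy_from_slice(&mapping_position);
        keccak256(&calldata)
    }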
+ let slot = keccak256(calldata); + + // Adding this slot to the allowed ones + Some(H256(slot)) + } else { + None + } + } + + pub fn check_user_restrictions( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + ) -> ValidationRoundResult { + let opcode_variant = data.opcode.variant; + match opcode_variant.opcode { + Opcode::FarCall(_) => { + let packed_abi = data.src0_value.value; + let call_destination_value = data.src1_value.value; + + let called_address = u256_to_account_address(&call_destination_value); + let far_call_abi = FarCallABI::from_u256(packed_abi); + + if called_address == KECCAK256_PRECOMPILE_ADDRESS + && far_call_abi.memory_quasi_fat_pointer.length == 64 + { + let calldata_page = get_calldata_page_via_abi( + &far_call_abi, + state.vm_local_state.callstack.current.base_memory_page, + ); + let calldata = memory.read_unaligned_bytes( + calldata_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + 64, + ); + + let slot_to_add = + self.slot_to_add_from_keccak_call(&calldata, self.user_address); + + if let Some(slot) = slot_to_add { + return Ok(NewTrustedValidationItems { + new_allowed_slots: vec![slot], + ..Default::default() + }); + } + } else if called_address != self.user_address { + let code_key = get_code_key(&called_address); + let code = self.storage.borrow_mut().get_value(&code_key); + + if code == H256::zero() { + // The users are not allowed to call contracts with no code + return Err(ViolatedValidationRule::CalledContractWithNoCode( + called_address, + )); + } + } + } + Opcode::Context(context) => { + match context { + ContextOpcode::Meta => { + return Err(ViolatedValidationRule::TouchedUnallowedContext); + } + ContextOpcode::ErgsLeft => { + } + _ => {} + } + } + Opcode::Log(LogOpcode::StorageRead) => { + let key = data.src0_value.value; + let this_address = state.vm_local_state.callstack.current.this_address; + let msg_sender = state.vm_local_state.callstack.current.msg_sender; + + if !self.is_allowed_storage_read(this_address, key, msg_sender) { + return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + this_address, + key, + )); + } + + if self.trusted_address_slots.contains(&(this_address, key)) { + let storage_key = + StorageKey::new(AccountTreeId::new(this_address), u256_to_h256(key)); + + let value = self.storage.borrow_mut().get_value(&storage_key); + + return Ok(NewTrustedValidationItems { + new_trusted_addresses: vec![h256_to_account_address(&value)], + ..Default::default() + }); + } + } + _ => {} + } + + Ok(Default::default()) + } +} + +impl Tracer for ValidationTracer<'_> { + const CALL_BEFORE_EXECUTION: bool = true; + + type SupportedMemory = SimpleMemory; + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + // For now, we support only validations for users. 
+ if let ValidationTracerMode::UserTxValidation = self.validation_mode { + let validation_round_result = self.check_user_restrictions(state, data, memory); + self.process_validation_round_result(validation_round_result); + } + + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + let current_mode = self.validation_mode; + match (current_mode, hook) { + (ValidationTracerMode::NoValidation, VmHook::AccountValidationEntered) => { + // Account validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::UserTxValidation; + } + (ValidationTracerMode::NoValidation, VmHook::PaymasterValidationEntered) => { + // Paymaster validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::PaymasterTxValidation; + } + (_, VmHook::AccountValidationEntered | VmHook::PaymasterValidationEntered) => { + panic!( + "Unallowed transition inside the validation tracer. Mode: {:#?}, hook: {:#?}", + self.validation_mode, hook + ); + } + (_, VmHook::NoValidationEntered) => { + // Validation can be always turned off + self.validation_mode = ValidationTracerMode::NoValidation; + } + (_, VmHook::ValidationStepEndeded) => { + // The validation step has ended. + self.should_stop_execution = true; + } + (_, _) => { + // The hook is not relevant to the validation tracer. Ignore. + } + } + } + fn after_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &Self::SupportedMemory, + ) { + } +} + +fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: MemoryPage) -> u32 { + match far_call_abi.forwarding_mode { + FarCallForwardPageType::ForwardFatPointer => { + far_call_abi.memory_quasi_fat_pointer.memory_page + } + FarCallForwardPageType::UseAuxHeap => aux_heap_page_from_base(base_page).0, + FarCallForwardPageType::UseHeap => heap_page_from_base(base_page).0, + } +} + +impl ExecutionEndTracer for ValidationTracer<'_> { + fn should_stop_execution(&self) -> bool { + self.should_stop_execution || self.validation_error.is_some() + } +} + +impl PendingRefundTracer for ValidationTracer<'_> {} +impl PubdataSpentTracer for ValidationTracer<'_> {} + +/// Allows any opcodes, but tells the VM to end the execution once the tx is over. +#[derive(Debug, Clone, Default)] +pub struct OneTxTracer { + tx_has_been_processed: bool, + + // Some(x) means that the bootloader has asked the operator + // to provide the refund the user, where `x` is the refund proposed + // by the bootloader itself. 
+    pending_operator_refund: Option<u32>,
+
+    pub refund_gas: u32,
+    pub gas_spent_on_bytecodes_and_long_messages: u32,
+    bootloader_tracer: BootloaderTracer,
+}
+
+impl Tracer for OneTxTracer {
+    const CALL_BEFORE_EXECUTION: bool = true;
+    const CALL_AFTER_EXECUTION: bool = true;
+    type SupportedMemory = SimpleMemory;
+
+    fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {}
+    fn after_decoding(
+        &mut self,
+        _state: VmLocalStateData<'_>,
+        _data: AfterDecodingData,
+        _memory: &Self::SupportedMemory,
+    ) {
+    }
+
+    fn before_execution(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: BeforeExecutionData,
+        memory: &Self::SupportedMemory,
+    ) {
+        let hook = VmHook::from_opcode_memory(&state, &data);
+        print_debug_if_needed(&hook, &state, memory);
+
+        match hook {
+            VmHook::TxHasEnded => self.tx_has_been_processed = true,
+            VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(),
+            VmHook::AskOperatorForRefund => {
+                self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32())
+            }
+            _ => {}
+        }
+
+        if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) {
+            let current_stack = state.vm_local_state.callstack.get_current_stack();
+            // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas.
+            // Note that if there is less gas left than requested to burn, it will be burnt anyway.
+            if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS
+                || current_stack.this_address == L1_MESSENGER_ADDRESS
+            {
+                self.gas_spent_on_bytecodes_and_long_messages +=
+                    std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining);
+            }
+        }
+    }
+
+    fn after_execution(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: AfterExecutionData,
+        memory: &Self::SupportedMemory,
+    ) {
+        self.bootloader_tracer.after_execution(state, data, memory)
+    }
+}
+
+impl ExecutionEndTracer for OneTxTracer {
+    fn should_stop_execution(&self) -> bool {
+        self.tx_has_been_processed || self.bootloader_tracer.should_stop_execution()
+    }
+}
+
+impl PendingRefundTracer for OneTxTracer {
+    fn requested_refund(&self) -> Option<u32> {
+        self.pending_operator_refund
+    }
+
+    fn set_refund_as_done(&mut self) {
+        self.pending_operator_refund = None;
+    }
+}
+
+impl PubdataSpentTracer for OneTxTracer {
+    fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 {
+        self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter
+    }
+}
+
+impl OneTxTracer {
+    pub fn is_bootloader_out_of_gas(&self) -> bool {
+        self.bootloader_tracer.is_bootloader_out_of_gas()
+    }
+
+    pub fn tx_has_been_processed(&self) -> bool {
+        self.tx_has_been_processed
+    }
+}
+
+/// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert.
+/// Also saves the information on whether this `ret` was caused by an "out of gas" panic.
+#[derive(Debug, Clone, Default)]
+pub struct BootloaderTracer {
+    is_bootloader_out_of_gas: bool,
+    ret_from_the_bootloader: Option<RetOpcode>,
+}
+
+impl Tracer for BootloaderTracer {
+    const CALL_AFTER_DECODING: bool = true;
+    const CALL_AFTER_EXECUTION: bool = true;
+    type SupportedMemory = SimpleMemory;
+
+    fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {}
+    fn after_decoding(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: AfterDecodingData,
+        _memory: &Self::SupportedMemory,
+    ) {
+        // We should check not only for the `NOT_ENOUGH_ERGS` flag but also whether the current frame is the bootloader.
+ if Self::current_frame_is_bootloader(state.vm_local_state) + && data + .error_flags_accumulated + .contains(ErrorFlags::NOT_ENOUGH_ERGS) + { + self.is_bootloader_out_of_gas = true; + } + } + + fn before_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: BeforeExecutionData, + _memory: &Self::SupportedMemory, + ) { + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + // Decodes next opcode. + // `self` is passed as `tracer`, so `self.after_decoding` will be called and it will catch "out of gas". + let (next_opcode, _, _) = + zk_evm::vm_state::read_and_decode(state.vm_local_state, memory, &mut DummyTracer, self); + if Self::current_frame_is_bootloader(state.vm_local_state) { + if let Opcode::Ret(ret) = next_opcode.inner.variant.opcode { + self.ret_from_the_bootloader = Some(ret); + } + } + } +} + +impl ExecutionEndTracer for BootloaderTracer { + fn should_stop_execution(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Ok) + } +} + +impl PendingRefundTracer for BootloaderTracer {} +impl PubdataSpentTracer for BootloaderTracer {} + +impl BootloaderTracer { + fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { + // The current frame is bootloader if the callstack depth is 1. + // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior + // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. + local_state.callstack.inner.len() == 1 + } + + pub fn is_bootloader_out_of_gas(&self) -> bool { + self.is_bootloader_out_of_gas + } + + pub fn bootloader_panicked(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Panic) + } +} + +#[derive(Clone, Debug, Copy)] +pub(crate) enum VmHook { + AccountValidationEntered, + PaymasterValidationEntered, + NoValidationEntered, + ValidationStepEndeded, + TxHasEnded, + DebugLog, + DebugReturnData, + NoHook, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + ExecutionResult, +} + +impl VmHook { + pub fn from_opcode_memory(state: &VmLocalStateData<'_>, data: &BeforeExecutionData) -> Self { + let opcode_variant = data.opcode.variant; + let heap_page = + heap_page_from_base(state.vm_local_state.callstack.current.base_memory_page).0; + + let src0_value = data.src0_value.value; + + let fat_ptr = FatPointer::from_u256(src0_value); + + let value = data.src1_value.value; + + // Only UMA opcodes in the bootloader serve for vm hooks + if !matches!(opcode_variant.opcode, Opcode::UMA(UMAOpcode::HeapWrite)) + || heap_page != BOOTLOADER_HEAP_PAGE + || fat_ptr.offset != VM_HOOK_POSITION * 32 + { + return Self::NoHook; + } + + match value.as_u32() { + 0 => Self::AccountValidationEntered, + 1 => Self::PaymasterValidationEntered, + 2 => Self::NoValidationEntered, + 3 => Self::ValidationStepEndeded, + 4 => Self::TxHasEnded, + 5 => Self::DebugLog, + 6 => Self::DebugReturnData, + 7 => Self::NearCallCatch, + 8 => Self::AskOperatorForRefund, + 9 => Self::NotifyAboutRefund, + 10 => Self::ExecutionResult, + _ => panic!("Unkown hook"), + } + } +} + +fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String { + let vm_hook_params: Vec<_> = get_vm_hook_params(memory) + .into_iter() + .map(u256_to_h256) + .collect(); + let msg = vm_hook_params[0].as_bytes().to_vec(); + let data = vm_hook_params[1].as_bytes().to_vec(); + + let msg = String::from_utf8(msg).expect("Invalid debug message"); + let data = U256::from_big_endian(&data); + + // For long data, it 
// is better to use hex-encoding for greater readability.
+    let data_str = if data > U256::from(u64::max_value()) {
+        let mut bytes = [0u8; 32];
+        data.to_big_endian(&mut bytes);
+        format!("0x{}", hex::encode(bytes))
+    } else {
+        data.to_string()
+    };
+
+    let tx_id = state.vm_local_state.tx_number_in_block;
+
+    format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str)
+}
+
+/// Reads the memory slice represented by the fat pointer.
+/// Note that the fat pointer must point to accessible memory (i.e. not cleared up yet).
+pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec<u8> {
+    let FatPointer {
+        offset,
+        length,
+        start,
+        memory_page,
+    } = pointer;
+
+    // The actual bounds of the returndata ptr are [start+offset..start+length]
+    let mem_region_start = start + offset;
+    let mem_region_length = length - offset;
+
+    memory.read_unaligned_bytes(
+        memory_page as usize,
+        mem_region_start as usize,
+        mem_region_length as usize,
+    )
+}
+
+/// Outputs the returndata for the latest call.
+/// This is usually used to output the revert reason.
+fn get_debug_returndata(memory: &SimpleMemory) -> String {
+    let vm_hook_params: Vec<_> = get_vm_hook_params(memory);
+    let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]);
+    let returndata = read_pointer(memory, returndata_ptr);
+
+    format!("0x{}", hex::encode(returndata))
+}
+
+/// Accepts a vm hook and, if it requires outputting some debug log, outputs it.
+fn print_debug_if_needed(hook: &VmHook, state: &VmLocalStateData<'_>, memory: &SimpleMemory) {
+    let log = match hook {
+        VmHook::DebugLog => get_debug_log(state, memory),
+        VmHook::DebugReturnData => get_debug_returndata(memory),
+        _ => return,
+    };
+
+    vlog::trace!("{}", log);
+}
diff --git a/core/multivm_deps/vm_m5/src/pubdata_utils.rs b/core/multivm_deps/vm_m5/src/pubdata_utils.rs
new file mode 100644
index 000000000000..af5ea6d4dc97
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/pubdata_utils.rs
@@ -0,0 +1,103 @@
+use crate::glue::GlueInto;
+use crate::oracles::storage::storage_key_of_log;
+use crate::utils::collect_storage_log_queries_after_timestamp;
+use crate::VmInstance;
+use std::collections::HashMap;
+use zk_evm::aux_structures::Timestamp;
+use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes};
+use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries;
+use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS};
+use zksync_utils::bytecode::bytecode_len_in_bytes;
+
+impl<'a> VmInstance<'a> {
+    pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 {
+        let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp);
+
+        let (events, l2_to_l1_logs) =
+            self.collect_events_and_l1_logs_after_timestamp(from_timestamp);
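Schematically, the total computed by this function is a sum of four independent components (names here are illustrative, not from the sources):

    // pubdata = storage_writes
    //         + user_l2_to_l1_log_count * L1_MESSAGE_PUBDATA_BYTES
    //         + total_bytes(long_l2_to_l1_messages)
    //         + sum(len(bytecode) + PUBLISH_BYTECODE_OVERHEAD, for each published bytecode)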
+        // For the first transaction in an L1 batch there may be (depending on the execution mode)
+        // an L2->L1 log that is sent by `SystemContext` in `setNewBlock`. It's a part of the L1
+        // batch pubdata overhead and not of the transaction itself.
+        let l2_l1_logs_bytes = (l2_to_l1_logs
+            .iter()
+            .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS)
+            .count() as u32)
+            * zk_evm::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES;
+        let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events)
+            .iter()
+            .map(|event| event.len() as u32)
+            .sum();
+
+        let published_bytecode_bytes: u32 = extract_published_bytecodes(&events)
+            .iter()
+            .map(|bytecodehash| {
+                bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD
+            })
+            .sum();
+
+        storage_writes_pubdata_published
+            + l2_l1_logs_bytes
+            + l2_l1_long_messages_bytes
+            + published_bytecode_bytes
+    }
+
+    fn pubdata_published_for_writes(&self, from_timestamp: Timestamp) -> u32 {
+        // This `HashMap` contains how much was already paid for every slot that was paid during the last tx execution.
+        // For the slots that weren't paid during the last tx execution we can just use
+        // `self.state.storage.paid_changes.inner().get(&key)` to get how much was paid before.
+        let pre_paid_before_tx_map: HashMap<StorageKey, u32> = self
+            .state
+            .storage
+            .paid_changes
+            .history()
+            .iter()
+            .rev()
+            .take_while(|history_elem| history_elem.0 >= from_timestamp)
+            .map(|history_elem| (history_elem.1.key, history_elem.1.value.unwrap_or(0)))
+            .collect();
+        let pre_paid_before_tx = |key: &StorageKey| -> u32 {
+            if let Some(pre_paid) = pre_paid_before_tx_map.get(key) {
+                *pre_paid
+            } else {
+                self.state
+                    .storage
+                    .paid_changes
+                    .inner()
+                    .get(key)
+                    .copied()
+                    .unwrap_or(0)
+            }
+        };
+
+        let storage_logs = collect_storage_log_queries_after_timestamp(
+            &self
+                .state
+                .storage
+                .frames_stack
+                .inner()
+                .current_frame()
+                .forward,
+            from_timestamp,
+        );
+        let (_, deduplicated_logs) =
+            sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query));
+
+        deduplicated_logs
+            .into_iter()
+            .filter_map(|log| {
+                if log.rw_flag {
+                    let key = storage_key_of_log(&log.glue_into());
+                    let pre_paid = pre_paid_before_tx(&key);
+                    let to_pay_by_user = self.state.storage.base_price_for_write(&log.glue_into());
+
+                    if to_pay_by_user > pre_paid {
+                        Some(to_pay_by_user - pre_paid)
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            })
+            .sum()
+    }
+}
diff --git a/core/multivm_deps/vm_m5/src/refunds.rs b/core/multivm_deps/vm_m5/src/refunds.rs
new file mode 100644
index 000000000000..78db47bbfad2
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/refunds.rs
@@ -0,0 +1,200 @@
+use crate::vm_with_bootloader::{
+    eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET,
+};
+use crate::VmInstance;
+use zk_evm::aux_structures::Timestamp;
+use zksync_types::U256;
+use zksync_utils::ceil_div_u256;
+
+impl<'a> VmInstance<'a> {
+    pub(crate) fn tx_body_refund(
+        &self,
+        from_timestamp: Timestamp,
+        bootloader_refund: u32,
+        gas_spent_on_pubdata: u32,
+    ) -> u32 {
+        let current_tx_index = self.bootloader_state.tx_to_execute() - 1;
+        let tx_gas_limit = self.get_tx_gas_limit(current_tx_index);
+        let total_gas_spent = tx_gas_limit - bootloader_refund;
+
+        let gas_spent_on_computation = total_gas_spent
+            .checked_sub(gas_spent_on_pubdata)
+            .unwrap_or_else(|| {
+                vlog::error!(
+                    "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}",
+                    gas_spent_on_pubdata,
+                    total_gas_spent
+                );
+                0
+            });
+
+        let pubdata_published = self.pubdata_published(from_timestamp);
+
+        // For now, the bootloader charges only for the base fee.
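A worked instance of the netting that follows, with toy numbers: with gas_limit = 1_000_000 and an effective gas price of 2 wei, the user prepaid 2_000_000 wei; if the fair fee comes out to 1_500_000 wei, the refund is 500_000 wei, i.e. 250_000 gas after the ceiling division that `ceil_div_u256` performs (the sketch below mirrors the formula with scalar types):

    fn refund_gas(gas_limit: u64, gas_price: u64, fair_fee: u64) -> u64 {
        let prepaid = gas_limit * gas_price;
        let refund_wei = prepaid.saturating_sub(fair_fee);
        // Ceiling division, mirroring `ceil_div_u256`.
        (refund_wei + gas_price - 1) / gas_price
    }

    #[test]
    fn refund_example() {
        assert_eq!(refund_gas(1_000_000, 2, 1_500_000), 250_000);
    }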
+ let effective_gas_price = self.block_context.base_fee; + + let bootloader_eth_price_per_pubdata_byte = U256::from(effective_gas_price) + * U256::from(self.state.local_state.current_ergs_per_pubdata_byte); + let fair_eth_price_per_pubdata_byte = U256::from(eth_price_per_pubdata_byte( + self.block_context.context.l1_gas_price, + )); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(self.block_context.context.fair_l2_gas_price) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + vlog::error!( + "Fair fee is greater than pre paid. Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u32() + } + + /// Calculates the refund for the block overhead. + /// This refund is the difference between how much user paid in advance for the block overhead + /// and how much he should pay based on actual tx execution result. + pub(crate) fn block_overhead_refund( + &self, + _from_timestamp: Timestamp, + _gas_remaining_before: u32, + _gas_spent_on_pubdata: u32, + ) -> u32 { + 0 + + // let pubdata_published = self.pubdata_published(from_timestamp); + // + // let total_gas_spent = gas_remaining_before - self.gas_remaining(); + // let gas_spent_on_computation = total_gas_spent.checked_sub(gas_spent_on_pubdata).unwrap_or_else(|| { + // vlog::error!("Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", gas_spent_on_pubdata, total_gas_spent); + // 0 + // }); + // let (_, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + // let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + // + // let actual_overhead = Self::actual_overhead_gas( + // self.state.local_state.current_ergs_per_pubdata_byte, + // self.bootloader_state.get_tx_size(current_tx_index), + // pubdata_published, + // gas_spent_on_computation, + // self.state + // .decommittment_processor + // .get_number_of_decommitment_requests_after_timestamp(from_timestamp), + // l2_to_l1_logs.len(), + // ); + // + // let predefined_overhead = self + // .state + // .memory + // .read_slot( + // BOOTLOADER_HEAP_PAGE as usize, + // TX_OVERHEAD_OFFSET + current_tx_index, + // ) + // .value + // .as_u32(); + // + // if actual_overhead <= predefined_overhead { + // predefined_overhead - actual_overhead + // } else { + // // This should never happen but potential mistakes at the early stage should not bring the server down. + // + // // to make debugging easier. 
+        // vlog::error!(
+        //     "Actual overhead is greater than predefined one, actual: {}, predefined: {}",
+        //     actual_overhead,
+        //     predefined_overhead
+        // );
+        // 0
+        // }
+    }
+
+    #[allow(dead_code)]
+    fn actual_overhead_gas(
+        _gas_per_pubdata_byte_limit: u32,
+        _encoded_len: usize,
+        _pubdata_published: u32,
+        _gas_spent_on_computation: u32,
+        _number_of_decommitment_requests: usize,
+        _l2_l1_logs: usize,
+    ) -> u32 {
+        0
+
+        // let overhead_for_block_gas = U256::from(crate::transaction_data::block_overhead_gas(
+        //     gas_per_pubdata_byte_limit,
+        // ));
+
+        // let encoded_len = U256::from(encoded_len);
+        // let pubdata_published = U256::from(pubdata_published);
+        // let gas_spent_on_computation = U256::from(gas_spent_on_computation);
+        // let number_of_decommitment_requests = U256::from(number_of_decommitment_requests);
+        // let l2_l1_logs = U256::from(l2_l1_logs);
+
+        // let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into());
+
+        // let overhead_for_length = ceil_div_u256(
+        //     encoded_len * overhead_for_block_gas,
+        //     BOOTLOADER_TX_ENCODING_SPACE.into(),
+        // );
+
+        // let actual_overhead_for_pubdata = ceil_div_u256(
+        //     pubdata_published * overhead_for_block_gas,
+        //     MAX_PUBDATA_PER_BLOCK.into(),
+        // );
+
+        // let actual_gas_limit_overhead = ceil_div_u256(
+        //     gas_spent_on_computation * overhead_for_block_gas,
+        //     MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT.into(),
+        // );
+
+        // let code_decommitter_sorter_circuit_overhead = ceil_div_u256(
+        //     number_of_decommitment_requests * overhead_for_block_gas,
+        //     GEOMETRY_CONFIG.limit_for_code_decommitter_sorter.into(),
+        // );
+
+        // let l1_l2_logs_overhead = ceil_div_u256(
+        //     l2_l1_logs * overhead_for_block_gas,
+        //     std::cmp::min(
+        //         GEOMETRY_CONFIG.limit_for_l1_messages_merklizer,
+        //         GEOMETRY_CONFIG.limit_for_l1_messages_pudata_hasher,
+        //     )
+        //     .into(),
+        // );
+
+        // let overhead = vec![
+        //     tx_slot_overhead,
+        //     overhead_for_length,
+        //     actual_overhead_for_pubdata,
+        //     actual_gas_limit_overhead,
+        //     code_decommitter_sorter_circuit_overhead,
+        //     l1_l2_logs_overhead,
+        // ]
+        // .into_iter()
+        // .max()
+        // .unwrap();
+
+        // overhead.as_u32()
+    }
+
+    pub(crate) fn get_tx_gas_limit(&self, tx_index: usize) -> u32 {
+        let tx_description_offset = self.bootloader_state.get_tx_description_offset(tx_index);
+        self.state
+            .memory
+            .read_slot(
+                BOOTLOADER_HEAP_PAGE as usize,
+                tx_description_offset + TX_GAS_LIMIT_OFFSET,
+            )
+            .value
+            .as_u32()
+    }
+}
diff --git a/core/multivm_deps/vm_m5/src/storage.rs b/core/multivm_deps/vm_m5/src/storage.rs
new file mode 100644
index 000000000000..09bd611e91a3
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/storage.rs
@@ -0,0 +1,48 @@
+use std::cell::RefCell;
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::rc::Rc;
+
+use zksync_state::{ReadStorage, StorageView, WriteStorage};
+use zksync_types::{StorageKey, StorageValue, H256};
+
+pub trait Storage: Debug + Sync + Send {
+    fn get_value(&mut self, key: &StorageKey) -> StorageValue;
+    // Returns the original value.
+    fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue;
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool;
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>>;
+
+    fn number_of_updated_storage_slots(&self) -> usize;
+
+    fn get_modified_storage_keys(&self) -> &HashMap<StorageKey, StorageValue>;
+}
+
+impl Storage for StorageView {
+    fn get_value(&mut self, key: &StorageKey) -> StorageValue {
+        ReadStorage::read_value(self, key)
+    }
+
+    /// Returns the original value.
+    fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue {
+        WriteStorage::set_value(self, *key, value)
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        ReadStorage::is_write_initial(self, key)
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        ReadStorage::load_factory_dep(self, hash)
+    }
+
+    fn number_of_updated_storage_slots(&self) -> usize {
+        self.get_modified_storage_keys().len()
+    }
+
+    fn get_modified_storage_keys(&self) -> &HashMap<StorageKey, StorageValue> {
+        WriteStorage::modified_storage_keys(self)
+    }
+}
+
+pub type StoragePtr<'a> = Rc<RefCell<&'a mut dyn Storage>>;
diff --git a/core/multivm_deps/vm_m5/src/test_utils.rs b/core/multivm_deps/vm_m5/src/test_utils.rs
new file mode 100644
index 000000000000..907dcae8f0fc
--- /dev/null
+++ b/core/multivm_deps/vm_m5/src/test_utils.rs
@@ -0,0 +1,331 @@
+//!
+//! This file contains various utilities
+//! that can be used for testing but are not needed anywhere else.
+//!
+//! They are not put into the `cfg(test)` folder to allow easy sharing of the content
+//! of this file with other crates.
+//!
+
+use std::collections::HashMap;
+
+use itertools::Itertools;
+use zk_evm::{
+    aux_structures::Timestamp, reference_impls::event_sink::ApplicationData, vm_state::VmLocalState,
+};
+use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract};
+use zksync_types::{
+    ethabi::{Address, Token},
+    fee::Fee,
+    l2::L2Tx,
+    web3::signing::keccak256,
+    Execute, L2ChainId, Nonce, StorageKey, StorageLogQuery, StorageValue,
+    CONTRACT_DEPLOYER_ADDRESS, H256, U256,
+};
+use zksync_utils::{
+    address_to_h256, bytecode::hash_bytecode, h256_to_account_address,
+    test_utils::LoadnextContractExecutionParams, u256_to_h256,
+};
+
+/// The tests here help us with testing the VM.
+use crate::{
+    event_sink::InMemoryEventSink,
+    history_recorder::{FrameManager, HistoryRecorder},
+    memory::SimpleMemory,
+    VmInstance,
+};
+
+#[derive(Clone, Debug)]
+pub struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
+
+// We consider hashmaps to be equal even if there is a key
+// that is not present in one but has zero value in the other.
+impl PartialEq for ModifiedKeysMap {
+    fn eq(&self, other: &Self) -> bool {
+        for (key, value) in self.0.iter() {
+            if *value != other.0.get(key).cloned().unwrap_or_default() {
+                return false;
+            }
+        }
+        for (key, value) in other.0.iter() {
+            if *value != self.0.get(key).cloned().unwrap_or_default() {
+                return false;
+            }
+        }
+        true
+    }
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct DecommitterTestInnerState {
+    /// There is no way to truly compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub modified_storage_keys: ModifiedKeysMap,
+    pub known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>>,
+    pub decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct StorageOracleInnerState {
+    /// There is no way to truly compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub modified_storage_keys: ModifiedKeysMap,
+
+    pub frames_stack: HistoryRecorder<FrameManager<ApplicationData<StorageLogQuery>>>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct PrecompileProcessorTestInnerState {
+    pub timestamp_history: HistoryRecorder<Vec<Timestamp>>,
+}
+
+/// A struct that encapsulates the state of the VM's oracles.
+/// The state is to be used in tests.
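The `StoragePtr` alias above is what lets several oracles share one mutable storage view: cloning the `Rc` is cheap, and the `RefCell` enforces exclusive access at runtime. A minimal usage sketch (`share_storage` is an illustrative helper, not part of this file):

    fn share_storage<'a>(view: &'a mut dyn Storage) -> (StoragePtr<'a>, StoragePtr<'a>) {
        let ptr: StoragePtr<'a> = Rc::new(RefCell::new(view));
        // Both handles point to the same underlying view; `borrow_mut()`
        // will panic if two callers try to hold it mutably at once.
        (ptr.clone(), ptr)
    }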
+#[derive(Clone, PartialEq, Debug)]
+pub struct VmInstanceInnerState {
+    event_sink: InMemoryEventSink,
+    precompile_processor_state: PrecompileProcessorTestInnerState,
+    memory: SimpleMemory,
+    decommitter_state: DecommitterTestInnerState,
+    storage_oracle_state: StorageOracleInnerState,
+    local_state: VmLocalState,
+}
+
+impl<'a> VmInstance<'a> {
+    /// This method is mostly to be used in tests. It dumps the inner state of all the oracles and the VM itself.
+    pub fn dump_inner_state(&self) -> VmInstanceInnerState {
+        let event_sink = self.state.event_sink.clone();
+        let precompile_processor_state = PrecompileProcessorTestInnerState {
+            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
+        };
+        let memory = self.state.memory.clone();
+        let decommitter_state = DecommitterTestInnerState {
+            modified_storage_keys: ModifiedKeysMap(
+                self.state
+                    .decommittment_processor
+                    .get_storage()
+                    .borrow()
+                    .get_modified_storage_keys()
+                    .clone(),
+            ),
+            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
+            decommitted_code_hashes: self
+                .state
+                .decommittment_processor
+                .get_decommitted_code_hashes_with_history()
+                .clone(),
+        };
+        let storage_oracle_state = StorageOracleInnerState {
+            modified_storage_keys: ModifiedKeysMap(
+                self.state
+                    .storage
+                    .storage
+                    .get_ptr()
+                    .borrow()
+                    .get_modified_storage_keys()
+                    .clone(),
+            ),
+            frames_stack: self.state.storage.frames_stack.clone(),
+        };
+        let local_state = self.state.local_state.clone();
+
+        VmInstanceInnerState {
+            event_sink,
+            precompile_processor_state,
+            memory,
+            decommitter_state,
+            storage_oracle_state,
+            local_state,
+        }
+    }
+}
+
+// This one is used only for tests, but it is in this folder to
+// be able to share it among crates
+pub fn mock_loadnext_test_call(
+    eth_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+    execution_params: LoadnextContractExecutionParams,
+) -> L2Tx {
+    let loadnext_contract = get_loadnext_contract();
+
+    let contract_function = loadnext_contract.contract.function("execute").unwrap();
+
+    let params = vec![
+        Token::Uint(U256::from(execution_params.reads)),
+        Token::Uint(U256::from(execution_params.writes)),
+        Token::Uint(U256::from(execution_params.hashes)),
+        Token::Uint(U256::from(execution_params.events)),
+        Token::Uint(U256::from(execution_params.recursive_calls)),
+        Token::Uint(U256::from(execution_params.deploys)),
+    ];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let mut l2_tx = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        Default::default(),
+        L2ChainId(270),
+        &eth_private_key,
+        None,
+        Default::default(),
+    )
+    .unwrap();
+    // Input means all transaction data (NOT calldata, but all tx fields) that came from the API.
+    // This input will be used for the derivation of the tx hash, so put some random data into it to be sure
+    // that the transaction hash is unique.
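+    // (Both random arguments below are arbitrary placeholders; only their uniqueness matters.)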
+    l2_tx.set_input(H256::random().0.to_vec(), H256::random());
+    l2_tx
+}
+
+// This one is used only for tests, but it is in this folder to
+// be able to share it among crates
+pub fn mock_loadnext_gas_burn_call(
+    eth_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+    gas: u32,
+) -> L2Tx {
+    let loadnext_contract = get_loadnext_contract();
+
+    let contract_function = loadnext_contract.contract.function("burnGas").unwrap();
+
+    let params = vec![Token::Uint(U256::from(gas))];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let mut l2_tx = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        Default::default(),
+        L2ChainId(270),
+        &eth_private_key,
+        None,
+        Default::default(),
+    )
+    .unwrap();
+    // Input means all transaction data (NOT calldata, but all tx fields) that came from the API.
+    // This input will be used for the derivation of the tx hash, so put some random data into it to be sure
+    // that the transaction hash is unique.
+    l2_tx.set_input(H256::random().0.to_vec(), H256::random());
+    l2_tx
+}
+
+pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute {
+    let deployer = deployer_contract();
+
+    let contract_function = deployer.function("create").unwrap();
+
+    let params = [
+        Token::FixedBytes(vec![0u8; 32]),
+        Token::FixedBytes(hash_bytecode(code).0.to_vec()),
+        Token::Bytes(calldata.to_vec()),
+    ];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    Execute {
+        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        calldata,
+        factory_deps: Some(vec![code.to_vec()]),
+        value: U256::zero(),
+    }
+}
+
+fn get_execute_error_calldata() -> Vec<u8> {
+    let test_contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    );
+
+    let function = test_contract.function("require_short").unwrap();
+
+    function
+        .encode_input(&[])
+        .expect("failed to encode parameters")
+}
+
+pub fn get_deploy_tx(
+    account_private_key: H256,
+    nonce: Nonce,
+    code: &[u8],
+    factory_deps: Vec<Vec<u8>>,
+    calldata: &[u8],
+    fee: Fee,
+) -> L2Tx {
+    let factory_deps = factory_deps
+        .into_iter()
+        .chain(vec![code.to_vec()])
+        .collect();
+    let execute = get_create_execute(code, calldata);
+
+    let mut signed = L2Tx::new_signed(
+        CONTRACT_DEPLOYER_ADDRESS,
+        execute.calldata,
+        nonce,
+        fee,
+        U256::zero(),
+        L2ChainId(270),
+        &account_private_key,
+        Some(factory_deps),
+        Default::default(),
+    )
+    .expect("should create a signed execute transaction");
+
+    signed.set_input(H256::random().as_bytes().to_vec(), H256::random());
+
+    signed
+}
+
+pub fn get_error_tx(
+    account_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+) -> L2Tx {
+    let factory_deps = vec![];
+    let calldata = get_execute_error_calldata();
+
+    let mut signed = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        U256::zero(),
+        L2ChainId(270),
+        &account_private_key,
+        Some(factory_deps),
+        Default::default(),
+    )
+    .expect("should create a signed execute transaction");
+
+    signed.set_input(H256::random().as_bytes().to_vec(), H256::random());
+
+    signed
+}
+
+pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) -> Address {
+    let prefix = keccak256("zksyncCreate".as_bytes());
+    let address = address_to_h256(&sender_address);
+    let nonce = u256_to_h256(U256::from(sender_nonce.0));
+
+    let digest = prefix
+        .iter()
+        .chain(address.0.iter())
+        .chain(nonce.0.iter())
+        .copied()
+        .collect_vec();
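+    // `digest` is keccak256("zksyncCreate") ++ sender address (32 bytes) ++ nonce (32 bytes);
+    // the next lines hash it and keep the last 20 bytes (via `h256_to_account_address`)
+    // as the deterministic CREATE address.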
+ + let hash = keccak256(&digest); + + h256_to_account_address(&H256(hash)) +} diff --git a/core/multivm_deps/vm_m5/src/tests/bootloader.rs b/core/multivm_deps/vm_m5/src/tests/bootloader.rs new file mode 100644 index 000000000000..f2b80e9e3c3d --- /dev/null +++ b/core/multivm_deps/vm_m5/src/tests/bootloader.rs @@ -0,0 +1,1655 @@ +// //! +// //! Tests for the bootloader +// //! The description for each of the tests can be found in the corresponding `.yul` file. +// //! +// #![cfg_attr(test, allow(unused_imports))] + +// use crate::errors::{VmRevertReason, VmRevertReasonParsingResult}; +// use crate::memory::SimpleMemory; +// use crate::oracles::tracer::{ +// read_pointer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, +// TransactionResultTracer, VmHook, +// }; +// use crate::storage::{Storage, StoragePtr}; +// use crate::test_utils::{ +// get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, +// mock_loadnext_test_call, VmInstanceInnerState, +// }; +// use crate::utils::{ +// create_test_block_params, insert_system_contracts, read_bootloader_test_code, +// BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, +// }; +// use crate::vm::{ +// get_vm_hook_params, tx_has_failed, VmBlockResult, VmExecutionStopReason, ZkSyncVmState, +// MAX_MEM_SIZE_BYTES, +// }; +// use crate::vm_with_bootloader::{ +// bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, +// init_vm_inner, push_raw_transaction_to_bootloader_memory, +// push_transaction_to_bootloader_memory, BlockContext, DerivedBlockContext, BOOTLOADER_HEAP_PAGE, +// BOOTLOADER_TX_DESCRIPTION_OFFSET, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, +// }; +// use crate::vm_with_bootloader::{BlockContextMode, BootloaderJobType, TxExecutionMode}; +// use crate::{test_utils, VmInstance}; +// use crate::{TxRevertReason, VmExecutionResult}; +// use itertools::Itertools; +// use std::cell::RefCell; +// use std::convert::TryFrom; +// use std::ops::{Add, DivAssign}; +// use std::rc::Rc; +// use tempfile::TempDir; +// use zk_evm::abstractions::{ +// AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, +// MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES, +// }; +// use zk_evm::aux_structures::Timestamp; +// use zk_evm::block_properties::BlockProperties; +// use zk_evm::sha3::digest::typenum::U830; +// use zk_evm::witness_trace::VmWitnessTracer; +// use zk_evm::zkevm_opcode_defs::decoding::VmEncodingMode; +// use zk_evm::zkevm_opcode_defs::FatPointer; +// use zksync_types::block::DeployedContract; +// use zksync_types::ethabi::encode; +// use zksync_types::l1::L1Tx; +// use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; +// use zksync_utils::test_utils::LoadnextContractExecutionParams; +// use zksync_utils::{ +// address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, bytes_to_le_words, h256_to_u256, +// u256_to_h256, +// }; +// use zksync_utils::{h256_to_account_address, u256_to_account_address}; + +// use crate::{transaction_data::TransactionData, OracleTools}; +// use std::time; +// use zksync_contracts::{ +// default_erc20_bytecode, get_loadnext_contract, known_codes_contract, load_contract, +// load_sys_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, +// BaseSystemContracts, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE, +// }; +// use zksync_crypto::rand::random; +// use zksync_state::secondary_storage::SecondaryStateStorage; +// use zksync_state::storage_view::StorageView; +// use zksync_storage::db::Database; 
+// use zksync_storage::RocksDB; +// use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; +// use zksync_types::utils::{ +// deployed_address_create, storage_key_for_eth_balance, storage_key_for_standard_token_balance, +// }; +// use zksync_types::{ +// ethabi::Token, AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, +// L2ChainId, PackedEthSignature, StorageKey, StorageLogQueryType, Transaction, H256, +// KNOWN_CODES_STORAGE_ADDRESS, U256, +// }; +// use zksync_types::{fee::Fee, l2::L2Tx, l2_to_l1_log::L2ToL1Log, tx::ExecutionMetrics}; +// use zksync_types::{ +// get_code_key, get_is_account_key, get_known_code_key, get_nonce_key, L1TxCommonData, Nonce, +// PriorityOpId, SerialId, StorageLog, ZkSyncReadStorage, BOOTLOADER_ADDRESS, +// CONTRACT_DEPLOYER_ADDRESS, H160, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, +// MAX_TXS_IN_BLOCK, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, +// SYSTEM_CONTEXT_MINIMAL_BASE_FEE, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, +// }; + +// use once_cell::sync::Lazy; +// use zksync_config::constants::ZKPORTER_IS_AVAILABLE; + +// fn run_vm_with_custom_factory_deps<'a>( +// oracle_tools: &'a mut OracleTools<'a, false>, +// block_context: BlockContext, +// block_properties: &'a BlockProperties, +// encoded_tx: Vec, +// predefined_overhead: u32, +// expected_error: Option, +// ) { +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); +// let mut vm = init_vm_inner( +// oracle_tools, +// BlockContextMode::OverrideCurrent(block_context.into()), +// block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// vm.bootloader_state.add_tx_data(encoded_tx.len()); +// vm.state.memory.populate_page( +// BOOTLOADER_HEAP_PAGE as usize, +// get_bootloader_memory_for_encoded_tx( +// encoded_tx, +// 0, +// TxExecutionMode::VerifyExecute, +// 0, +// 0, +// predefined_overhead, +// ), +// Timestamp(0), +// ); + +// let result = vm.execute_next_tx().err(); + +// assert_eq!(expected_error, result); +// } + +// fn get_balance(token_id: AccountTreeId, account: &Address, main_storage: StoragePtr<'_>) -> U256 { +// let key = storage_key_for_standard_token_balance(token_id, account); +// h256_to_u256(main_storage.borrow_mut().get_value(&key)) +// } + +// #[test] +// fn test_dummy_bootloader() { +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; + +// let mut oracle_tools = OracleTools::new(storage_ptr); +// let (block_context, block_properties) = create_test_block_params(); +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// let bootloader_code = read_bootloader_test_code("dummy"); +// let bootloader_hash = hash_bytecode(&bootloader_code); + +// base_system_contracts.bootloader = SystemContractCode { +// code: bytes_to_be_words(bootloader_code), +// hash: bootloader_hash, +// }; + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); 
+ +// let VmBlockResult { +// full_result: res, .. +// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); + +// // Dummy bootloader should not panic +// assert!(res.revert_reason.is_none()); + +// let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + +// verify_required_memory( +// &vm.state, +// vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], +// ); +// } + +// #[test] +// fn test_bootloader_out_of_gas() { +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; + +// let mut oracle_tools = OracleTools::new(storage_ptr); +// let (block_context, block_properties) = create_test_block_params(); + +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + +// let bootloader_code = read_bootloader_test_code("dummy"); +// let bootloader_hash = hash_bytecode(&bootloader_code); + +// base_system_contracts.bootloader = SystemContractCode { +// code: bytes_to_be_words(bootloader_code), +// hash: bootloader_hash, +// }; + +// // init vm with only 100 ergs +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// 10, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// let res = vm.execute_block_tip(); + +// assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); +// } + +// fn verify_required_storage(state: &ZkSyncVmState<'_>, required_values: Vec<(H256, StorageKey)>) { +// for (required_value, key) in required_values { +// let current_value = state.storage.storage.read_from_storage(&key); + +// assert_eq!( +// u256_to_h256(current_value), +// required_value, +// "Invalid value at key {key:?}" +// ); +// } +// } + +// fn verify_required_memory(state: &ZkSyncVmState<'_>, required_values: Vec<(U256, u32, u32)>) { +// for (required_value, memory_page, cell) in required_values { +// let current_value = state +// .memory +// .dump_page_content_as_u256_words(memory_page, cell..cell + 1)[0]; +// assert_eq!(current_value, required_value); +// } +// } + +// #[test] +// fn test_default_aa_interaction() { +// // In this test, we aim to test whether a simple account interaction (without any fee logic) +// // will work. The account will try to deploy a simple contract from integration tests. 
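+// // (Concretely, the checks below are: both nonces incremented, the bytecode hash
+// // marked as known, the contract deployed, and the operator paid its fee.)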
+ +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let operator_address = block_context.context.operator_address; +// let base_fee = block_context.base_fee; +// // We deploy here counter contract, because its logic is trivial +// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let tx_data: TransactionData = tx.clone().into(); + +// let maximal_fee = tx_data.gas_limit * tx_data.max_fee_per_gas; +// let sender_address = tx_data.from(); +// // set balance + +// let key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + +// let tx_execution_result = vm +// .execute_next_tx() +// .expect("Bootloader failed while processing transaction"); + +// assert_eq!( +// tx_execution_result.status, +// TxExecutionStatus::Success, +// "Transaction wasn't successful" +// ); + +// let VmBlockResult { +// full_result: res, .. +// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); +// // Should not panic +// assert!( +// res.revert_reason.is_none(), +// "Bootloader was not expected to revert: {:?}", +// res.revert_reason +// ); + +// // Both deployment and ordinary nonce should be incremented by one. +// let account_nonce_key = get_nonce_key(&sender_address); +// let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + +// // The code hash of the deployed contract should be marked as republished. +// let known_codes_key = get_known_code_key(&contract_code_hash); + +// // The contract should be deployed successfully. 
+// let deployed_address = deployed_address_create(sender_address, U256::zero()); +// let account_code_key = get_code_key(&deployed_address); + +// let expected_slots = vec![ +// (u256_to_h256(expected_nonce), account_nonce_key), +// (u256_to_h256(U256::from(1u32)), known_codes_key), +// (contract_code_hash, account_code_key), +// ]; + +// verify_required_storage(&vm.state, expected_slots); + +// assert!(!tx_has_failed(&vm.state, 0)); + +// let expected_fee = +// maximal_fee - U256::from(tx_execution_result.gas_refunded) * U256::from(base_fee); +// let operator_balance = get_balance( +// AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), +// &operator_address, +// vm.state.storage.storage.get_ptr(), +// ); + +// assert!( +// operator_balance == expected_fee, +// "Operator did not receive his fee" +// ); +// } + +// fn execute_vm_with_predetermined_refund(txs: Vec, refunds: Vec) -> VmBlockResult { +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// // set balance +// for tx in txs.iter() { +// let sender_address = tx.initiator_account(); +// let key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); +// } + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// let codes_for_decommiter = txs +// .iter() +// .flat_map(|tx| { +// tx.execute +// .factory_deps +// .clone() +// .unwrap_or_default() +// .iter() +// .map(|dep| bytecode_to_factory_dep(dep.clone())) +// .collect::)>>() +// }) +// .collect(); + +// vm.state.decommittment_processor.populate( +// codes_for_decommiter, +// Timestamp(vm.state.local_state.timestamp), +// ); + +// let memory_with_suggested_refund = get_bootloader_memory( +// txs.into_iter().map(Into::into).collect(), +// refunds, +// TxExecutionMode::VerifyExecute, +// BlockContextMode::NewBlock(block_context, Default::default()), +// ); + +// vm.state.memory.populate_page( +// BOOTLOADER_HEAP_PAGE as usize, +// memory_with_suggested_refund, +// Timestamp(0), +// ); + +// vm.execute_till_block_end(BootloaderJobType::TransactionExecution) +// } + +// #[test] +// fn test_predetermined_refunded_gas() { +// // In this test, we compare the execution of the bootloader with the predefined +// // refunded gas and without them + +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let base_fee = block_context.base_fee; +// // We deploy here counter contract, because its logic is trivial +// let contract_code = 
read_test_contract(); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); + +// let sender_address = tx.initiator_account(); + +// // set balance +// let key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + +// let tx_execution_result = vm +// .execute_next_tx() +// .expect("Bootloader failed while processing transaction"); + +// assert_eq!( +// tx_execution_result.status, +// TxExecutionStatus::Success, +// "Transaction wasn't successful" +// ); + +// // If the refund provided by the operator or the final refund are the 0 +// // there is no impact of the operator's refund at all and so this test does not +// // make much sense. +// assert!( +// tx_execution_result.operator_suggested_refund > 0, +// "The operator's refund is 0" +// ); +// assert!( +// tx_execution_result.gas_refunded > 0, +// "The final refund is 0" +// ); + +// let mut result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); +// assert!( +// result.full_result.revert_reason.is_none(), +// "Bootloader was not expected to revert: {:?}", +// result.full_result.revert_reason +// ); + +// let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( +// vec![tx], +// vec![tx_execution_result.operator_suggested_refund], +// ); +// // We need to sort these lists as those are flattened from HashMaps +// result.full_result.used_contract_hashes.sort(); +// result_with_predetermined_refund +// .full_result +// .used_contract_hashes +// .sort(); + +// assert_eq!( +// result.full_result.events, +// result_with_predetermined_refund.full_result.events +// ); +// assert_eq!( +// result.full_result.l2_to_l1_logs, +// result_with_predetermined_refund.full_result.l2_to_l1_logs +// ); +// assert_eq!( +// result.full_result.storage_log_queries, +// result_with_predetermined_refund +// .full_result +// .storage_log_queries +// ); +// assert_eq!( +// result.full_result.used_contract_hashes, +// result_with_predetermined_refund +// .full_result +// .used_contract_hashes +// ); +// } + +// #[derive(Debug, Clone)] +// enum TransactionRollbackTestInfo { +// Rejected(Transaction, TxRevertReason), +// Processed(Transaction, bool, TxExecutionStatus), +// } + +// impl TransactionRollbackTestInfo { +// fn new_rejected(transaction: Transaction, revert_reason: TxRevertReason) -> Self { +// Self::Rejected(transaction, revert_reason) +// } + +// fn new_processed( +// transaction: Transaction, +// should_be_rollbacked: bool, +// expected_status: TxExecutionStatus, +// ) -> Self { +// Self::Processed(transaction, should_be_rollbacked, expected_status) +// } + +// fn get_transaction(&self) -> &Transaction { +// match self { +// TransactionRollbackTestInfo::Rejected(tx, _) => tx, +// TransactionRollbackTestInfo::Processed(tx, _, _) => tx, +// } +// } + +// fn rejection_reason(&self) -> 
Option<TxRevertReason> {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, revert_reason) => Some(revert_reason.clone()),
+//             TransactionRollbackTestInfo::Processed(_, _, _) => None,
+//         }
+//     }

+//     fn should_rollback(&self) -> bool {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, _) => true,
+//             TransactionRollbackTestInfo::Processed(_, x, _) => *x,
+//         }
+//     }

+//     fn expected_status(&self) -> TxExecutionStatus {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, _) => {
+//                 panic!("There is no execution status for rejected transaction")
+//             }
+//             TransactionRollbackTestInfo::Processed(_, _, status) => *status,
+//         }
+//     }
+// }

+// // Accepts the address of the sender as well as the list of pairs of its transactions
+// // and whether these transactions should succeed.
+// fn execute_vm_with_possible_rollbacks(
+//     sender_address: Address,
+//     transactions: Vec<TransactionRollbackTestInfo>,
+//     block_context: DerivedBlockContext,
+//     block_properties: BlockProperties,
+// ) -> VmExecutionResult {
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage);

+//     // Setting infinite balance for the sender.
+//     let key = storage_key_for_eth_balance(&sender_address);
+//     storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0])));

+//     let mut oracle_tools = OracleTools::new(storage_ptr);

+//     let mut vm = init_vm_inner(
+//         &mut oracle_tools,
+//         BlockContextMode::NewBlock(block_context, Default::default()),
+//         &block_properties,
+//         BLOCK_GAS_LIMIT,
+//         &BASE_SYSTEM_CONTRACTS,
+//         TxExecutionMode::VerifyExecute,
+//     );

+//     for test_info in transactions {
+//         vm.save_current_vm_as_snapshot();
+//         let vm_state_before_tx = vm.dump_inner_state();
+//         push_transaction_to_bootloader_memory(
+//             &mut vm,
+//             test_info.get_transaction(),
+//             TxExecutionMode::VerifyExecute,
+//         );

+//         match vm.execute_next_tx() {
+//             Err(reason) => {
+//                 assert_eq!(test_info.rejection_reason(), Some(reason));
+//             }
+//             Ok(res) => {
+//                 assert_eq!(test_info.rejection_reason(), None);
+//                 assert_eq!(
+//                     res.status,
+//                     test_info.expected_status(),
+//                     "Transaction status is not correct"
+//                 );
+//             }
+//         };

+//         if test_info.should_rollback() {
+//             // Some error has occurred, we should reject the transaction
+//             vm.rollback_to_latest_snapshot();

+//             // vm_state_before_tx.
+//             let state_after_rollback = vm.dump_inner_state();
+//             assert_eq!(
+//                 vm_state_before_tx, state_after_rollback,
+//                 "Did not rollback VM state correctly"
+//             );
+//         }
+//     }

+//     let VmBlockResult {
+//         full_result: mut result,
+//         ..
+//     } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing);
+//     // Used contract hashes are retrieved in an unordered manner.
+//     // However, they must be sorted for the comparisons in tests to work.
+//     result.used_contract_hashes.sort();

+//     result
+// }

+// // Sets the signature for an L2 transaction and returns the same transaction
+// // but with a different signature.
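+// // For example, `change_signature(tx, vec![27u8; 65])` yields a correctly-sized but
+// // invalid signature, which the rollback tests below expect to be rejected.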
+// fn change_signature(mut tx: Transaction, signature: Vec) -> Transaction { +// tx.common_data = match tx.common_data { +// ExecuteTransactionCommon::L2(mut data) => { +// data.signature = signature; +// ExecuteTransactionCommon::L2(data) +// } +// _ => unreachable!(), +// }; + +// tx +// } + +// #[test] +// fn test_vm_rollbacks() { +// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { +// let (block_context, block_properties) = create_test_block_params(); +// (block_context.into(), block_properties) +// }; + +// let base_fee = U256::from(block_context.base_fee); + +// let sender_private_key = H256::random(); +// let contract_code = read_test_contract(); + +// let tx_nonce_0: Transaction = get_deploy_tx( +// sender_private_key, +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let tx_nonce_1: Transaction = get_deploy_tx( +// sender_private_key, +// Nonce(1), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let tx_nonce_2: Transaction = get_deploy_tx( +// sender_private_key, +// Nonce(2), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); + +// let wrong_signature_length_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 32]); +// let wrong_v_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 65]); +// let wrong_signature_tx = change_signature(tx_nonce_0.clone(), vec![27u8; 65]); + +// let sender_address = tx_nonce_0.initiator_account(); + +// let result_without_rollbacks = execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// // The nonces are ordered correctly, all the transactions should succeed. +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_2.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// ], +// block_context, +// block_properties, +// ); + +// let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Incorrect nonce".to_string(), +// }); +// let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Reusing the same nonce twice".to_string(), +// }); +// let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Signature length is incorrect".to_string(), +// }); +// let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "v is neither 27 nor 28".to_string(), +// }); +// let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), +// }); + +// let result_with_rollbacks = execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// TransactionRollbackTestInfo::new_rejected( +// wrong_signature_length_tx, +// signature_length_is_incorrect, +// ), +// TransactionRollbackTestInfo::new_rejected(wrong_v_tx, v_is_incorrect), +// TransactionRollbackTestInfo::new_rejected(wrong_signature_tx, signature_is_incorrect), +// // The correct nonce is 0, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce.clone()), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The correct nonce is 1, this tx will fail +// TransactionRollbackTestInfo::new_rejected( +// tx_nonce_0.clone(), +// reusing_nonce_twice.clone(), +// ), +// // The correct nonce is 1, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_1, +// false, +// TxExecutionStatus::Success, +// ), +// // The correct nonce is 2, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_0, reusing_nonce_twice.clone()), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_2.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // This tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2, reusing_nonce_twice.clone()), +// ], +// block_context, +// block_properties, +// ); + +// assert_eq!(result_without_rollbacks, result_with_rollbacks); + +// let loadnext_contract = get_loadnext_contract(); + +// let loadnext_constructor_data = encode(&[Token::Uint(U256::from(100))]); +// let loadnext_deploy_tx: Transaction = get_deploy_tx( +// sender_private_key, +// Nonce(0), +// &loadnext_contract.bytecode, +// loadnext_contract.factory_deps, +// &loadnext_constructor_data, +// Fee { +// gas_limit: U256::from(60000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let loadnext_contract_address = +// get_create_zksync_address(loadnext_deploy_tx.initiator_account(), Nonce(0)); +// let deploy_loadnext_tx_info = TransactionRollbackTestInfo::new_processed( +// loadnext_deploy_tx, +// false, +// TxExecutionStatus::Success, +// ); + +// let get_load_next_tx = |params: LoadnextContractExecutionParams, nonce: Nonce| { +// // Here we test loadnext with various kinds of operations +// let tx: Transaction = mock_loadnext_test_call( +// sender_private_key, +// nonce, +// loadnext_contract_address, +// Fee { +// gas_limit: U256::from(80000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// params, +// ) +// .into(); + +// tx +// }; + +// let loadnext_tx_0 = get_load_next_tx( +// LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// }, +// Nonce(1), +// ); +// let loadnext_tx_1 = get_load_next_tx( +// LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// }, +// Nonce(2), +// ); + +// let result_without_rollbacks = execute_vm_with_possible_rollbacks( +// 
sender_address, +// vec![ +// deploy_loadnext_tx_info.clone(), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// ], +// block_context, +// block_properties, +// ); + +// let result_with_rollbacks = execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// deploy_loadnext_tx_info, +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The nonce has been bumped up, this transaction should now fail +// TransactionRollbackTestInfo::new_rejected(loadnext_tx_0, reusing_nonce_twice.clone()), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The nonce has been bumped up, this transaction should now fail +// TransactionRollbackTestInfo::new_rejected(loadnext_tx_1, reusing_nonce_twice), +// ], +// block_context, +// block_properties, +// ); + +// assert_eq!(result_without_rollbacks, result_with_rollbacks); +// } + +// // Inserts the contracts into the test environment, bypassing the +// // deployer system contract. Besides the reference to storage +// // it accepts a `contracts` tuple of information about the contract +// // and whether or not it is an account. 
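+// // Illustrative call, mirroring its use in `test_nonce_holder` below:
+// // `insert_contracts(&mut raw_storage, vec![(account, true)]);`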
+// fn insert_contracts( +// raw_storage: &mut SecondaryStateStorage, +// contracts: Vec<(DeployedContract, bool)>, +// ) { +// let logs: Vec = contracts +// .iter() +// .flat_map(|(contract, is_account)| { +// let mut new_logs = vec![]; + +// let deployer_code_key = get_code_key(contract.account_id.address()); +// new_logs.push(StorageLog::new_write_log( +// deployer_code_key, +// hash_bytecode(&contract.bytecode), +// )); + +// if *is_account { +// let is_account_key = get_is_account_key(contract.account_id.address()); +// new_logs.push(StorageLog::new_write_log( +// is_account_key, +// u256_to_h256(1u32.into()), +// )); +// } + +// new_logs +// }) +// .collect(); +// raw_storage.process_transaction_logs(&logs); + +// for (contract, _) in contracts { +// raw_storage.store_contract(*contract.account_id.address(), contract.bytecode.clone()); +// raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); +// } +// raw_storage.save(L1BatchNumber(0)); +// } + +// enum NonceHolderTestMode { +// SetValueUnderNonce, +// IncreaseMinNonceBy5, +// IncreaseMinNonceTooMuch, +// LeaveNonceUnused, +// IncreaseMinNonceBy1, +// SwitchToArbitraryOrdering, +// } + +// impl From for u8 { +// fn from(mode: NonceHolderTestMode) -> u8 { +// match mode { +// NonceHolderTestMode::SetValueUnderNonce => 0, +// NonceHolderTestMode::IncreaseMinNonceBy5 => 1, +// NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, +// NonceHolderTestMode::LeaveNonceUnused => 3, +// NonceHolderTestMode::IncreaseMinNonceBy1 => 4, +// NonceHolderTestMode::SwitchToArbitraryOrdering => 5, +// } +// } +// } + +// fn get_nonce_holder_test_tx( +// nonce: U256, +// account_address: Address, +// test_mode: NonceHolderTestMode, +// block_context: &DerivedBlockContext, +// ) -> TransactionData { +// TransactionData { +// tx_type: 113, +// from: account_address, +// to: account_address, +// gas_limit: U256::from(10000000u32), +// pubdata_price_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// max_fee_per_gas: U256::from(block_context.base_fee), +// max_priority_fee_per_gas: U256::zero(), +// nonce, +// // The reserved fields that are unique for different types of transactions. +// // E.g. nonce is currently used in all transaction, but it should not be mandatory +// // in the long run. +// reserved: [U256::zero(); 4], +// data: vec![12], +// signature: vec![test_mode.into()], + +// ..Default::default() +// } +// } + +// fn run_vm_with_raw_tx<'a>( +// oracle_tools: &'a mut OracleTools<'a, false>, +// block_context: DerivedBlockContext, +// block_properties: &'a BlockProperties, +// tx: TransactionData, +// ) -> (VmExecutionResult, bool) { +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); +// let mut vm = init_vm_inner( +// oracle_tools, +// BlockContextMode::OverrideCurrent(block_context), +// block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// let overhead = tx.overhead_gas(); +// push_raw_transaction_to_bootloader_memory( +// &mut vm, +// tx, +// TxExecutionMode::VerifyExecute, +// overhead, +// ); +// let VmBlockResult { +// full_result: result, +// .. 
+// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); + +// (result, tx_has_failed(&vm.state, 0)) +// } + +// #[test] +// fn test_nonce_holder() { +// let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = { +// let (block_context, block_properties) = create_test_block_params(); +// (block_context.into(), block_properties) +// }; + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); + +// let account_address = H160::random(); +// let account = DeployedContract { +// account_id: AccountTreeId::new(account_address), +// bytecode: read_nonce_holder_tester(), +// }; + +// insert_contracts(&mut raw_storage, vec![(account, true)]); + +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// // We deploy here counter contract, because its logic is trivial + +// let key = storage_key_for_eth_balance(&account_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut run_nonce_test = |nonce: U256, +// test_mode: NonceHolderTestMode, +// error_message: Option, +// comment: &'static str| { +// let tx = get_nonce_holder_test_tx(nonce, account_address, test_mode, &block_context); + +// let mut oracle_tools = OracleTools::new(storage_ptr); +// let (result, tx_has_failed) = +// run_vm_with_raw_tx(&mut oracle_tools, block_context, &block_properties, tx); +// if let Some(msg) = error_message { +// let expected_error = TxRevertReason::ValidationFailed(VmRevertReason::General { msg }); +// assert_eq!( +// result +// .revert_reason +// .expect("No revert reason") +// .revert_reason, +// expected_error, +// "{}", +// comment +// ); +// } else { +// assert!(!tx_has_failed, "{}", comment); +// } +// }; + +// // Test 1: trying to set value under non sequential nonce value. 
+//     run_nonce_test(
+//         1u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         Some("Previous nonce has not been used".to_string()),
+//         "Allowed to set value under non-sequential value",
+//     );

+//     // Test 2: increase min nonce by 1 with sequential nonce ordering:
+//     run_nonce_test(
+//         0u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy1,
+//         None,
+//         "Failed to increment nonce by 1 for sequential account",
+//     );

+//     // Test 3: correctly set value under nonce with sequential nonce ordering:
+//     run_nonce_test(
+//         1u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         None,
+//         "Failed to set value under a sequential nonce",
+//     );

+//     // Test 4: migrate to the arbitrary nonce ordering:
+//     run_nonce_test(
+//         2u32.into(),
+//         NonceHolderTestMode::SwitchToArbitraryOrdering,
+//         None,
+//         "Failed to switch to arbitrary ordering",
+//     );

+//     // Test 5: increase min nonce by 5
+//     run_nonce_test(
+//         6u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         None,
+//         "Failed to increase min nonce by 5",
+//     );

+//     // Test 6: since the nonces in range [6,10] are no longer allowed, the
+//     // tx with nonce 10 should not be allowed
+//     run_nonce_test(
+//         10u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         Some("Reusing the same nonce twice".to_string()),
+//         "Allowed to reuse nonce below the minimal one",
+//     );

+//     // Test 7: we should be able to use nonce 13
+//     run_nonce_test(
+//         13u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         None,
+//         "Did not allow to use unused nonce 13",
+//     );

+//     // Test 8: we should not be able to reuse nonce 13
+//     run_nonce_test(
+//         13u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         Some("Reusing the same nonce twice".to_string()),
+//         "Allowed to reuse the same nonce twice",
+//     );

+//     // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
+//     run_nonce_test(
+//         14u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         None,
+//         "Did not allow to use a bumped nonce",
+//     );

+//     // Test 10: Do not allow bumping nonce by too much
+//     run_nonce_test(
+//         16u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceTooMuch,
+//         Some("The value for incrementing the nonce is too high".to_string()),
+//         "Allowed for incrementing min nonce too much",
+//     );

+//     // Test 11: Do not allow not setting a nonce as used
+//     run_nonce_test(
+//         16u32.into(),
+//         NonceHolderTestMode::LeaveNonceUnused,
+//         Some("The nonce was not set as used".to_string()),
+//         "Allowed to leave nonce as unused",
+//     );
+// }

+// #[test]
+// fn test_l1_tx_execution() {
+//     // In this test, we try to execute a contract deployment from L1
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let mut storage_accessor = StorageView::new(&raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut storage_accessor;

+//     let mut oracle_tools = OracleTools::new(storage_ptr);
+//     let (block_context, block_properties) = create_test_block_params();

+//     // Here, instead of marking the code hash via the bootloader, we will be
+//     // using L1->L2 communication, the same way it would likely be done in priority mode.
+// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); +// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); + +// let required_l2_to_l1_logs = vec![ +// L2ToL1Log { +// shard_id: 0, +// is_service: false, +// tx_number_in_block: 0, +// sender: SYSTEM_CONTEXT_ADDRESS, +// key: u256_to_h256(U256::from(block_context.block_timestamp)), +// value: Default::default(), +// }, +// L2ToL1Log { +// shard_id: 0, +// is_service: true, +// tx_number_in_block: 0, +// sender: BOOTLOADER_ADDRESS, +// key: l1_deploy_tx_data.canonical_l1_tx_hash(), +// value: u256_to_h256(U256::from(1u32)), +// }, +// ]; + +// let sender_address = l1_deploy_tx_data.from(); + +// oracle_tools.decommittment_processor.populate( +// vec![( +// h256_to_u256(contract_code_hash), +// bytes_to_be_words(contract_code), +// )], +// Timestamp(0), +// ); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &l1_deploy_tx, TxExecutionMode::VerifyExecute); + +// let res = vm.execute_next_tx().unwrap(); + +// // The code hash of the deployed contract should be marked as republished. +// let known_codes_key = get_known_code_key(&contract_code_hash); + +// // The contract should be deployed successfully. +// let deployed_address = deployed_address_create(sender_address, U256::zero()); +// let account_code_key = get_code_key(&deployed_address); + +// let expected_slots = vec![ +// (u256_to_h256(U256::from(1u32)), known_codes_key), +// (contract_code_hash, account_code_key), +// ]; +// assert!(!tx_has_failed(&vm.state, 0)); + +// verify_required_storage(&vm.state, expected_slots); + +// assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); + +// let tx = get_l1_execute_test_contract_tx(deployed_address, true); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); +// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); +// assert_eq!(res.initial_storage_writes, 0); + +// let tx = get_l1_execute_test_contract_tx(deployed_address, false); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); +// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); +// assert_eq!(res.initial_storage_writes, 2); + +// let repeated_writes = res.repeated_storage_writes; + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); +// let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); +// assert_eq!(res.initial_storage_writes, 1); +// // We do the same storage write, so it will be deduplicated +// assert_eq!(res.repeated_storage_writes, repeated_writes); + +// let mut tx = get_l1_execute_test_contract_tx(deployed_address, false); +// tx.execute.value = U256::from(1); +// match &mut tx.common_data { +// ExecuteTransactionCommon::L1(l1_data) => { +// l1_data.to_mint = U256::from(4); +// } +// _ => unreachable!(), +// } +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); +// let execution_result = vm.execute_next_tx().unwrap(); +// // The method is not payable, so the transaction with non-zero value should fail +// assert_eq!( +// 
execution_result.status, +// TxExecutionStatus::Failure, +// "The transaction should fail" +// ); + +// let res = ExecutionMetrics::new(&execution_result.result.logs, 0, 0, 0, 0); + +// // There are 2 initial writes here: +// // - totalSupply of ETH token +// // - balance of the refund recipient +// assert_eq!(res.initial_storage_writes, 2); +// } + +// #[test] +// fn test_invalid_bytecode() { +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let (block_context, block_properties) = create_test_block_params(); + +// let test_vm_with_custom_bytecode_hash = +// |bytecode_hash: H256, expected_revert_reason: Option| { +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let (encoded_tx, predefined_overhead) = +// get_l1_tx_with_custom_bytecode_hash(h256_to_u256(bytecode_hash)); + +// run_vm_with_custom_factory_deps( +// &mut oracle_tools, +// block_context, +// &block_properties, +// encoded_tx, +// predefined_overhead, +// expected_revert_reason, +// ); +// }; + +// let failed_to_mark_factory_deps = |msg: &str| { +// TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { +// msg: msg.to_string(), +// }) +// }; + +// // Here we provide the correctly-formatted bytecode hash of +// // odd length, so it should work. +// test_vm_with_custom_bytecode_hash( +// H256([ +// 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// None, +// ); + +// // Here we provide correctly formatted bytecode of even length, so +// // it should fail. +// test_vm_with_custom_bytecode_hash( +// H256([ +// 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// Some(failed_to_mark_factory_deps( +// "Code length in words must be odd", +// )), +// ); + +// // Here we provide incorrectly formatted bytecode of odd length, so +// // it should fail. +// test_vm_with_custom_bytecode_hash( +// H256([ +// 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// Some(failed_to_mark_factory_deps( +// "Incorrectly formatted bytecodeHash", +// )), +// ); + +// // Here we provide incorrectly formatted bytecode of odd length, so +// // it should fail. +// test_vm_with_custom_bytecode_hash( +// H256([ +// 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// Some(failed_to_mark_factory_deps( +// "Incorrectly formatted bytecodeHash", +// )), +// ); +// } + +// #[test] +// fn test_tracing_of_execution_errors() { +// // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
+// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); + +// let contract_address = Address::random(); +// let error_contract = DeployedContract { +// account_id: AccountTreeId::new(contract_address), +// bytecode: read_error_contract(), +// }; + +// let tx = get_error_tx( +// H256::random(), +// Nonce(0), +// contract_address, +// Fee { +// gas_limit: U256::from(1000000u32), +// max_fee_per_gas: U256::from(10000000000u64), +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(50000u32), +// }, +// ); + +// insert_contracts(&mut raw_storage, vec![(error_contract, false)]); + +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let key = storage_key_for_eth_balance(&tx.common_data.initiator_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &tx.into(), TxExecutionMode::VerifyExecute); + +// let mut tracer = TransactionResultTracer::default(); +// assert_eq!( +// vm.execute_with_custom_tracer(&mut tracer), +// VmExecutionStopReason::VmFinished, +// "Tracer should never request stop" +// ); + +// match tracer.revert_reason { +// Some(revert_reason) => { +// let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); +// assert_eq!( +// revert_reason, +// VmRevertReason::General { +// msg: "short".to_string() +// } +// ) +// } +// _ => panic!( +// "Tracer captured incorrect result {:#?}", +// tracer.revert_reason +// ), +// } +// } + +// /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
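+// /// (It does so by pushing a deploy transaction with a known `gas_limit` into bootloader
+// /// memory and reading back the slot at `TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET`.)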
+// #[test] +// fn test_tx_gas_limit_offset() { +// let gas_limit = U256::from(999999); + +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let raw_storage = SecondaryStateStorage::new(db); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let contract_code = read_test_contract(); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// Default::default(), +// Default::default(), +// Fee { +// gas_limit, +// ..Default::default() +// }, +// ) +// .into(); + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + +// let gas_limit_from_memory = vm +// .state +// .memory +// .read_slot( +// BOOTLOADER_HEAP_PAGE as usize, +// TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, +// ) +// .value; +// assert_eq!(gas_limit_from_memory, gas_limit); +// } + +// #[test] +// fn test_is_write_initial_behaviour() { +// // In this test, we check result of `is_write_initial` at different stages. + +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let base_fee = block_context.base_fee; +// let account_pk = H256::random(); +// let contract_code = read_test_contract(); +// let tx: Transaction = get_deploy_tx( +// account_pk, +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(10000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); + +// let sender_address = tx.initiator_account(); +// let nonce_key = get_nonce_key(&sender_address); + +// // Check that the next write to the nonce key will be initial. +// assert!(storage_ptr.is_write_initial(&nonce_key)); + +// // Set balance to be able to pay fee for txs. +// let balance_key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + +// vm.execute_next_tx() +// .expect("Bootloader failed while processing the first transaction"); +// // Check that `is_write_initial` still returns true for the nonce key. 
+// assert!(storage_ptr.is_write_initial(&nonce_key)); +// } + +// pub fn get_l1_tx_with_custom_bytecode_hash(bytecode_hash: U256) -> (Vec<U256>, u32) { +// let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); +// let predefined_overhead = tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash]); +// let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); + +// (bytes_to_be_words(tx_bytes), predefined_overhead) +// } + +// const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; + +// pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { +// let execute = execute_test_contract(deployed_address, with_panic); +// Transaction { +// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { +// sender: H160::random(), +// gas_limit: U256::from(1000000u32), +// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), +// ..Default::default() +// }), +// execute, +// received_timestamp_ms: 0, +// } +// } + +// pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { +// let execute = get_create_execute(code, calldata); + +// Transaction { +// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { +// sender: H160::random(), +// gas_limit: U256::from(2000000u32), +// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), +// ..Default::default() +// }), +// execute, +// received_timestamp_ms: 0, +// } +// } + +// fn read_test_contract() -> Vec<u8> { +// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +// } + +// fn read_nonce_holder_tester() -> Vec<u8> { +// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +// } + +// fn read_error_contract() -> Vec<u8> { +// read_bytecode( +// "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", +// ) +// } + +// fn execute_test_contract(address: Address, with_panic: bool) -> Execute { +// let test_contract = load_contract( +// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", +// ); + +// let function = test_contract.function("incrementWithRevert").unwrap(); + +// let calldata = function +// .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) +// .expect("failed to encode parameters"); +// Execute { +// contract_address: address, +// calldata, +// value: U256::zero(), +// factory_deps: None, +// } +// } diff --git a/core/multivm_deps/vm_m5/src/tests/mod.rs b/core/multivm_deps/vm_m5/src/tests/mod.rs new file mode 100644 index 000000000000..3900135abeaa --- /dev/null +++ b/core/multivm_deps/vm_m5/src/tests/mod.rs @@ -0,0 +1 @@ +mod bootloader; diff --git a/core/multivm_deps/vm_m5/src/transaction_data.rs b/core/multivm_deps/vm_m5/src/transaction_data.rs new file mode 100644 index 000000000000..578d254547ee --- /dev/null +++ b/core/multivm_deps/vm_m5/src/transaction_data.rs @@ -0,0 +1,487 @@ +use zk_evm::zkevm_opcode_defs::system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}; +use zksync_types::ethabi::{encode, Address, Token}; +use zksync_types::fee::encoding_len; +use zksync_types::MAX_TXS_IN_BLOCK; +use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; +use zksync_utils::{address_to_h256, ceil_div_u256}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; + +use crate::vm_with_bootloader::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +}; + +const
L1_TX_TYPE: u8 = 255; + +// This structure represents the data that is used by +// the Bootloader to describe the transaction. +#[derive(Debug, Default, Clone)] +pub struct TransactionData { + pub tx_type: u8, + pub from: Address, + pub to: Address, + pub gas_limit: U256, + pub pubdata_price_limit: U256, + pub max_fee_per_gas: U256, + pub max_priority_fee_per_gas: U256, + pub paymaster: Address, + pub nonce: U256, + pub value: U256, + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transactions, but it should not be mandatory + // in the long run. + pub reserved: [U256; 4], + pub data: Vec<u8>, + pub signature: Vec<u8>, + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. + pub factory_deps: Vec<Vec<u8>>, + pub paymaster_input: Vec<u8>, + pub reserved_dynamic: Vec<u8>, +} + +impl From<Transaction> for TransactionData { + fn from(execute_tx: Transaction) -> Self { + match &execute_tx.common_data { + ExecuteTransactionCommon::L2(common_data) => { + let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); + + let should_check_chain_id = if matches!( + common_data.transaction_type, + TransactionType::LegacyTransaction + ) { + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + + TransactionData { + tx_type: (common_data.transaction_type as u32) as u8, + from: execute_tx.initiator_account(), + to: execute_tx.execute.contract_address, + gas_limit: common_data.fee.gas_limit, + pubdata_price_limit: common_data.fee.gas_per_pubdata_limit, + max_fee_per_gas: common_data.fee.max_fee_per_gas, + max_priority_fee_per_gas: common_data.fee.max_priority_fee_per_gas, + paymaster: common_data.paymaster_params.paymaster, + nonce, + value: execute_tx.execute.value, + reserved: [ + should_check_chain_id, + U256::zero(), + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + signature: common_data.signature.clone(), + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: common_data.paymaster_params.paymaster_input.clone(), + reserved_dynamic: vec![], + } + } + ExecuteTransactionCommon::L1(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: L1_TX_TYPE, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.serial_id.0), // priority op ID + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + } + } + ExecuteTransactionCommon::ProtocolUpgrade(_) => { + panic!("Protocol upgrade transactions are not supported in vm_m5") + } + } + } +} + +impl TransactionData { + pub fn from(&self) -> Address { + self.from + } + + // This method is to be used only in tests, when we want to bypass the checks imposed + // on the bytecode hash.
+ pub(crate) fn abi_encode_with_custom_factory_deps( + self, + factory_deps_hashes: Vec<U256>, + ) -> Vec<u8> { + encode(&[Token::Tuple(vec![ + Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), + Token::Address(self.from), + Token::Address(self.to), + Token::Uint(self.gas_limit), + Token::Uint(self.pubdata_price_limit), + Token::Uint(self.max_fee_per_gas), + Token::Uint(self.max_priority_fee_per_gas), + Token::Address(self.paymaster), + Token::Uint(self.nonce), + Token::Uint(self.value), + Token::FixedArray(self.reserved.iter().copied().map(Token::Uint).collect()), + Token::Bytes(self.data), + Token::Bytes(self.signature), + Token::Array(factory_deps_hashes.into_iter().map(Token::Uint).collect()), + Token::Bytes(self.paymaster_input), + Token::Bytes(self.reserved_dynamic), + ])]) + } + + pub(crate) fn abi_encode(self) -> Vec<u8> { + let factory_deps_hashes = self + .factory_deps + .iter() + .map(|dep| h256_to_u256(hash_bytecode(dep))) + .collect(); + self.abi_encode_with_custom_factory_deps(factory_deps_hashes) + } + + pub fn into_tokens(self) -> Vec<Token> { + let bytes = self.abi_encode(); + assert!(bytes.len() % 32 == 0); + + bytes_to_be_words(bytes) + } + + pub fn overhead_gas(&self) -> u32 { + if self.tx_type != L1_TX_TYPE { + return 0; + } + + let total_gas_limit = self.gas_limit.as_u32(); + let gas_per_pubdata_byte_limit = self.pubdata_price_limit.as_u32(); + let encoded_len = encoding_len( + self.data.len() as u64, + self.signature.len() as u64, + self.factory_deps.len() as u64, + self.paymaster_input.len() as u64, + self.reserved_dynamic.len() as u64, + ); + + get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata_byte_limit, encoded_len) + } + + // #[cfg(test)] + // pub(crate) fn overhead_gas_with_custom_factory_deps( + // &self, + // factory_deps_hashes: Vec<U256>, + // ) -> u32 { + // let total_gas_limit = self.gas_limit.as_u32(); + // let gas_per_pubdata_byte_limit = self.pubdata_price_limit.as_u32(); + // let encoded_len = encoding_len( + // self.data.len() as u64, + // self.signature.len() as u64, + // factory_deps_hashes.len() as u64, + // self.paymaster_input.len() as u64, + // self.reserved_dynamic.len() as u64, + // ); + // get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata_byte_limit, encoded_len) + // } + + // #[cfg(test)] + // pub(crate) fn canonical_l1_tx_hash(&self) -> zksync_types::H256 { + // use zksync_types::web3::signing::keccak256; + + // if self.tx_type != L1_TX_TYPE { + // panic!("Trying to get L1 tx hash for non-L1 tx"); + // } + + // let encoded_bytes = self.clone().abi_encode(); + + // zksync_types::H256(keccak256(&encoded_bytes)) + // } +}
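Worth noting why `into_tokens` can assert 32-byte alignment: ABI encoding always produces a whole number of 32-byte words. The standalone sketch below is illustrative only; it uses the `ethabi` crate directly (which is what `zksync_types::ethabi` re-exports) and a reduced two-field tuple rather than the full transaction layout above.

```rust
// Illustrative only: a reduced tuple with one dynamic `bytes` field still
// encodes to a whole number of 32-byte words, which is what allows
// `into_tokens` to assert `bytes.len() % 32 == 0` before word-chunking.
use ethabi::{encode, Token, Uint};

fn main() {
    let encoded = encode(&[Token::Tuple(vec![
        Token::Uint(Uint::from(113u64)), // e.g. a tx_type value
        Token::Bytes(vec![0u8; 65]),     // e.g. a 65-byte signature
    ])]);
    // offset word + uint head + bytes offset + length word + padded payload
    assert_eq!(encoded.len() % 32, 0);
    println!("{} bytes = {} words", encoded.len(), encoded.len() / 32);
}
```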
+pub fn derive_overhead(gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize) -> u32 { + assert!( + gas_limit <= MAX_TX_ERGS_LIMIT, + "gas limit is larger than the maximal one" + ); + + // Using large U256 type to avoid overflow + let max_block_overhead = U256::from(block_overhead_gas(gas_price_per_pubdata)); + let gas_limit = U256::from(gas_limit); + let gas_price_per_pubdata = U256::from(gas_price_per_pubdata); + let encoded_len = U256::from(encoded_len); + + // The MAX_TX_ERGS_LIMIT is formed in a way that fully saturates the single-instance circuits + // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance + // circuits. + let overhead_for_single_instance_circuits = + ceil_div_u256(gas_limit * max_block_overhead, MAX_TX_ERGS_LIMIT.into()); + + // The overhead for occupying the bootloader memory + let overhead_for_length = ceil_div_u256( + encoded_len * max_block_overhead, + BOOTLOADER_TX_ENCODING_SPACE.into(), + ); + + // The overhead for occupying a single tx slot + let tx_slot_overhead = ceil_div_u256(max_block_overhead, MAX_TXS_IN_BLOCK.into()); + + // We use `ceil` here for formal reasons, to allow an easier approach for calculating the overhead in O(1) + let max_pubdata_in_tx = ceil_div_u256(gas_limit, gas_price_per_pubdata); + + // The maximal potential overhead from pubdata + let pubdata_overhead = ceil_div_u256( + max_pubdata_in_tx * max_block_overhead, + MAX_PUBDATA_PER_BLOCK.into(), + ); + + let overhead = vec![ + overhead_for_single_instance_circuits, + overhead_for_length, + tx_slot_overhead, + pubdata_overhead, + ] + .into_iter() + .max() + .unwrap(); + + overhead.as_u32() +} + +pub fn get_maximal_allowed_overhead( + total_gas_limit: u32, + gas_per_pubdata_byte_limit: u32, + encoded_len: usize, +) -> u32 { + // Using large U256 type to prevent overflows. + let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); + let total_gas_limit = U256::from(total_gas_limit); + let gas_per_pubdata_byte_limit = U256::from(gas_per_pubdata_byte_limit); + let encoded_len = U256::from(encoded_len); + + // Derivation of overhead consists of 4 parts: + // 1. The overhead for taking up a transaction's slot. (O1): O1 = 1 / MAX_TXS_IN_BLOCK + // 2. The overhead for taking up the bootloader's memory (O2): O2 = encoded_len / BOOTLOADER_TX_ENCODING_SPACE + // 3. The overhead for possible usage of pubdata. (O3): O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK + // 4. The overhead for possible usage of all the single-instance circuits. (O4): O4 = gas_limit / MAX_TX_ERGS_LIMIT + // + // The maximum of these is taken to derive the part of the block's overhead to be paid by the users: + // + // max_overhead = max(O1, O2, O3, O4) + // overhead_gas = ceil(max_overhead * overhead_for_block_gas). Thus, overhead_gas is a function of + // tx_gas_limit, gas_per_pubdata_byte_limit and encoded_len. + // + // While it is possible to derive the overhead with binary search in O(log n), it is too expensive to be done + // on L1, so here is a reference implementation of finding the overhead for a transaction in O(1): + // + // Given total_gas_limit = tx_gas_limit + overhead_gas, we need to find overhead_gas and tx_gas_limit, such that: + // 1. overhead_gas is the maximal possible (the operator is paid fairly) + // 2. overhead_gas(tx_gas_limit, gas_per_pubdata_byte_limit, encoded_len) >= overhead_gas (the user does not overpay) + // The second condition boils down to the following 4 inequalities (at least one of these must hold): + // ceil(O1 * overhead_for_block_gas) >= overhead_gas + // ceil(O2 * overhead_for_block_gas) >= overhead_gas + // ceil(O3 * overhead_for_block_gas) >= overhead_gas + // ceil(O4 * overhead_for_block_gas) >= overhead_gas + // + // Now, we need to solve each of these separately: + + // 1. The overhead for occupying a single tx slot is a constant: + let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); + + // 2. The overhead for occupying the bootloader memory can be derived from encoded_len + let overhead_for_length = ceil_div_u256( + encoded_len * overhead_for_block_gas, + BOOTLOADER_TX_ENCODING_SPACE.into(), + ); + + // 3.
ceil(O3 * overhead_for_block_gas) >= overhead_gas + // O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK = ceil(gas_limit / gas_per_pubdata_byte_limit) / MAX_PUBDATA_PER_BLOCK + // >= gas_limit / (gas_per_pubdata_byte_limit * MAX_PUBDATA_PER_BLOCK). Dropping the `ceil`, while it may provide a marginally lower + // overhead to the operator, gives a substantially easier formula to work with. + // + // For better clarity, let's denote gas_limit = GL, MAX_PUBDATA_PER_BLOCK = MP, gas_per_pubdata_byte_limit = EP, overhead_for_block_gas = OB, total_gas_limit = TL, overhead_gas = OE + // ceil(OB * (TL - OE) / (EP * MP)) >= OE + // + // OB * (TL - OE) / (MP * EP) > OE - 1 + // OB * (TL - OE) > (OE - 1) * EP * MP + // OB * TL + EP * MP > OE * EP * MP + OE * OB + // (OB * TL + EP * MP) / (EP * MP + OB) > OE + // OE = floor((OB * TL + EP * MP) / (EP * MP + OB)) with possible -1 if the division is without remainder + let overhead_for_pubdata = { + let numerator: U256 = overhead_for_block_gas * total_gas_limit + + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK); + let denominator = + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas; + + // Corner case: if `total_gas_limit` = `gas_per_pubdata_byte_limit` = 0 + // then the numerator will be 0 and subtracting 1 will cause a panic, so we just return a zero. + if numerator.is_zero() { + 0.into() + } else { + (numerator - 1) / denominator + } + }; + + // 4. ceil(O4 * overhead_for_block_gas) >= overhead_gas + // O4 = gas_limit / MAX_TX_ERGS_LIMIT. Using the notation from the previous equation: + // ceil(OB * GL / MAX_TX_ERGS_LIMIT) >= OE + // ceil(OB * (TL - OE) / MAX_TX_ERGS_LIMIT) >= OE + // OB * (TL - OE) / MAX_TX_ERGS_LIMIT > OE - 1 + // OB * (TL - OE) > OE * MAX_TX_ERGS_LIMIT - MAX_TX_ERGS_LIMIT + // OB * TL + MAX_TX_ERGS_LIMIT > OE * (MAX_TX_ERGS_LIMIT + OB) + // OE = floor((OB * TL + MAX_TX_ERGS_LIMIT) / (MAX_TX_ERGS_LIMIT + OB)), with possible -1 if the division is without remainder + let overhead_for_gas = { + let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); + let denominator: U256 = U256::from(MAX_TX_ERGS_LIMIT) + overhead_for_block_gas; + + (numerator - 1) / denominator + }; + + let max_overhead = vec![ + tx_slot_overhead, + overhead_for_length, + overhead_for_pubdata, + overhead_for_gas, + ] + .into_iter() + .max() + // For the sake of consistency making sure that total_gas_limit >= max_overhead + .map(|max_overhead| std::cmp::min(max_overhead, total_gas_limit)) + .unwrap(); + + max_overhead.as_u32() +} + +pub(crate) fn block_overhead_gas(gas_per_pubdata_byte: u32) -> u32 { + BLOCK_OVERHEAD_GAS + BLOCK_OVERHEAD_PUBDATA * gas_per_pubdata_byte +}
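The algebra above can be sanity-checked numerically. The following standalone sketch (not part of the diff; OB, TL, EP and MP are made-up values) verifies that the `(numerator - 1) / denominator` trick produces the largest OE accepted by the pubdata inequality:

```rust
// Numeric check of the pubdata-overhead derivation: OE computed as
// floor((OB*TL + EP*MP - 1) / (EP*MP + OB)) is accepted, while OE + 1 is not.
fn ceil_div(a: u128, b: u128) -> u128 {
    (a + b - 1) / b
}

fn main() {
    // Made-up example values: block overhead, total gas limit,
    // gas per pubdata byte, max pubdata per block.
    let (ob, tl, ep, mp): (u128, u128, u128, u128) = (1_450_000, 60_000_000, 800, 110_000);

    let oe = (ob * tl + ep * mp - 1) / (ep * mp + ob);
    let accepted = |oe: u128| ceil_div(ob * (tl - oe), ep * mp) >= oe;

    assert!(accepted(oe));
    assert!(!accepted(oe + 1));
    println!("maximal accepted pubdata overhead: {oe}");
}
```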
+#[cfg(test)] +mod tests { + + use zksync_types::fee::encoding_len; + + use super::*; + + // This method returns the maximum block overhead that can be charged from the user based on the binary search approach + pub fn get_maximal_allowed_overhead_bin_search( + total_gas_limit: u32, + gas_per_pubdata_byte_limit: u32, + encoded_len: usize, + ) -> u32 { + let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { + total_gas_limit - MAX_TX_ERGS_LIMIT + } else { + 0u32 + }; + // Safe cast: the gas_limit for a transaction cannot be larger than 2^32 + let mut right_bound = total_gas_limit; + + // The closure returns whether a certain overhead would be accepted by the bootloader. + // It is accepted if the derived overhead (i.e. the actual overhead that the user has to pay) + // is greater than or equal to the overhead proposed by the operator. + let is_overhead_accepted = |suggested_overhead: u32| { + let derived_overhead = derive_overhead( + total_gas_limit - suggested_overhead, + gas_per_pubdata_byte_limit, + encoded_len, + ); + + derived_overhead >= suggested_overhead + }; + + // In order to find the maximal allowed overhead we perform a binary search + while left_bound + 1 < right_bound { + let mid = (left_bound + right_bound) / 2; + + if is_overhead_accepted(mid) { + left_bound = mid; + } else { + right_bound = mid; + } + } + + if is_overhead_accepted(right_bound) { + right_bound + } else { + left_bound + } + } + + #[test] + fn test_correctness_for_efficient_overhead() { + let test_params = |total_gas_limit: u32, gas_per_pubdata: u32, encoded_len: usize| { + assert!( + total_gas_limit / gas_per_pubdata <= MAX_PUBDATA_PER_BLOCK, + "The input data should not allow too much pubdata per block" + ); + + let result_by_efficient_search = + get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata, encoded_len); + + let result_by_binary_search = get_maximal_allowed_overhead_bin_search( + total_gas_limit, + gas_per_pubdata, + encoded_len, + ); + + assert_eq!(result_by_efficient_search, result_by_binary_search); + }; + + // Some arbitrary test + test_params(60_000_000, 800, 2900); + + // Very small parameters + test_params(0, 1, 12); + + // Relatively big parameters + let max_tx_overhead = derive_overhead(MAX_TX_ERGS_LIMIT, 5000, 10000); + test_params(MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000); + } + + #[test] + fn test_consistency_with_encoding_length() { + let transaction = TransactionData { + tx_type: 113, + from: Address::random(), + to: Address::random(), + gas_limit: U256::from(1u32), + pubdata_price_limit: U256::from(1u32), + max_fee_per_gas: U256::from(1u32), + max_priority_fee_per_gas: U256::from(1u32), + paymaster: Address::random(), + nonce: U256::zero(), + value: U256::zero(), + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transactions, but it should not be mandatory + // in the long run. + reserved: [U256::zero(); 4], + data: vec![0u8; 65], + signature: vec![0u8; 75], + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct.
+ factory_deps: vec![vec![0u8; 32], vec![1u8; 32]], + paymaster_input: vec![0u8; 85], + reserved_dynamic: vec![0u8; 32], + }; + + let assumed_encoded_len = encoding_len(65, 75, 2, 85, 32); + + let true_encoding_len = transaction.into_tokens().len(); + + assert_eq!(assumed_encoded_len, true_encoding_len); + } +} diff --git a/core/multivm_deps/vm_m5/src/utils.rs b/core/multivm_deps/vm_m5/src/utils.rs new file mode 100644 index 000000000000..c498e71bbe37 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/utils.rs @@ -0,0 +1,258 @@ +use crate::glue::GlueInto; +use crate::{memory::SimpleMemory, vm_with_bootloader::BlockContext}; +use once_cell::sync::Lazy; + +use zk_evm::block_properties::BlockProperties; +use zk_evm::{ + aux_structures::{LogQuery, MemoryPage, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; +use zksync_utils::h256_to_u256; + +pub const INITIAL_TIMESTAMP: u32 = 1024; +pub const INITIAL_MEMORY_COUNTER: u32 = 2048; +pub const INITIAL_CALLDATA_PAGE: u32 = 7; +pub const INITIAL_BASE_PAGE: u32 = 8; +pub const ENTRY_POINT_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; + +/// How much gas the bootloader is allowed to spend within one block. +/// Note that this value doesn't correspond to the gas limit of any particular transaction +/// (except for the fact that, of course, the gas limit for each transaction should be <= `BLOCK_GAS_LIMIT`). +pub const BLOCK_GAS_LIMIT: u32 = zk_evm::zkevm_opcode_defs::system_params::VM_INITIAL_FRAME_ERGS; +pub const ETH_CALL_GAS_LIMIT: u32 = MAX_L2_TX_GAS_LIMIT as u32; + +#[derive(Debug, Clone)] +pub enum VmExecutionResult { + Ok(Vec<u8>), + Revert(Vec<u8>), + Panic, + MostLikelyDidNotFinish(Address, u16), +} + +pub const fn code_page_candidate_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0) +} + +pub const fn stack_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 1) +} + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +pub const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 3) +} + +pub(crate) fn dump_memory_page_using_primitive_value( + memory: &SimpleMemory, + ptr: PrimitiveValue, +) -> Vec<u8> { + if !ptr.is_pointer { + return vec![]; + } + let fat_ptr = FatPointer::from_u256(ptr.value); + dump_memory_page_using_fat_pointer(memory, fat_ptr) +} + +pub(crate) fn dump_memory_page_using_fat_pointer( + memory: &SimpleMemory, + fat_ptr: FatPointer, +) -> Vec<u8> { + dump_memory_page_by_offset_and_length( + memory, + fat_ptr.memory_page, + (fat_ptr.start + fat_ptr.offset) as usize, + (fat_ptr.length - fat_ptr.offset) as usize, + ) +} + +pub(crate) fn dump_memory_page_by_offset_and_length( + memory: &SimpleMemory, + page: u32, + offset: usize, + length: usize, +) -> Vec<u8> { + assert!(offset < (1u32 << 24) as usize); + assert!(length < (1u32 << 24) as usize); + let mut dump = Vec::with_capacity(length); + if length == 0 { + return dump; + } + + let first_word = offset / 32; + let end_byte = offset + length; + let mut last_word = end_byte / 32; + if end_byte % 32 != 0 { + last_word += 1; + } + + let unalignment = offset % 32; + + let page_part = + memory.dump_page_content_as_u256_words(page, (first_word as u32)..(last_word as u32)); + + let mut is_first = true; + let mut remaining = length; + for word in
page_part.into_iter() { + let it = word.into_be_iter(); + if is_first { + is_first = false; + let it = it.skip(unalignment); + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } else { + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } + } + + assert_eq!( + dump.len(), + length, + "tried to dump with offset {}, length {}, got a bytestring of length {}", + offset, + length, + dump.len() + ); + + dump +} + +pub trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator<Item = I> +where + Self: 'a, +{ + fn next(&mut self) -> Option<<Self as Iterator>::Item> { + <Self as Iterator>::next(self) + } +} + +pub trait IntoFixedLengthByteIterator<const N: usize> { + type IntoIter: FixedLengthIterator<'static, u8, N>; + fn into_le_iter(self) -> Self::IntoIter; + fn into_be_iter(self) -> Self::IntoIter; +} + +pub struct FixedBufferValueIterator<T, const N: usize> { + iter: std::array::IntoIter<T, N>, +} + +impl<T, const N: usize> Iterator for FixedBufferValueIterator<T, N> { + type Item = T; + fn next(&mut self) -> Option<T> { + self.iter.next() + } } + +impl<T: 'static, const N: usize> FixedLengthIterator<'static, T, N> + for FixedBufferValueIterator<T, N> +{ +} + +impl IntoFixedLengthByteIterator<32> for U256 { + type IntoIter = FixedBufferValueIterator<u8, 32>; + fn into_le_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_little_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } + + fn into_be_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_big_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } +} + +/// Collects storage log queries where `log.log_query.timestamp >= from_timestamp`. +/// Denote `n` to be the number of such queries, then it works in O(n). +pub fn collect_storage_log_queries_after_timestamp( + all_log_queries: &[StorageLogQuery], + from_timestamp: Timestamp, +) -> Vec<StorageLogQuery> { + let from_timestamp = from_timestamp.glue_into(); + all_log_queries + .iter() + .rev() + .take_while(|log_query| log_query.log_query.timestamp >= from_timestamp) + .cloned() + .collect::<Vec<_>>() + .into_iter() + .rev() + .collect() +} + +/// Collects all log queries where `log_query.timestamp >= from_timestamp`. +/// Denote `n` to be the number of such queries, then it works in O(n). +pub fn collect_log_queries_after_timestamp( + all_log_queries: &[LogQuery], + from_timestamp: Timestamp, +) -> Vec<LogQuery> { + all_log_queries + .iter() + .rev() + .take_while(|log_query| log_query.timestamp >= from_timestamp) + .cloned() + .collect::<Vec<_>>() + .into_iter() + .rev() + .collect() +}
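Both collectors rely on the same invariant: queries are appended in timestamp order, so scanning from the back and stopping at the first older entry touches only the matching suffix, after which a second reverse restores the original order. A standalone sketch of the pattern over plain integers (illustrative only), together with the `partition_point` counting used for precompile calls just below:

```rust
// Standalone sketch over plain integers. `collect_after` mirrors the reverse
// scan + re-reverse; the last two lines mirror the O(log n) `partition_point`
// counting used for precompile call timestamps.
fn collect_after(sorted: &[u32], from: u32) -> Vec<u32> {
    sorted
        .iter()
        .rev()
        .take_while(|&&t| t >= from)
        .copied()
        .collect::<Vec<_>>()
        .into_iter()
        .rev()
        .collect()
}

fn main() {
    let timestamps = [1, 3, 5, 8, 13];
    assert_eq!(collect_after(&timestamps, 5), vec![5, 8, 13]);

    let count = timestamps.len() - timestamps.partition_point(|&t| t < 5);
    assert_eq!(count, 3);
}
```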
+ +/// Receives a sorted slice of timestamps. +/// Returns the count of timestamps that are greater than or equal to `from_timestamp`. +/// Works in O(log(sorted_timestamps.len())). +pub fn precompile_calls_count_after_timestamp( + sorted_timestamps: &[Timestamp], + from_timestamp: Timestamp, +) -> usize { + sorted_timestamps.len() - sorted_timestamps.partition_point(|t| *t < from_timestamp) +} + +pub static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub fn create_test_block_params() -> (BlockContext, BlockProperties) { + let context = BlockContext { + block_number: 1u32, + block_timestamp: 1000, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei + operator_address: H160::zero(), + }; + + ( + context, + BlockProperties { + default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }, + ) +} + +pub fn read_bootloader_test_code(test: &str) -> Vec<u8> { + read_zbin_bytecode(format!( + "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", + test, test + )) +} diff --git a/core/multivm_deps/vm_m5/src/vm.rs b/core/multivm_deps/vm_m5/src/vm.rs new file mode 100644 index 000000000000..1e4ddc2dc031 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/vm.rs @@ -0,0 +1,877 @@ +use std::convert::TryFrom; +use std::fmt::Debug; + +use zk_evm::aux_structures::Timestamp; +use zk_evm::vm_state::{PrimitiveValue, VmLocalState, VmState}; +use zk_evm::witness_trace::DummyTracer; +use zk_evm::zkevm_opcode_defs::decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}; +use zk_evm::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; +use zksync_config::constants::MAX_TXS_IN_BLOCK; +use zksync_types::l2_to_l1_log::L2ToL1Log; +use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; +use zksync_types::vm_trace::VmExecutionTrace; +use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, U256}; +use zksync_utils::bytes_to_be_words; + +use crate::bootloader_state::BootloaderState; +use crate::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; +use crate::event_sink::InMemoryEventSink; +use crate::events::merge_events; +use crate::glue::GlueInto; +use crate::memory::SimpleMemory; +use crate::oracles::decommitter::DecommitterOracle; +use crate::oracles::precompile::PrecompilesProcessorWithHistory; +use crate::oracles::storage::StorageOracle; +use crate::oracles::tracer::{ + BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, + TransactionResultTracer, ValidationError, ValidationTracer, ValidationTracerParams, +}; +use crate::oracles::OracleWithHistory; +use crate::utils::{ + collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, + dump_memory_page_using_primitive_value, precompile_calls_count_after_timestamp, +}; +use crate::vm_with_bootloader::{ + BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, + OPERATOR_REFUNDS_OFFSET, +}; +use crate::Word; + +pub type ZkSyncVmState<'a> = VmState< + 'a, + StorageOracle<'a>, + SimpleMemory, + InMemoryEventSink, + PrecompilesProcessorWithHistory<false>, + DecommitterOracle<'a, false>, + DummyTracer, +>; + +pub const MAX_MEM_SIZE_BYTES: u32 = 16777216; // 2^24 + +// Arbitrary space in memory closer to the end of the page +pub const RESULT_SUCCESS_FIRST_SLOT: u32 = + (MAX_MEM_SIZE_BYTES - (MAX_TXS_IN_BLOCK as u32) * 32) / 32; +// The slot that is used for tracking vm hooks +pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; +pub const VM_HOOK_PARAMS_COUNT: u32 = 2; +pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT;
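These constants carve up the tail of the bootloader heap: the last MAX_TXS_IN_BLOCK words hold per-transaction result flags, with the hook slot and its two parameter slots directly beneath. A standalone arithmetic sketch (the MAX_TXS_IN_BLOCK value is an assumption for illustration only):

```rust
// Standalone arithmetic check of the heap-tail layout. The memory size matches
// MAX_MEM_SIZE_BYTES above; MAX_TXS_IN_BLOCK = 1024 is an assumed value.
fn main() {
    const MAX_MEM_SIZE_BYTES: u32 = 1 << 24; // 16777216
    const MAX_TXS_IN_BLOCK: u32 = 1024; // assumption for this sketch

    let result_success_first_slot = (MAX_MEM_SIZE_BYTES - MAX_TXS_IN_BLOCK * 32) / 32;
    let vm_hook_position = result_success_first_slot - 1;
    let vm_hook_params_start = vm_hook_position - 2; // VM_HOOK_PARAMS_COUNT = 2

    // The result slots occupy exactly the last MAX_TXS_IN_BLOCK words of the page.
    assert_eq!(result_success_first_slot, MAX_MEM_SIZE_BYTES / 32 - MAX_TXS_IN_BLOCK);
    println!("results at {result_success_first_slot}, hook at {vm_hook_position}, params at {vm_hook_params_start}");
}
```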
+pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec<U256> { + memory.dump_page_content_as_u256_words( + BOOTLOADER_HEAP_PAGE, + VM_HOOK_PARAMS_START_POSITION..VM_HOOK_PARAMS_START_POSITION + VM_HOOK_PARAMS_COUNT, + ) +} + +/// MultiVM-specific addition. +/// +/// At different points in time, refunds were handled in a different way. +/// E.g., initially they were completely disabled. +/// +/// This enum allows executing blocks with the same VM but different support for refunds. +#[derive(Debug)] +pub enum MultiVMSubversion { + /// Initial VM M5 version, refunds are fully disabled. + V1, + /// Refunds were enabled. ETH balance for bootloader address was marked as a free slot. + V2, +} + +#[derive(Debug)] +pub struct VmInstance<'a> { + pub gas_limit: u32, + pub state: ZkSyncVmState<'a>, + pub execution_mode: TxExecutionMode, + pub block_context: DerivedBlockContext, + pub(crate) bootloader_state: BootloaderState, + + pub snapshots: Vec<VmSnapshot>, + + /// MultiVM-specific addition. See enum doc-comment for details. + pub(crate) refund_state: MultiVMSubversion, +} + +/// This structure stores data that accumulates during the VM run. +#[derive(Debug, PartialEq)] +pub struct VmExecutionResult { + pub events: Vec<VmEvent>, + pub storage_log_queries: Vec<StorageLogQuery>, + pub used_contract_hashes: Vec<U256>, + pub l2_to_l1_logs: Vec<L2ToL1Log>, + pub return_data: Vec<U256>, + + /// Value denoting the amount of gas spent within the VM invocation. + /// Note that the return value represents the difference between the amount of gas + /// available to the VM before and after execution. + /// + /// This means that, depending on the context, `gas_used` may represent different things. + /// If the VM is continuously invoked and interrupted after each tx, this field may represent the + /// amount of gas spent by a single transaction. + /// + /// To understand which value `gas_used` represents, see the documentation for the method + /// that you use to get the `VmExecutionResult` object. + /// + /// Side note: this may sound confusing, but this arises from the nature of the bootloader: for it, + /// processing multiple transactions is a single action. We *may* intrude and stop the VM once a transaction + /// is executed, but it's not enforced. So the best we can do is to calculate the amount of gas before and + /// after the invocation, leaving the interpretation of this value to the user. + pub gas_used: u32, + pub contracts_used: usize, + pub revert_reason: Option<VmRevertReasonParsingResult>, + pub trace: VmExecutionTrace, + pub total_log_queries: usize, + pub cycles_used: u32, +} + +impl VmExecutionResult { + pub fn error_message(&self) -> Option<String> { + self.revert_reason + .as_ref() + .map(|result| result.revert_reason.to_string()) + } +} + +#[derive(Debug, PartialEq)] +pub struct VmBlockResult { + /// Result for the whole block execution. + pub full_result: VmExecutionResult, + /// Result for the block tip execution. + pub block_tip_result: VmPartialExecutionResult, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmPartialExecutionResult { + pub logs: VmExecutionLogs, + pub revert_reason: Option<TxRevertReason>, + pub contracts_used: usize, + pub cycles_used: u32, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmTxExecutionResult { + pub status: TxExecutionStatus, + pub result: VmPartialExecutionResult, + // Gas refunded to the user at the end of the transaction + pub gas_refunded: u32, + // Gas proposed by the operator to be refunded, before the postOp call. + // This value is needed to correctly recover memory of the bootloader. + pub operator_suggested_refund: u32, +}
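Since the VM only exposes the gas remaining in the current frame, `gas_used` is necessarily a before/after difference, as the doc comment above stresses. A tiny standalone model of that accounting (hypothetical numbers, not the real VM types):

```rust
// Toy model of the before/after gas accounting: per-invocation usage is always
// computed as a difference, and its meaning depends on what ran in between.
struct MockVm {
    gas_remaining: u32,
}

impl MockVm {
    fn run(&mut self, cost: u32) {
        self.gas_remaining -= cost;
    }
}

fn main() {
    let mut vm = MockVm { gas_remaining: 1_000_000 };
    let gas_before = vm.gas_remaining;
    vm.run(21_000); // whatever the bootloader happened to execute
    let gas_used = gas_before - vm.gas_remaining;
    assert_eq!(gas_used, 21_000);
}
```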
+#[derive(Debug, Clone, Copy, PartialEq)] +pub enum VmExecutionStopReason { + VmFinished, + TracerRequestedStop, +} + +use crate::utils::VmExecutionResult as NewVmExecutionResult; + +fn vm_may_have_ended_inner( + vm: &VmState< + StorageOracle, + SimpleMemory, + InMemoryEventSink, + PrecompilesProcessorWithHistory<false>, + DecommitterOracle<false>, + DummyTracer, + >, +) -> Option<NewVmExecutionResult> { + let execution_has_ended = vm.execution_has_ended(); + + let r1 = vm.local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; + let current_address = vm.local_state.callstack.get_current_stack().this_address; + + let outer_eh_location = <EncodingModeProduction as VmEncodingMode<8>>::PcOrImm::MAX.as_u64(); + match ( + execution_has_ended, + vm.local_state.callstack.get_current_stack().pc.as_u64(), + ) { + (true, 0) => { + let returndata = dump_memory_page_using_primitive_value(vm.memory, r1); + + Some(NewVmExecutionResult::Ok(returndata)) + } + (false, _) => None, + (true, l) if l == outer_eh_location => { + // check r1,r2,r3 + if vm.local_state.flags.overflow_or_less_than_flag { + Some(NewVmExecutionResult::Panic) + } else { + let returndata = dump_memory_page_using_primitive_value(vm.memory, r1); + Some(NewVmExecutionResult::Revert(returndata)) + } + } + (_, a) => Some(NewVmExecutionResult::MostLikelyDidNotFinish( + current_address, + a as u16, + )), + } +} + +// This method returns a `VmExecutionResult` struct, but some of the fields are left empty. +// +// The `gas_before` argument is used to calculate the amount of gas spent by the transaction. +// It is required because the same VM instance is continuously used to apply several transactions. +fn vm_may_have_ended(vm: &VmInstance, gas_before: u32) -> Option<VmExecutionResult> { + let basic_execution_result = vm_may_have_ended_inner(&vm.state)?; + + let gas_used = gas_before - vm.gas_remaining(); + + match basic_execution_result { + NewVmExecutionResult::Ok(mut data) => { + while data.len() % 32 != 0 { + data.push(0) + } + Some(VmExecutionResult { + // The correct `events` value for this field should be set separately + // later on based on the information inside the event_sink oracle. + events: vec![], + storage_log_queries: vm.get_final_log_queries(), + used_contract_hashes: vm.get_used_contracts(), + l2_to_l1_logs: vec![], + return_data: bytes_to_be_words(data), + gas_used, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: None, + trace: VmExecutionTrace::default(), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }) + } + NewVmExecutionResult::Revert(data) => { + let revert_reason = VmRevertReasonParsingResult::new( + TxRevertReason::parse_error(data.as_slice()), + data, + ); + + // Check if error indicates a bug in server/vm/bootloader. + if matches!( + revert_reason.revert_reason, + TxRevertReason::UnexpectedVMBehavior(_) + ) { + vlog::error!( + "Observed error that should never happen: {:?}.
Full VM data: {:?}", + revert_reason, + vm + ); + } + + Some(VmExecutionResult { + events: vec![], + storage_log_queries: vm.get_final_log_queries(), + used_contract_hashes: vm.get_used_contracts(), + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: Some(revert_reason), + trace: VmExecutionTrace::default(), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }) + } + // Panic is effectively the same as Revert, but has a different nature. + NewVmExecutionResult::Panic => Some(VmExecutionResult { + events: vec![], + storage_log_queries: vec![], + used_contract_hashes: vec![], + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::Unknown(VmRevertReason::VmError), + original_data: vec![], + }), + trace: VmExecutionTrace::default(), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }), + NewVmExecutionResult::MostLikelyDidNotFinish(_, _) => { + // The execution has not ended yet. It should either continue + // or throw an out-of-gas error. + None + } + } +} + +/// A snapshot of the VM that holds enough information to +/// roll back the VM to some historical state. +#[derive(Debug, Clone)] +pub struct VmSnapshot { + local_state: VmLocalState, + bootloader_state: BootloaderState, +} + +impl<'a> VmInstance<'a> { + fn has_ended(&self) -> bool { + match vm_may_have_ended_inner(&self.state) { + None | Some(NewVmExecutionResult::MostLikelyDidNotFinish(_, _)) => false, + Some( + NewVmExecutionResult::Ok(_) + | NewVmExecutionResult::Revert(_) + | NewVmExecutionResult::Panic, + ) => true, + } + } + + fn revert_reason(&self) -> Option<VmRevertReasonParsingResult> { + match vm_may_have_ended_inner(&self.state) { + None + | Some( + NewVmExecutionResult::MostLikelyDidNotFinish(_, _) | NewVmExecutionResult::Ok(_), + ) => None, + Some(NewVmExecutionResult::Revert(data)) => { + let revert_reason = VmRevertReasonParsingResult::new( + TxRevertReason::parse_error(data.as_slice()), + data, + ); + + // Check if error indicates a bug in server/vm/bootloader. + if matches!( + revert_reason.revert_reason, + TxRevertReason::UnexpectedVMBehavior(_) + ) { + vlog::error!( + "Observed error that should never happen: {:?}. Full VM data: {:?}", + revert_reason, + self + ); + } + + Some(revert_reason) + } + Some(NewVmExecutionResult::Panic) => Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::Unknown(VmRevertReason::VmError), + original_data: vec![], + }), + } + } + + /// Saves the snapshot of the current state of the VM that can be used + /// to roll back its state later on. + pub fn save_current_vm_as_snapshot(&mut self) { + self.snapshots.push(VmSnapshot { + // Vm local state contains O(1) various parameters (registers/etc). + // The only "expensive" copying here is copying of the callstack. + // It will take O(callstack_depth) to copy it. + // So it is generally recommended to get snapshots of the bootloader frame, + // where the depth is 1. + local_state: self.state.local_state.clone(), + bootloader_state: self.bootloader_state.clone(), + }); + }
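A minimal standalone model (heavily simplified, with hypothetical fields) of this snapshot/rollback scheme; it mirrors `save_current_vm_as_snapshot` and `rollback_to_latest_snapshot_popping` but copies only a toy state rather than the oracle histories:

```rust
// Toy snapshot scheme: push a copy of the cheap-to-clone state, and rollback
// restores (and pops) the most recent copy.
#[derive(Clone, Debug, PartialEq)]
struct LocalState {
    timestamp: u32,
    callstack_depth: usize,
}

struct MockVm {
    state: LocalState,
    snapshots: Vec<LocalState>,
}

impl MockVm {
    fn save_snapshot(&mut self) {
        // Cheapest at a bootloader frame, where the callstack depth is 1.
        self.snapshots.push(self.state.clone());
    }

    fn rollback_to_latest_snapshot_popping(&mut self) {
        self.state = self.snapshots.pop().expect("no snapshot to roll back to");
    }
}

fn main() {
    let mut vm = MockVm {
        state: LocalState { timestamp: 1024, callstack_depth: 1 },
        snapshots: vec![],
    };
    vm.save_snapshot();
    vm.state = LocalState { timestamp: 2048, callstack_depth: 1 }; // failed tx attempt
    vm.rollback_to_latest_snapshot_popping();
    assert_eq!(vm.state, LocalState { timestamp: 1024, callstack_depth: 1 });
}
```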
+ fn rollback_to_snapshot(&mut self, snapshot: VmSnapshot) { + let VmSnapshot { + local_state, + bootloader_state, + } = snapshot; + + let timestamp = Timestamp(local_state.timestamp); + + vlog::trace!("Rolling back decommitter"); + self.state + .decommittment_processor + .rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back event_sink"); + self.state.event_sink.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back storage"); + self.state.storage.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back memory"); + self.state.memory.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back precompiles_processor"); + self.state + .precompiles_processor + .rollback_to_timestamp(timestamp); + self.state.local_state = local_state; + self.bootloader_state = bootloader_state; + } + + /// Rolls back the state of the VM to the state of the latest snapshot. + pub fn rollback_to_latest_snapshot(&mut self) { + let snapshot = self.snapshots.last().cloned().unwrap(); + self.rollback_to_snapshot(snapshot); + } + + /// Rolls back the state of the VM to the state of the latest snapshot. + /// Removes that snapshot from the list. + pub fn rollback_to_latest_snapshot_popping(&mut self) { + let snapshot = self.snapshots.pop().unwrap(); + self.rollback_to_snapshot(snapshot); + } + + /// Returns the amount of gas remaining to the VM. + /// Note that this *does not* correspond to the gas limit of a transaction. + /// To calculate the amount of gas spent by transaction, you should call this method before and after + /// the execution, and subtract these values. + /// + /// Note: this method should only be called when either the transaction is fully completed or the VM completed + /// its execution. The remaining gas value is read from the current stack frame, so if you attempt to + /// read it during the transaction execution, you may receive an invalid value. + pub(crate) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining + } + + /// Returns the amount of gas consumed by the VM so far (based on the `gas_limit` provided + /// to initiate the virtual machine). + /// + /// Note: this method should only be called when either the transaction is fully completed or the VM completed + /// its execution. The remaining gas value is read from the current stack frame, so if you attempt to + /// read it during the transaction execution, you may receive an invalid value.
+ pub fn gas_consumed(&self) -> u32 { + self.gas_limit - self.gas_remaining() + } + + pub(crate) fn collect_events_and_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec<VmEvent>, Vec<L2ToL1Log>) { + let (raw_events, l1_messages) = self + .state + .event_sink + .get_events_and_l2_l1_logs_after_timestamp(from_timestamp); + let events = merge_events(raw_events) + .into_iter() + .map(|e| e.into_vm_event(L1BatchNumber(self.block_context.context.block_number))) + .collect(); + ( + events, + l1_messages + .into_iter() + .map(|log| { + L2ToL1Log::from(GlueInto::< + zksync_types::zk_evm::reference_impls::event_sink::EventMessage, + >::glue_into(log)) + }) + .collect(), + ) + } + + fn collect_execution_logs_after_timestamp(&self, from_timestamp: Timestamp) -> VmExecutionLogs { + let storage_logs = collect_storage_log_queries_after_timestamp( + &self + .state + .storage + .frames_stack + .inner() + .current_frame() + .forward, + from_timestamp, + ); + let storage_logs_count = storage_logs.len(); + + let (events, l2_to_l1_logs) = + self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + + let log_queries = collect_log_queries_after_timestamp( + &self + .state + .event_sink + .frames_stack + .inner() + .current_frame() + .forward, + from_timestamp, + ); + + let precompile_calls_count = precompile_calls_count_after_timestamp( + self.state.precompiles_processor.timestamp_history.inner(), + from_timestamp, + ); + VmExecutionLogs { + storage_logs, + events, + l2_to_l1_logs, + total_log_queries_count: storage_logs_count + + log_queries.len() + + precompile_calls_count, + } + } + + // Returns a tuple of `VmExecutionStopReason` and the size of the refund proposed by the operator + fn execute_with_custom_tracer_and_refunds< + T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + >( + &mut self, + tracer: &mut T, + ) -> (VmExecutionStopReason, u32) { + let mut operator_refund = None; + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + + loop { + // Sanity check: we should never reach the maximum value, because then we won't be able to process the next cycle. + assert_ne!( + self.state.local_state.monotonic_cycle_counter, + u32::MAX, + "VM reached maximum possible amount of cycles. Vm state: {:?}", + self.state + ); + + let timestamp_before_cycle = self.state.local_state.timestamp; + self.state.cycle(tracer); + + if self.has_ended() { + return ( + VmExecutionStopReason::VmFinished, + operator_refund.unwrap_or_default(), + ); + } + + if let Some(bootloader_refund) = tracer.requested_refund() { + assert!( + operator_refund.is_none(), + "Operator was asked for refund two times" + ); + + let refund_to_propose; + let refund_slot; + match self.refund_state { + MultiVMSubversion::V1 => { + refund_to_propose = bootloader_refund; + refund_slot = + OPERATOR_REFUNDS_OFFSET + self.bootloader_state.tx_to_execute() - 1; + } + MultiVMSubversion::V2 => { + let gas_spent_on_pubdata = tracer + .gas_spent_on_pubdata(&self.state.local_state) + - spent_pubdata_counter_before; + let tx_body_refund = self.tx_body_refund( + timestamp_initial, + bootloader_refund, + gas_spent_on_pubdata, + ); + + if tx_body_refund < bootloader_refund { + vlog::error!( + "Suggested tx body refund is less than bootloader refund.
Tx body refund: {}, bootloader refund: {}", + tx_body_refund, + bootloader_refund + ); + } + + refund_to_propose = tx_body_refund + + self.block_overhead_refund( + timestamp_initial, + gas_remaining_before, + gas_spent_on_pubdata, + ); + + let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + refund_slot = OPERATOR_REFUNDS_OFFSET + current_tx_index; + } + }; + + // Writing the refund into memory + self.state.memory.memory.write_to_memory( + BOOTLOADER_HEAP_PAGE as usize, + refund_slot, + Some(PrimitiveValue { + value: refund_to_propose.into(), + is_pointer: false, + }), + Timestamp(timestamp_before_cycle), + ); + operator_refund = Some(refund_to_propose); + tracer.set_refund_as_done(); + } + + if tracer.should_stop_execution() { + return ( + VmExecutionStopReason::TracerRequestedStop, + operator_refund.unwrap_or_default(), + ); + } + } + } + + // Executes VM until the end or the tracer says to stop. + pub(crate) fn execute_with_custom_tracer< + T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + >( + &mut self, + tracer: &mut T, + ) -> VmExecutionStopReason { + self.execute_with_custom_tracer_and_refunds(tracer).0 + } + + // Err when the transaction is rejected. + // Ok(status: TxExecutionStatus::Success) when the transaction succeeded. + // Ok(status: TxExecutionStatus::Failure) when the transaction failed. + // Note that failed transactions are considered properly processed and are included in blocks + pub fn execute_next_tx(&mut self) -> Result<VmTxExecutionResult, TxRevertReason> { + let tx_index = self.bootloader_state.next_unexecuted_tx() as u32; + let mut tx_tracer = OneTxTracer::default(); + + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + + let (stop_reason, operator_suggested_refund) = + self.execute_with_custom_tracer_and_refunds(&mut tx_tracer); + match stop_reason { + VmExecutionStopReason::VmFinished => { + // The bootloader panicked or reverted; this means either the transaction was rejected + // (e.g. not enough fee or an incorrect signature) or the bootloader ran out of gas. + + // Collect generated events to show bootloader debug logs. + let _ = self.collect_events_and_l1_logs_after_timestamp(timestamp_initial); + + let error = if tx_tracer.is_bootloader_out_of_gas() { + TxRevertReason::BootloaderOutOfGas + } else { + self.revert_reason() + .expect("vm ended execution prematurely, but no revert reason is given") + .revert_reason + }; + Err(error) + } + VmExecutionStopReason::TracerRequestedStop => { + if tx_tracer.tx_has_been_processed() { + let tx_execution_status = + TxExecutionStatus::from_has_failed(tx_has_failed(&self.state, tx_index)); + let vm_execution_logs = + self.collect_execution_logs_after_timestamp(timestamp_initial); + + Ok(VmTxExecutionResult { + gas_refunded: tx_tracer.refund_gas, + operator_suggested_refund, + status: tx_execution_status, + result: VmPartialExecutionResult { + logs: vm_execution_logs, + // If there is a revert, Err is already returned above. + revert_reason: None, + // getting contracts used during this transaction + // at least for now the number returned here is always <= the number + // of the code hashes actually used by the transaction, since it might've + // reused bytecode hashes from some of the previous ones.
+ contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter + - cycles_initial, + }, + }) + } else { + // VM ended up in state `stop_reason == VmExecutionStopReason::TracerRequestedStop && !tx_tracer.tx_has_been_processed()`. + // It means that the bootloader successfully finished its execution without executing the transaction. + // It is an unexpected situation. + panic!("VM successfully finished executing bootloader but transaction wasn't executed"); + } + } + } + } + + /// Returns full VM result and partial result produced within the current execution. + pub fn execute_till_block_end(&mut self, job_type: BootloaderJobType) -> VmBlockResult { + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_before = self.gas_remaining(); + + let mut tx_result_tracer = TransactionResultTracer::default(); + let stop_reason = self.execute_with_custom_tracer(&mut tx_result_tracer); + match stop_reason { + VmExecutionStopReason::VmFinished => { + let mut full_result = vm_may_have_ended(self, gas_before).unwrap(); + + if job_type == BootloaderJobType::TransactionExecution + && tx_has_failed(&self.state, 0) + && full_result.revert_reason.is_none() + { + let revert_reason = tx_result_tracer + .revert_reason + .map(|reason| { + let vm_revert_reason = VmRevertReason::try_from(reason.as_slice()) + .unwrap_or_else(|_| VmRevertReason::Unknown { + function_selector: vec![], + data: reason.clone(), + }); + + VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(vm_revert_reason), + original_data: reason, + } + }) + .unwrap_or_else(|| VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "Transaction reverted with empty reason. Possibly out of gas" + .to_string(), + }), + original_data: vec![], + }); + + full_result.revert_reason = Some(revert_reason); + } + + let block_tip_result = VmPartialExecutionResult { + logs: self.collect_execution_logs_after_timestamp(timestamp_initial), + revert_reason: full_result.revert_reason.clone().map(|r| r.revert_reason), + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + }; + + // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` + // after because draining will drop timestamps. + let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + full_result.events = merge_events(raw_events) + .into_iter() + .map(|e| { + e.into_vm_event(L1BatchNumber(self.block_context.context.block_number)) + }) + .collect(); + full_result.l2_to_l1_logs = l1_messages + .into_iter() + .map(|log| { + L2ToL1Log::from(GlueInto::< + zksync_types::zk_evm::reference_impls::event_sink::EventMessage, + >::glue_into(log)) + }) + .collect(); + VmBlockResult { + full_result, + block_tip_result, + } + } + VmExecutionStopReason::TracerRequestedStop => { + unreachable!("NoopMemoryTracer will never stop execution until the block ends") + } + } + } + + /// Unlike `execute_till_block_end`, this method returns only the result of the block tip execution.
+ pub fn execute_block_tip(&mut self) -> VmPartialExecutionResult { + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let mut bootloader_tracer = BootloaderTracer::default(); + + let stop_reason = self.execute_with_custom_tracer(&mut bootloader_tracer); + let revert_reason = match stop_reason { + VmExecutionStopReason::VmFinished => { + // Bootloader panicked or reverted. + let revert_reason = if bootloader_tracer.is_bootloader_out_of_gas() { + TxRevertReason::BootloaderOutOfGas + } else { + self.revert_reason() + .expect("vm ended execution prematurely, but no revert reason is given") + .revert_reason + }; + Some(revert_reason) + } + VmExecutionStopReason::TracerRequestedStop => { + // Bootloader finished successfully. + None + } + }; + VmPartialExecutionResult { + logs: self.collect_execution_logs_after_timestamp(timestamp_initial), + revert_reason, + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + } + } + + pub fn execute_validation( + &mut self, + validation_params: ValidationTracerParams, + ) -> Result<(), ValidationError> { + let mut validation_tracer = ValidationTracer::new( + self.state.storage.storage.inner().get_ptr(), + validation_params, + ); + + let stop_reason = self.execute_with_custom_tracer(&mut validation_tracer); + + match (stop_reason, validation_tracer.validation_error) { + (VmExecutionStopReason::VmFinished, _) => { + // The tx should only end in case of a revert, so it is safe to unwrap here + Err(ValidationError::FailedTx(self.revert_reason().unwrap())) + } + (VmExecutionStopReason::TracerRequestedStop, Some(err)) => { + Err(ValidationError::VioalatedRule(err)) + } + (VmExecutionStopReason::TracerRequestedStop, None) => Ok(()), + } + } + + // Returns the final log queries; should only be called when there is just one frame in the execution trace. + fn get_final_log_queries(&self) -> Vec<StorageLogQuery> { + assert_eq!( + self.state.storage.frames_stack.inner().len(), + 1, + "VM finished execution in unexpected state" + ); + + let result = self + .state + .storage + .frames_stack + .inner() + .current_frame() + .forward + .clone(); + + result + } + + fn get_used_contracts(&self) -> Vec<U256> { + self.state + .decommittment_processor + .known_bytecodes + .inner() + .keys() + .cloned() + .collect() + } + + pub fn number_of_updated_storage_slots(&self) -> usize { + self.state + .storage + .storage + .inner() + .get_ptr() + .borrow_mut() + .number_of_updated_storage_slots() + } +} + +// Reads the bootloader memory and checks whether the execution step of the transaction +// has failed.
+pub(crate) fn tx_has_failed(state: &ZkSyncVmState<'_>, tx_id: u32) -> bool { + let mem_slot = RESULT_SUCCESS_FIRST_SLOT + tx_id; + let mem_value = state + .memory + .dump_page_content_as_u256_words(BOOTLOADER_HEAP_PAGE, mem_slot..mem_slot + 1)[0]; + + mem_value == U256::zero() +} diff --git a/core/multivm_deps/vm_m5/src/vm_with_bootloader.rs b/core/multivm_deps/vm_m5/src/vm_with_bootloader.rs new file mode 100644 index 000000000000..5f9546f39ff8 --- /dev/null +++ b/core/multivm_deps/vm_m5/src/vm_with_bootloader.rs @@ -0,0 +1,582 @@ +use std::{collections::HashMap, time::Instant}; + +use zk_evm::{ + abstractions::{MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES}, + aux_structures::{MemoryPage, Timestamp}, + block_properties::BlockProperties, + vm_state::{CallStackEntry, PrimitiveValue, VmState}, + zkevm_opcode_defs::{ + system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, FatPointer, BOOTLOADER_BASE_PAGE, + BOOTLOADER_CALLDATA_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, + }, +}; +use zksync_config::constants::MAX_TXS_IN_BLOCK; +use zksync_contracts::BaseSystemContracts; + +use zksync_types::{ + zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, + L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, +}; +use zksync_utils::{ + address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, +}; + +use crate::{ + bootloader_state::BootloaderState, + oracles::OracleWithHistory, + transaction_data::TransactionData, + utils::{ + code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, + }, + vm::{MultiVMSubversion, ZkSyncVmState}, + OracleTools, VmInstance, +}; + +pub const BLOCK_OVERHEAD_GAS: u32 = 1200000; +pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; +pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; + +pub const MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT: u32 = 300_000_000; + +/// `BlockContext` is a structure that contains parameters for +/// a block that are used as input for the bootloader and not the VM per se. +/// +/// These values are generally unique for each block (the exception is the operator's address). +#[derive(Clone, Debug, Copy)] +pub struct BlockContext { + pub block_number: u32, + pub block_timestamp: u64, + pub operator_address: Address, + pub l1_gas_price: u64, + pub fair_l2_gas_price: u64, +} + +/// Besides the raw values from the `BlockContext`, contains the values that are to be derived +/// from the other values. +#[derive(Debug, Copy, Clone)] +pub struct DerivedBlockContext { + pub context: BlockContext, + pub base_fee: u64, +} + +pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { + // This value will typically be a lot less than the u64 maximum, + // unless the gas price on L1 goes beyond tens of millions of gwei + l1_gas_price * (L1_GAS_PER_PUBDATA_BYTE as u64) +} + +pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { + let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); + + ceil_div(eth_price_per_pubdata_byte, base_fee) +} + +pub fn derive_base_fee_and_gas_per_pubdata(l1_gas_price: u64, fair_gas_price: u64) -> (u64, u64) { + let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); + + // The baseFee is set in such a way that it is always possible for a transaction to + // publish enough public data while compensating us for it.
+pub fn derive_base_fee_and_gas_per_pubdata(l1_gas_price: u64, fair_gas_price: u64) -> (u64, u64) {
+    let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price);
+
+    // The baseFee is set in such a way that it is always possible for a transaction to
+    // publish enough public data while compensating us for it.
+    let base_fee = std::cmp::max(
+        fair_gas_price,
+        ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE),
+    );
+
+    (
+        base_fee,
+        base_fee_to_gas_per_pubdata(l1_gas_price, base_fee),
+    )
+}
+
+impl From<BlockContext> for DerivedBlockContext {
+    fn from(context: BlockContext) -> Self {
+        let base_fee =
+            derive_base_fee_and_gas_per_pubdata(context.l1_gas_price, context.fair_l2_gas_price).0;
+
+        DerivedBlockContext { context, base_fee }
+    }
+}
+
+// The first 32 slots are reserved for debugging purposes
+pub const DEBUG_SLOTS_OFFSET: usize = 8;
+pub const DEBUG_FIRST_SLOTS: usize = 32;
+// The next 33 slots are reserved for dealing with the paymaster context (1 slot for storing length + 32 slots for storing the actual context).
+pub const PAYMASTER_CONTEXT_SLOTS: usize = 32 + 1;
+// The next PAYMASTER_CONTEXT_SLOTS + 7 free slots are needed before each tx, so that the
+// postOp operation could be encoded correctly.
+pub const MAX_POSTOP_SLOTS: usize = PAYMASTER_CONTEXT_SLOTS + 7;
+
+// Slots used to store the current L2 transaction's hash and the hash recommended
+// to be used for signing the transaction's content.
+const CURRENT_L2_TX_HASHES_SLOTS: usize = 2;
+
+// Slots used to store the calldata for the KnownCodesStorage to mark new factory
+// dependencies as known ones. Besides the slots for the new factory dependencies themselves,
+// another 4 slots are needed for: the selector, the marker of whether the user should pay for
+// the pubdata, the offset for the encoding of the array, and the length of the array.
+pub const NEW_FACTORY_DEPS_RESERVED_SLOTS: usize = MAX_NEW_FACTORY_DEPS + 4;
+
+// The operator can provide for each transaction the proposed minimal refund
+pub const OPERATOR_REFUNDS_SLOTS: usize = MAX_TXS_IN_BLOCK;
+
+pub const OPERATOR_REFUNDS_OFFSET: usize = DEBUG_SLOTS_OFFSET
+    + DEBUG_FIRST_SLOTS
+    + PAYMASTER_CONTEXT_SLOTS
+    + CURRENT_L2_TX_HASHES_SLOTS
+    + NEW_FACTORY_DEPS_RESERVED_SLOTS;
+
+pub const TX_OVERHEAD_OFFSET: usize = OPERATOR_REFUNDS_OFFSET + OPERATOR_REFUNDS_SLOTS;
+pub const TX_OVERHEAD_SLOTS: usize = MAX_TXS_IN_BLOCK;
+
+pub const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = TX_OVERHEAD_OFFSET + TX_OVERHEAD_SLOTS;
+
+// The size of the bootloader memory dedicated to the encodings of transactions
+pub const BOOTLOADER_TX_ENCODING_SPACE: u32 =
+    (MAX_HEAP_PAGE_SIZE_IN_WORDS - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BLOCK) as u32;
+
+// Size of the bootloader tx description in words
+pub const BOOTLOADER_TX_DESCRIPTION_SIZE: usize = 2;
+
+// The actual descriptions of transactions should start after the minor descriptions and
+// MAX_POSTOP_SLOTS free slots to allow postOp encoding.
+pub const TX_DESCRIPTION_OFFSET: usize = BOOTLOADER_TX_DESCRIPTION_OFFSET
+    + BOOTLOADER_TX_DESCRIPTION_SIZE * MAX_TXS_IN_BLOCK
+    + MAX_POSTOP_SLOTS;
+
+pub const TX_GAS_LIMIT_OFFSET: usize = 4;
+
+pub(crate) const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0;
+const BOOTLOADER_CODE_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITIAL_BASE_PAGE)).0;
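The offsets above form a chain in which each region starts where the previous one ends, so the layout can be recomputed with plain `const` arithmetic. A small sketch, with assumed values for the two protocol constants that come from other crates (`MAX_TXS_IN_BLOCK = 1024` and `MAX_NEW_FACTORY_DEPS = 32` are illustrative here):

```rust
// Recomputing the head of the bootloader heap layout with assumed constants.
const MAX_TXS_IN_BLOCK: usize = 1024; // assumed, really from zksync_config
const MAX_NEW_FACTORY_DEPS: usize = 32; // assumed, really from zksync_types

const DEBUG_SLOTS_OFFSET: usize = 8;
const DEBUG_FIRST_SLOTS: usize = 32;
const PAYMASTER_CONTEXT_SLOTS: usize = 32 + 1;
const CURRENT_L2_TX_HASHES_SLOTS: usize = 2;
const NEW_FACTORY_DEPS_RESERVED_SLOTS: usize = MAX_NEW_FACTORY_DEPS + 4;

const OPERATOR_REFUNDS_OFFSET: usize = DEBUG_SLOTS_OFFSET
    + DEBUG_FIRST_SLOTS
    + PAYMASTER_CONTEXT_SLOTS
    + CURRENT_L2_TX_HASHES_SLOTS
    + NEW_FACTORY_DEPS_RESERVED_SLOTS;

fn main() {
    // Each region starts where the previous one ends; printing the offsets
    // makes the tiling visible.
    println!("operator refunds start at word {OPERATOR_REFUNDS_OFFSET}");
    println!(
        "tx overheads start at word {}",
        OPERATOR_REFUNDS_OFFSET + MAX_TXS_IN_BLOCK
    );
}
```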
+/// Enum denoting the *in-server* execution mode for the bootloader transactions.
+///
+/// If `EthCall` mode is chosen, the bootloader will use `mimicCall` opcode
+/// to simulate the call instead of using the standard `execute` method of account.
+/// This is needed to be able to behave equivalently to Ethereum without much overhead for custom account builders.
+/// With `VerifyExecute` mode, the transaction will be executed normally.
+/// With `EstimateFee`, the bootloader will be used that has the same behavior
+/// as the full `VerifyExecute` block, but errors in the account validation will be ignored.
+#[derive(Debug, Clone, Copy)]
+pub enum TxExecutionMode {
+    VerifyExecute,
+    EstimateFee,
+    EthCall,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum BootloaderJobType {
+    TransactionExecution,
+    BlockPostprocessing,
+}
+
+impl Default for TxExecutionMode {
+    fn default() -> Self {
+        Self::VerifyExecute
+    }
+}
+
+pub fn init_vm<'a>(
+    refund_state: MultiVMSubversion,
+    oracle_tools: &'a mut OracleTools<'a, false>,
+    block_context: BlockContextMode,
+    block_properties: &'a BlockProperties,
+    execution_mode: TxExecutionMode,
+    base_system_contract: &BaseSystemContracts,
+) -> Box<VmInstance<'a>> {
+    init_vm_with_gas_limit(
+        refund_state,
+        oracle_tools,
+        block_context,
+        block_properties,
+        execution_mode,
+        base_system_contract,
+        BLOCK_GAS_LIMIT,
+    )
+}
+
+pub fn init_vm_with_gas_limit<'a>(
+    refund_state: MultiVMSubversion,
+    oracle_tools: &'a mut OracleTools<'a, false>,
+    block_context: BlockContextMode,
+    block_properties: &'a BlockProperties,
+    execution_mode: TxExecutionMode,
+    base_system_contract: &BaseSystemContracts,
+    gas_limit: u32,
+) -> Box<VmInstance<'a>> {
+    init_vm_inner(
+        refund_state,
+        oracle_tools,
+        block_context,
+        block_properties,
+        gas_limit,
+        base_system_contract,
+        execution_mode,
+    )
+}
+#[derive(Debug, Clone, Copy)]
+// The block.number/block.timestamp data are stored in the CONTEXT_SYSTEM_CONTRACT.
+// The bootloader can support execution in two modes:
+// - "NewBlock" when the new block is created. It is enforced that the block.number is incremented by 1
+//   and the timestamp is non-decreasing. Also, the L2->L1 message used to verify the correctness of the previous root hash is sent.
+//   This is the mode that should be used in the state keeper.
+// - "OverrideCurrent" when we need to provide custom block.number and block.timestamp. ONLY to be used in testing/ethCalls.
+pub enum BlockContextMode {
+    NewBlock(DerivedBlockContext, U256),
+    OverrideCurrent(DerivedBlockContext),
+}
+
+impl BlockContextMode {
+    const OPERATOR_ADDRESS_SLOT: usize = 0;
+    const PREV_BLOCK_HASH_SLOT: usize = 1;
+    const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2;
+    const NEW_BLOCK_NUMBER_SLOT: usize = 3;
+    const L1_GAS_PRICE_SLOT: usize = 4;
+    const FAIR_L2_GAS_PRICE_SLOT: usize = 5;
+    const EXPECTED_BASE_FEE_SLOT: usize = 6;
+    const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7;
+
+    // Returns the block context slots that should be put into the bootloader memory,
+    // including the previous block hash and the timestamp. If the "should set new block"
+    // flag is 0, the bootloader will not attempt to start a new block and will continue
+    // using the existing block properties.
+    fn bootloader_block_params(&self) -> Vec<(usize, U256)> {
+        let DerivedBlockContext { context, base_fee } = self.inner_block_context();
+
+        let mut base_params: HashMap<usize, U256> = vec![
+            (
+                Self::OPERATOR_ADDRESS_SLOT,
+                address_to_u256(&context.operator_address),
+            ),
+            (Self::PREV_BLOCK_HASH_SLOT, Default::default()),
+            (
+                Self::NEW_BLOCK_TIMESTAMP_SLOT,
+                U256::from(context.block_timestamp),
+            ),
+            (
+                Self::NEW_BLOCK_NUMBER_SLOT,
+                U256::from(context.block_number),
+            ),
+            (Self::L1_GAS_PRICE_SLOT, U256::from(context.l1_gas_price)),
+            (
+                Self::FAIR_L2_GAS_PRICE_SLOT,
+                U256::from(context.fair_l2_gas_price),
+            ),
+            (Self::EXPECTED_BASE_FEE_SLOT, U256::from(base_fee)),
+            (Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(0u32)),
+        ]
+        .into_iter()
+        .collect();
+
+        match *self {
+            BlockContextMode::OverrideCurrent(_) => base_params.into_iter().collect(),
+            BlockContextMode::NewBlock(_, prev_block_hash) => {
+                base_params.insert(Self::PREV_BLOCK_HASH_SLOT, prev_block_hash);
+                base_params.insert(Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(1u32));
+                base_params.into_iter().collect()
+            }
+        }
+    }
+
+    pub fn inner_block_context(&self) -> DerivedBlockContext {
+        match *self {
+            BlockContextMode::OverrideCurrent(props) => props,
+            BlockContextMode::NewBlock(props, _) => props,
+        }
+    }
+
+    pub fn timestamp(&self) -> u64 {
+        self.inner_block_context().context.block_timestamp
+    }
+}
+
+// This method accepts a custom bootloader code.
+// It should be used only in tests.
+pub fn init_vm_inner<'a>(
+    refund_state: MultiVMSubversion,
+    oracle_tools: &'a mut OracleTools<'a, false>,
+    block_context: BlockContextMode,
+    block_properties: &'a BlockProperties,
+    gas_limit: u32,
+    base_system_contract: &BaseSystemContracts,
+    execution_mode: TxExecutionMode,
+) -> Box<VmInstance<'a>> {
+    let start = Instant::now();
+
+    oracle_tools.decommittment_processor.populate(
+        vec![(
+            h256_to_u256(base_system_contract.default_aa.hash),
+            base_system_contract.default_aa.code.clone(),
+        )],
+        Timestamp(0),
+    );
+
+    oracle_tools.memory.populate(
+        vec![(
+            BOOTLOADER_CODE_PAGE,
+            base_system_contract.bootloader.code.clone(),
+        )],
+        Timestamp(0),
+    );
+
+    oracle_tools.memory.populate_page(
+        BOOTLOADER_HEAP_PAGE as usize,
+        bootloader_initial_memory(&block_context),
+        Timestamp(0),
+    );
+
+    let state = get_default_local_state(oracle_tools, block_properties, gas_limit);
+
+    let vm = Box::new(VmInstance {
+        refund_state,
+        gas_limit,
+        state,
+        execution_mode,
+        block_context: block_context.inner_block_context(),
+        bootloader_state: BootloaderState::new(),
+        snapshots: Vec::new(),
+    });
+
+    metrics::histogram!("server.vm.init", start.elapsed());
+    vm
+}
+
+fn bootloader_initial_memory(block_properties: &BlockContextMode) -> Vec<(usize, U256)> {
+    block_properties.bootloader_block_params()
+}
+
+pub fn get_bootloader_memory(
+    txs: Vec<TransactionData>,
+    predefined_refunds: Vec<u32>,
+    execution_mode: TxExecutionMode,
+    block_context: BlockContextMode,
+) -> Vec<(usize, U256)> {
+    let mut memory = bootloader_initial_memory(&block_context);
+
+    let mut already_included_txs_size = 0;
+    for (tx_index_in_block, tx) in txs.into_iter().enumerate() {
+        let memory_for_current_tx = get_bootloader_memory_for_tx(
+            tx.clone(),
+            tx_index_in_block,
+            execution_mode,
+            already_included_txs_size,
+            predefined_refunds[tx_index_in_block],
+        );
+        memory.extend(memory_for_current_tx);
+        let encoded_struct = tx.into_tokens();
+        let encoding_length = encoded_struct.len();
+        already_included_txs_size += encoding_length;
+    }
+    memory
+}
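The eight context slots above form the head of the bootloader heap, and `NewBlock` mode simply overrides two of them. A self-contained sketch of that patching step, with `u128` standing in for `U256` and made-up values throughout:

```rust
use std::collections::HashMap;

// Slot indices as declared on `BlockContextMode`.
const PREV_BLOCK_HASH_SLOT: usize = 1;
const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7;

// Stand-in for the base params: slot -> value (values are illustrative only).
fn base_params() -> HashMap<usize, u128> {
    HashMap::from([
        (0, 0xabcd),         // operator address
        (1, 0),              // previous block hash (unset by default)
        (2, 1_690_000_000),  // timestamp
        (3, 42),             // block number
        (4, 2_500_000_000),  // L1 gas price
        (5, 250_000_000),    // fair L2 gas price
        (6, 250_000_000),    // expected base fee
        (7, 0),              // "should set new block" flag
    ])
}

fn main() {
    // "NewBlock" mode: patch in the previous block hash and raise the flag.
    let mut params = base_params();
    params.insert(PREV_BLOCK_HASH_SLOT, 0x1234);
    params.insert(SHOULD_SET_NEW_BLOCK_SLOT, 1);
    assert_eq!(params[&SHOULD_SET_NEW_BLOCK_SLOT], 1);
}
```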
+pub fn push_transaction_to_bootloader_memory(
+    vm: &mut VmInstance,
+    tx: &Transaction,
+    execution_mode: TxExecutionMode,
+) {
+    let tx: TransactionData = tx.clone().into();
+    let overhead = tx.overhead_gas();
+    push_raw_transaction_to_bootloader_memory(vm, tx, execution_mode, overhead);
+}
+
+pub fn push_raw_transaction_to_bootloader_memory(
+    vm: &mut VmInstance,
+    tx: TransactionData,
+    execution_mode: TxExecutionMode,
+    predefined_overhead: u32,
+) {
+    let tx_index_in_block = vm.bootloader_state.free_tx_index();
+    let already_included_txs_size = vm.bootloader_state.free_tx_offset();
+
+    let timestamp = Timestamp(vm.state.local_state.timestamp);
+    let codes_for_decommiter = tx
+        .factory_deps
+        .iter()
+        .map(|dep| bytecode_to_factory_dep(dep.clone()))
+        .collect();
+    vm.state
+        .decommittment_processor
+        .populate(codes_for_decommiter, timestamp);
+
+    let encoded_tx = tx.into_tokens();
+    let encoded_tx_size = encoded_tx.len();
+
+    let bootloader_memory = get_bootloader_memory_for_encoded_tx(
+        encoded_tx,
+        tx_index_in_block,
+        execution_mode,
+        already_included_txs_size,
+        0,
+        predefined_overhead,
+    );
+
+    vm.state.memory.populate_page(
+        BOOTLOADER_HEAP_PAGE as usize,
+        bootloader_memory,
+        Timestamp(vm.state.local_state.timestamp),
+    );
+    vm.bootloader_state.add_tx_data(encoded_tx_size);
+}
+
+fn get_bootloader_memory_for_tx(
+    tx: TransactionData,
+    tx_index_in_block: usize,
+    execution_mode: TxExecutionMode,
+    already_included_txs_size: usize,
+    predefined_refund: u32,
+) -> Vec<(usize, U256)> {
+    let overhead_gas = tx.overhead_gas();
+    get_bootloader_memory_for_encoded_tx(
+        tx.into_tokens(),
+        tx_index_in_block,
+        execution_mode,
+        already_included_txs_size,
+        predefined_refund,
+        overhead_gas,
+    )
+}
+
+pub(crate) fn get_bootloader_memory_for_encoded_tx(
+    encoded_tx: Vec<U256>,
+    tx_index_in_block: usize,
+    execution_mode: TxExecutionMode,
+    already_included_txs_size: usize,
+    predefined_refund: u32,
+    predefined_overhead: u32,
+) -> Vec<(usize, U256)> {
+    let mut memory: Vec<(usize, U256)> = Vec::default();
+    let bootloader_description_offset =
+        BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * tx_index_in_block;
+
+    let tx_description_offset = TX_DESCRIPTION_OFFSET + already_included_txs_size;
+
+    // Marking that this transaction should be executed.
+    memory.push((
+        bootloader_description_offset,
+        assemble_tx_meta(execution_mode, true),
+    ));
+    memory.push((
+        bootloader_description_offset + 1,
+        U256::from_big_endian(&(32 * tx_description_offset).to_be_bytes()),
+    ));
+
+    let refund_offset = OPERATOR_REFUNDS_OFFSET + tx_index_in_block;
+    memory.push((refund_offset, predefined_refund.into()));
+
+    let overhead_offset = TX_OVERHEAD_OFFSET + tx_index_in_block;
+    memory.push((overhead_offset, predefined_overhead.into()));
+
+    // Now we need to actually put the transaction description:
+    let encoding_length = encoded_tx.len();
+    memory.extend((tx_description_offset..tx_description_offset + encoding_length).zip(encoded_tx));
+
+    memory
+}
+
+fn get_default_local_state<'a>(
+    tools: &'a mut OracleTools<'a, false>,
+    block_properties: &'a BlockProperties,
+    gas_limit: u32,
+) -> ZkSyncVmState<'a> {
+    let mut vm = VmState::empty_state(
+        &mut tools.storage,
+        &mut tools.memory,
+        &mut tools.event_sink,
+        &mut tools.precompiles_processor,
+        &mut tools.decommittment_processor,
+        &mut tools.witness_tracer,
+        block_properties,
+    );
+
+    let initial_context = CallStackEntry {
+        this_address: BOOTLOADER_ADDRESS,
+        msg_sender: Address::zero(),
+        code_address: BOOTLOADER_ADDRESS,
+        base_memory_page: MemoryPage(BOOTLOADER_BASE_PAGE),
+        code_page: MemoryPage(BOOTLOADER_CODE_PAGE),
+        sp: 0,
+        pc: 0,
+        // Note: since the results are written at the end of the memory,
+        // the entire heap needs to be available from the beginning.
+        heap_bound: MAX_MEMORY_BYTES as u32,
+        aux_heap_bound: MAX_MEMORY_BYTES as u32,
+        exception_handler_location: INITIAL_FRAME_FORMAL_EH_LOCATION,
+        ergs_remaining: gas_limit,
+        this_shard_id: 0,
+        caller_shard_id: 0,
+        code_shard_id: 0,
+        is_static: false,
+        is_local_frame: false,
+        context_u128_value: 0,
+    };
+
+    // We consider the contract that is being run as a bootloader
+    vm.push_bootloader_context(INITIAL_MONOTONIC_CYCLE_COUNTER - 1, initial_context);
+    vm.local_state.timestamp = STARTING_TIMESTAMP;
+    vm.local_state.memory_page_counter = STARTING_BASE_PAGE;
+    vm.local_state.monotonic_cycle_counter = INITIAL_MONOTONIC_CYCLE_COUNTER;
+    vm.local_state.current_ergs_per_pubdata_byte = 0;
+    vm.local_state.registers[0] = formal_calldata_abi();
+
+    // Deleting all the historical records brought by the initial
+    // initialization of the VM, to make its starting state permanent.
+    vm.decommittment_processor.delete_history();
+    vm.event_sink.delete_history();
+    vm.storage.delete_history();
+    vm.memory.delete_history();
+    vm.precompiles_processor.delete_history();
+
+    vm
+}
+
+fn formal_calldata_abi() -> PrimitiveValue {
+    let fat_pointer = FatPointer {
+        offset: 0,
+        memory_page: BOOTLOADER_CALLDATA_PAGE,
+        start: 0,
+        length: 0,
+    };
+
+    PrimitiveValue {
+        value: fat_pointer.to_u256(),
+        is_pointer: true,
+    }
+}
+
+pub(crate) fn bytecode_to_factory_dep(bytecode: Vec<u8>) -> (U256, Vec<U256>) {
+    let bytecode_hash = hash_bytecode(&bytecode);
+    let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes());
+
+    let bytecode_words = bytes_to_be_words(bytecode);
+
+    (bytecode_hash, bytecode_words)
+}
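Factory deps are handed to the decommitter as a `(hash, words)` pair, where the words are 32-byte big-endian chunks of the bytecode. A self-contained sketch of the word-splitting half (the real hash comes from `zksync_utils::bytecode::hash_bytecode` and is omitted here):

```rust
// Splits a bytecode into 32-byte big-endian words, mirroring what
// `bytes_to_be_words` does before the code is fed to the decommitter.
// Assumes the input length is already a multiple of 32, as zkEVM bytecodes are.
fn bytes_to_be_words(bytes: &[u8]) -> Vec<[u8; 32]> {
    assert!(bytes.len() % 32 == 0, "bytecode length must be a multiple of 32");
    bytes.chunks(32).map(|chunk| chunk.try_into().unwrap()).collect()
}

fn main() {
    // A dummy 64-byte "bytecode" (2 words); the real code would also hash it.
    let bytecode = vec![0xAB; 64];
    let words = bytes_to_be_words(&bytecode);
    assert_eq!(words.len(), 2);
}
```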
+/// Forms a word that contains meta information for the transaction execution.
+///
+/// # Current layout
+///
+/// - 0 byte (MSB): server-side tx execution mode.
+///   In the server, we may want to execute different parts of the transaction in different contexts.
+///   For example, when checking validity, we don't want to actually execute the transaction and have side effects.
+///
+///   Possible values:
+///   - 0x00: validate & execute (normal mode)
+///   - 0x01: validate but DO NOT execute
+///   - 0x02: execute but DO NOT validate
+///
+/// - 31 byte (LSB): whether to execute the transaction or not (at all).
+fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 {
+    let mut output = [0u8; 32];
+
+    // Set 0 byte (execution mode)
+    output[0] = match execution_mode {
+        TxExecutionMode::VerifyExecute => 0x00,
+        TxExecutionMode::EstimateFee => 0x00,
+        TxExecutionMode::EthCall => 0x02,
+    };
+
+    // Set 31 byte (marker for tx execution)
+    output[31] = u8::from(execute_tx);
+
+    U256::from_big_endian(&output)
+}
diff --git a/core/multivm_deps/vm_m6/Cargo.toml b/core/multivm_deps/vm_m6/Cargo.toml
new file mode 100644
index 000000000000..756da12ff412
--- /dev/null
+++ b/core/multivm_deps/vm_m6/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "vm_m6"
+version = "0.1.0"
+edition = "2018"
+authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
+homepage = "https://zksync.io/"
+repository = "https://github.com/matter-labs/zksync-2"
+license = "Apache-2.0"
+keywords = ["blockchain", "zksync"]
+categories = ["cryptography"]
+
+[dependencies]
+zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "v1.3.1" }
+zksync_crypto = { path = "../../lib/crypto", version = "1.0" }
+zksync_types = { path = "../../lib/types", version = "1.0" }
+zksync_utils = { path = "../../lib/utils", version = "1.0" }
+zksync_config = { path = "../../lib/config", version = "1.0" }
+zksync_state = { path = "../../lib/state", version = "1.0" }
+zksync_storage = { path = "../../lib/storage", version = "1.0" }
+
+zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.1" }
+zksync_contracts = { path = "../../lib/contracts" }
+
+hex = "0.4"
+thiserror = "1.0"
+itertools = "0.10"
+once_cell = "1.7"
+vlog = { path = "../../lib/vlog", version = "1.0" }
+metrics = "0.20"
+
+tracing = "0.1"
+
+[dev-dependencies]
+tempfile = "3.0.2"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
diff --git a/core/multivm_deps/vm_m6/README.md b/core/multivm_deps/vm_m6/README.md
new file mode 100644
index 000000000000..deb11bb51371
--- /dev/null
+++ b/core/multivm_deps/vm_m6/README.md
@@ -0,0 +1,41 @@
+# VM Crate
+
+This crate contains code that interacts with the VM; the VM itself lives in a separate repo: internal
+[zk_evm][zk_evm_repo] or external [era-zk_evm][zk_evm_repo_ext].
+
+## VM dependencies
+
+The VM relies on several subcomponents or traits, such as Memory and Storage. These traits are defined in the `zk_evm`
+repo, while their implementations can be found in this crate, like the storage implementation in `oracles/storage.rs`
+and the Memory implementation in `memory.rs`.
+
+Many of these implementations also support easy rollbacks and history, which is useful when creating a block with
+multiple transactions and needing to return the VM to a previous state if a transaction doesn't fit.
+
+### Tracers
+
+The VM implementation allows for the addition of `Tracers`, which are activated before and after each instruction. This
+gives a more in-depth look into the VM, collecting detailed debugging information and logs. More details can be found
+in the `tracer/` directory.
+
+## Running the VM
+
+To interact with the VM, first create it using methods in `vm_with_bootloader.rs`, such as `init_vm()`. Then, inject a
+transaction using `push_transaction_to_bootloader_memory()` and execute the VM, for example using
+`execute_till_block_end()` from `vm.rs`.
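As a rough illustration of that flow (names follow the functions mentioned above; the setup of oracle tools, block context, block properties, and base system contracts is elided, `MultiVMSubversion::latest()` is a hypothetical placeholder, and exact signatures may differ between VM versions):

```rust
// Sketch only: assumes `oracle_tools`, `block_context`, `block_properties`,
// `base_system_contracts`, and a transaction `tx` are already set up.
let mut vm = init_vm(
    MultiVMSubversion::latest(), // hypothetical selector for the VM subversion
    &mut oracle_tools,
    block_context,
    &block_properties,
    TxExecutionMode::VerifyExecute,
    &base_system_contracts,
);
push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute);
let result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution);
```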
+ +### Bootloader + +In the context of zkEVM, we usually think about transactions. However, from the VM's perspective, it runs a single +program called the bootloader, which internally processes multiple transactions. + +### Rollbacks + +The `VMInstance` in `vm.rs` allows for easy rollbacks. You can save the current state at any moment by calling +`save_current_vm_as_snapshot()` and return to that state using `rollback_to_latest_snapshot()`. + +This rollback affects all subcomponents, like memory, storage, and events, and is mainly used if a transaction doesn't +fit in a block. + +[zk_evm_repo]: https://github.com/matter-labs/zk_evm 'internal zk EVM repo' +[zk_evm_repo_ext]: https://github.com/matter-labs/era-zk_evm 'external zk EVM repo' diff --git a/core/multivm_deps/vm_m6/fuzz/.gitignore b/core/multivm_deps/vm_m6/fuzz/.gitignore new file mode 100644 index 000000000000..1a45eee7760d --- /dev/null +++ b/core/multivm_deps/vm_m6/fuzz/.gitignore @@ -0,0 +1,4 @@ +target +corpus +artifacts +coverage diff --git a/core/multivm_deps/vm_m6/fuzz/Cargo.toml b/core/multivm_deps/vm_m6/fuzz/Cargo.toml new file mode 100644 index 000000000000..fb659b9c6284 --- /dev/null +++ b/core/multivm_deps/vm_m6/fuzz/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "vm-fuzz" +version = "0.0.0" +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +vm-benchmark = {path = "../../../tests/vm-benchmark"} +zksync_types = {path = "../../types"} + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "deploy_transaction" +path = "fuzz_targets/deploy_transaction.rs" +test = false +doc = false diff --git a/core/multivm_deps/vm_m6/fuzz/compare.sh b/core/multivm_deps/vm_m6/fuzz/compare.sh new file mode 100644 index 000000000000..6edf91e091a3 --- /dev/null +++ b/core/multivm_deps/vm_m6/fuzz/compare.sh @@ -0,0 +1,3 @@ +gdb ./bad --command=show_cycle.gdb -ex "show_cycle $1" > badout +gdb ./good --command=show_cycle.gdb -ex "show_cycle $1" > goodout +delta goodout badout --max-line-length 100000 diff --git a/core/multivm_deps/vm_m6/fuzz/fuzz.sh b/core/multivm_deps/vm_m6/fuzz/fuzz.sh new file mode 100644 index 000000000000..3b5b5e89f9cb --- /dev/null +++ b/core/multivm_deps/vm_m6/fuzz/fuzz.sh @@ -0,0 +1 @@ +cargo +nightly fuzz run --no-cfg-fuzzing --strip-dead-code --sanitizer none --release deploy_transaction diff --git a/core/multivm_deps/vm_m6/fuzz/fuzz_targets/deploy_transaction.rs b/core/multivm_deps/vm_m6/fuzz/fuzz_targets/deploy_transaction.rs new file mode 100644 index 000000000000..e116eeecae96 --- /dev/null +++ b/core/multivm_deps/vm_m6/fuzz/fuzz_targets/deploy_transaction.rs @@ -0,0 +1,27 @@ +#![no_main] + +use libfuzzer_sys::fuzz_target; +use vm_benchmark::{BenchmarkingVm, get_deploy_tx}; +use zksync_types::tx::tx_execution_info::TxExecutionStatus::Success; + +fuzz_target!(|input: &[u8]| { + if let Some(contract_code) = cut_to_allowed_bytecode_size(input) { + if let Ok(x) = BenchmarkingVm::new().run_transaction(&get_deploy_tx(contract_code)) { + if x.status == Success { + panic!("managed to produce valid code!"); + } + } + } +}); + +fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { + let mut words = bytes.len() / 32; + if words == 0 { + return None; + } + + if words & 1 == 0 { + words -= 1; + } + Some(&bytes[..32 * words]) +} diff --git a/core/multivm_deps/vm_m6/fuzz/show_cycle.gdb b/core/multivm_deps/vm_m6/fuzz/show_cycle.gdb new file mode 100644 index 
000000000000..7069afaa34a8
--- /dev/null
+++ b/core/multivm_deps/vm_m6/fuzz/show_cycle.gdb
@@ -0,0 +1,8 @@
+set pagination off
+
+define show_cycle
+break cycle.rs:395 if self.local_state.monotonic_cycle_counter == $arg0
+run
+p self.local_state.registers
+quit
+end
diff --git a/core/multivm_deps/vm_m6/src/bootloader_state.rs b/core/multivm_deps/vm_m6/src/bootloader_state.rs
new file mode 100644
index 000000000000..2ecb845dfa64
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/bootloader_state.rs
@@ -0,0 +1,120 @@
+use crate::vm_with_bootloader::TX_DESCRIPTION_OFFSET;
+
+/// Intermediate bootloader-related VM state.
+///
+/// Required to process transactions one by one (since we intercept the VM execution to execute
+/// transactions and add new ones to the memory on the fly).
+/// Think about it like a two-pointer scheme: one pointer (`free_tx_index`) tracks the end of the
+/// initialized memory, while another (`tx_to_execute`) tracks our progress in this initialized memory.
+/// This is required since it's possible to push several transactions to the bootloader memory and then
+/// execute them one by one.
+///
+/// Serves two purposes:
+/// - Tracks where the next tx should be pushed to in the bootloader memory.
+/// - Tracks which transaction should be executed next.
+#[derive(Debug, Default, Clone)]
+pub(crate) struct BootloaderState {
+    /// Memory offset (in words) for the next transaction data.
+    free_tx_offset: usize,
+    /// ID of the next transaction to be executed.
+    /// See the structure doc-comment for a better explanation of purpose.
+    tx_to_execute: usize,
+    /// Vector that contains sizes of all pushed transactions.
+    tx_sizes: Vec<usize>,
+
+    /// The number of 32-byte words spent on the already included compressed bytecodes.
+    compressed_bytecodes_encoding: usize,
+}
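The two counters are easiest to see in isolation. A minimal, self-contained re-creation of the push/execute bookkeeping described above (not the crate's type, just the same two-pointer idea):

```rust
// A stripped-down version of the two-pointer bookkeeping: `free_offset` grows
// when transactions are pushed, `next_to_execute` trails behind as they run.
#[derive(Default)]
struct TwoPointerState {
    free_offset: usize,     // words already claimed by pushed txs
    next_to_execute: usize, // index of the next pushed-but-unexecuted tx
    sizes: Vec<usize>,
}

impl TwoPointerState {
    fn push(&mut self, size: usize) {
        self.free_offset += size;
        self.sizes.push(size);
    }

    fn execute_next(&mut self) -> usize {
        assert!(self.next_to_execute < self.sizes.len(), "nothing left to execute");
        let idx = self.next_to_execute;
        self.next_to_execute += 1;
        idx
    }
}

fn main() {
    let mut state = TwoPointerState::default();
    state.push(2);
    state.push(4); // two txs pushed ahead of time...
    assert_eq!(state.execute_next(), 0); // ...then executed one by one
    assert_eq!(state.execute_next(), 1);
    assert_eq!(state.free_offset, 6);
}
```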
+impl BootloaderState {
+    /// Creates an empty bootloader state.
+    pub(crate) fn new() -> Self {
+        Self::default()
+    }
+
+    /// Notifies the state about the fact that a new transaction was pushed into the memory.
+    pub(crate) fn add_tx_data(&mut self, tx_size: usize) {
+        self.free_tx_offset += tx_size;
+        self.tx_sizes.push(tx_size);
+    }
+
+    /// Returns the next "free" transaction index.
+    pub(crate) fn free_tx_index(&self) -> usize {
+        self.tx_sizes.len()
+    }
+
+    /// Returns the next index of transaction to execute.
+    pub(crate) fn tx_to_execute(&self) -> usize {
+        self.tx_to_execute
+    }
+
+    /// Returns the memory offset for the new transaction.
+    pub(crate) fn free_tx_offset(&self) -> usize {
+        self.free_tx_offset
+    }
+
+    /// Returns the ID of the next transaction to be executed and increments the local transaction counter.
+    pub(crate) fn next_unexecuted_tx(&mut self) -> usize {
+        assert!(
+            self.tx_to_execute < self.tx_sizes.len(),
+            "Attempt to execute tx that was not pushed to memory. Tx ID: {}, txs in bootloader: {}",
+            self.tx_to_execute,
+            self.tx_sizes.len()
+        );
+
+        let old = self.tx_to_execute;
+        self.tx_to_execute += 1;
+        old
+    }
+
+    /// Returns the size of the transaction with the given index.
+    /// Panics if there is no such transaction.
+    #[allow(dead_code)]
+    pub(crate) fn get_tx_size(&self, tx_index: usize) -> usize {
+        self.tx_sizes[tx_index]
+    }
+
+    pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize {
+        TX_DESCRIPTION_OFFSET + self.tx_sizes.iter().take(tx_index).sum::<usize>()
+    }
+
+    pub(crate) fn add_compressed_bytecode(&mut self, bytecode_compression_encoding_length: usize) {
+        self.compressed_bytecodes_encoding += bytecode_compression_encoding_length;
+    }
+
+    pub(crate) fn get_compressed_bytecodes(&self) -> usize {
+        self.compressed_bytecodes_encoding
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::BootloaderState;
+
+    #[test]
+    fn workflow() {
+        let mut state = BootloaderState::new();
+        assert_eq!(state.free_tx_index(), 0);
+        assert_eq!(state.free_tx_offset(), 0);
+
+        state.add_tx_data(2);
+        assert_eq!(state.free_tx_index(), 1);
+        assert_eq!(state.free_tx_offset(), 2);
+
+        state.add_tx_data(4);
+        assert_eq!(state.free_tx_index(), 2);
+        assert_eq!(state.free_tx_offset(), 6);
+
+        assert_eq!(state.next_unexecuted_tx(), 0);
+        assert_eq!(state.next_unexecuted_tx(), 1);
+    }
+
+    #[test]
+    #[should_panic(
+        expected = "Attempt to execute tx that was not pushed to memory. Tx ID: 0, txs in bootloader: 0"
+    )]
+    fn get_not_pushed_tx() {
+        let mut state = BootloaderState::new();
+        state.next_unexecuted_tx();
+    }
+}
diff --git a/core/multivm_deps/vm_m6/src/errors/bootloader_error.rs b/core/multivm_deps/vm_m6/src/errors/bootloader_error.rs
new file mode 100644
index 000000000000..bfbef44a42bd
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/errors/bootloader_error.rs
@@ -0,0 +1,58 @@
+#[derive(Debug)]
+pub(crate) enum BootloaderErrorCode {
+    EthCall,
+    AccountTxValidationFailed,
+    FailedToChargeFee,
+    FromIsNotAnAccount,
+    FailedToCheckAccount,
+    UnacceptableGasPrice,
+    PayForTxFailed,
+    PrePaymasterPreparationFailed,
+    PaymasterValidationFailed,
+    FailedToSendFeesToTheOperator,
+    FailedToSetPrevBlockHash,
+    UnacceptablePubdataPrice,
+    TxValidationError,
+    MaxPriorityFeeGreaterThanMaxFee,
+    BaseFeeGreaterThanMaxFeePerGas,
+    PaymasterReturnedInvalidContext,
+    PaymasterContextIsTooLong,
+    AssertionError,
+    FailedToMarkFactoryDeps,
+    TxValidationOutOfGas,
+    NotEnoughGasProvided,
+    AccountReturnedInvalidMagic,
+    PaymasterReturnedInvalidMagic,
+    Unknown,
+}
+
+impl From<u8> for BootloaderErrorCode {
+    fn from(code: u8) -> BootloaderErrorCode {
+        match code {
+            0 => BootloaderErrorCode::EthCall,
+            1 => BootloaderErrorCode::AccountTxValidationFailed,
+            2 => BootloaderErrorCode::FailedToChargeFee,
+            3 => BootloaderErrorCode::FromIsNotAnAccount,
+            4 => BootloaderErrorCode::FailedToCheckAccount,
+            5 => BootloaderErrorCode::UnacceptableGasPrice,
+            6 => BootloaderErrorCode::FailedToSetPrevBlockHash,
+            7 => BootloaderErrorCode::PayForTxFailed,
+            8 => BootloaderErrorCode::PrePaymasterPreparationFailed,
+            9 => BootloaderErrorCode::PaymasterValidationFailed,
+            10 => BootloaderErrorCode::FailedToSendFeesToTheOperator,
+            11 => BootloaderErrorCode::UnacceptablePubdataPrice,
+            12 => BootloaderErrorCode::TxValidationError,
+            13 => BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee,
+            14 => BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas,
+            15 => BootloaderErrorCode::PaymasterReturnedInvalidContext,
+            16 => BootloaderErrorCode::PaymasterContextIsTooLong,
+            17 => BootloaderErrorCode::AssertionError,
+            18 => BootloaderErrorCode::FailedToMarkFactoryDeps,
+            19 => BootloaderErrorCode::TxValidationOutOfGas,
+            20 => BootloaderErrorCode::NotEnoughGasProvided,
+            21 => BootloaderErrorCode::AccountReturnedInvalidMagic,
+            22 =>
BootloaderErrorCode::PaymasterReturnedInvalidMagic, + _ => BootloaderErrorCode::Unknown, + } + } +} diff --git a/core/multivm_deps/vm_m6/src/errors/mod.rs b/core/multivm_deps/vm_m6/src/errors/mod.rs new file mode 100644 index 000000000000..462330b41f98 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/errors/mod.rs @@ -0,0 +1,9 @@ +mod bootloader_error; +mod tx_revert_reason; +mod vm_revert_reason; + +pub(crate) use bootloader_error::BootloaderErrorCode; +pub use tx_revert_reason::TxRevertReason; +pub use vm_revert_reason::{ + VmRevertReason, VmRevertReasonParsingError, VmRevertReasonParsingResult, +}; diff --git a/core/multivm_deps/vm_m6/src/errors/tx_revert_reason.rs b/core/multivm_deps/vm_m6/src/errors/tx_revert_reason.rs new file mode 100644 index 000000000000..4775d8339f79 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/errors/tx_revert_reason.rs @@ -0,0 +1,215 @@ +use std::{convert::TryFrom, fmt::Display}; + +use super::{BootloaderErrorCode, VmRevertReason}; + +// Note that currently only EthCall transactions have valid Revert Reason. +// Same transaction executed in bootloader will just have `InnerTxError`. +// Reasons why the transaction executed inside the bootloader could fail. +#[derive(Debug, Clone, PartialEq)] +pub enum TxRevertReason { + // Can only be returned in EthCall execution mode (=ExecuteOnly) + EthCall(VmRevertReason), + // Returned when the execution of an L2 transaction has failed + TxReverted(VmRevertReason), + // Can only be returned in VerifyAndExecute + ValidationFailed(VmRevertReason), + PaymasterValidationFailed(VmRevertReason), + PrePaymasterPreparationFailed(VmRevertReason), + PayForTxFailed(VmRevertReason), + FailedToMarkFactoryDependencies(VmRevertReason), + FailedToChargeFee(VmRevertReason), + // Emitted when trying to call a transaction from an account that has not + // been deployed as an account (i.e. the `from` is just a contract). + // Can only be returned in VerifyAndExecute + FromIsNotAnAccount, + // Currently cannot be returned. Should be removed when refactoring errors. + InnerTxError, + Unknown(VmRevertReason), + // Temporarily used instead of panics to provide better experience for developers: + // their transaction would simply be rejected and they'll be able to provide + // information about the cause to us. + UnexpectedVMBehavior(String), + // Bootloader is out of gas. + BootloaderOutOfGas, + // Transaction has a too big gas limit and will not be executed by the server. + TooBigGasLimit, + // The bootloader did not have enough gas to start the transaction in the first place + NotEnoughGasProvided, + // The tx consumes too much missing invocations to memory + MissingInvocationLimitReached, +} + +impl TxRevertReason { + pub fn parse_error(bytes: &[u8]) -> Self { + // The first 32 bytes should correspond with error code. + // If the error is smaller than that, we will use a standardized bootloader error. + if bytes.is_empty() { + return Self::UnexpectedVMBehavior("Bootloader returned an empty error".to_string()); + } + + let (error_code, error_msg) = bytes.split_at(1); + let revert_reason = match VmRevertReason::try_from(error_msg) { + Ok(reason) => reason, + Err(_) => { + let function_selector = if error_msg.len() >= 4 { + error_msg[0..4].to_vec() + } else { + error_msg.to_vec() + }; + + let data = if error_msg.len() > 4 { + error_msg[4..].to_vec() + } else { + vec![] + }; + + VmRevertReason::Unknown { + function_selector, + data, + } + } + }; + + // `error_code` is a big-endian number, so we can safely take the first byte of it. 
+ match BootloaderErrorCode::from(error_code[0]) { + BootloaderErrorCode::EthCall => Self::EthCall(revert_reason), + BootloaderErrorCode::AccountTxValidationFailed => Self::ValidationFailed(revert_reason), + BootloaderErrorCode::FailedToChargeFee => Self::FailedToChargeFee(revert_reason), + BootloaderErrorCode::FromIsNotAnAccount => Self::FromIsNotAnAccount, + BootloaderErrorCode::FailedToCheckAccount => Self::ValidationFailed(VmRevertReason::General { + msg: "Failed to check if `from` is an account. Most likely not enough gas provided".to_string(), + data: vec![], + }), + BootloaderErrorCode::UnacceptableGasPrice => Self::UnexpectedVMBehavior( + "The operator included transaction with an unacceptable gas price".to_owned(), + ), + BootloaderErrorCode::PrePaymasterPreparationFailed => { + Self::PrePaymasterPreparationFailed(revert_reason) + } + BootloaderErrorCode::PaymasterValidationFailed => { + Self::PaymasterValidationFailed(revert_reason) + } + BootloaderErrorCode::FailedToSendFeesToTheOperator => { + Self::UnexpectedVMBehavior("FailedToSendFeesToTheOperator".to_owned()) + } + BootloaderErrorCode::FailedToSetPrevBlockHash => { + panic!( + "The bootloader failed to set previous block hash. Reason: {}", + revert_reason + ) + } + BootloaderErrorCode::UnacceptablePubdataPrice => { + Self::UnexpectedVMBehavior("UnacceptablePubdataPrice".to_owned()) + } + // This is different from AccountTxValidationFailed error in a way that it means that + // the error was not produced by the account itself, but for some other unknown reason (most likely not enough gas) + BootloaderErrorCode::TxValidationError => Self::ValidationFailed(revert_reason), + // Note, that `InnerTxError` is derived only after the actual tx execution, so + // it is not parsed here. Unknown error means that bootloader failed by a reason + // that was not specified by the protocol: + BootloaderErrorCode::MaxPriorityFeeGreaterThanMaxFee => { + Self::UnexpectedVMBehavior("Max priority fee greater than max fee".to_owned()) + } + BootloaderErrorCode::PaymasterReturnedInvalidContext => { + Self::PaymasterValidationFailed(VmRevertReason::General { + msg: String::from("Paymaster returned invalid context"), + data: vec![], + }) + } + BootloaderErrorCode::PaymasterContextIsTooLong => { + Self::PaymasterValidationFailed(VmRevertReason::General { + msg: String::from("Paymaster returned context that is too long"), + data: vec![], + }) + } + BootloaderErrorCode::AssertionError => { + Self::UnexpectedVMBehavior(format!("Assertion error: {}", revert_reason)) + } + BootloaderErrorCode::BaseFeeGreaterThanMaxFeePerGas => Self::UnexpectedVMBehavior( + "Block.basefee is greater than max fee per gas".to_owned(), + ), + BootloaderErrorCode::PayForTxFailed => { + Self::PayForTxFailed(revert_reason) + }, + BootloaderErrorCode::FailedToMarkFactoryDeps => { + let (msg, data) = if let VmRevertReason::General { msg , data} = revert_reason { + (msg, data) + } else { + (String::from("Most likely not enough gas provided"), vec![]) + }; + Self::FailedToMarkFactoryDependencies(VmRevertReason::General { + msg, data + }) + }, + BootloaderErrorCode::TxValidationOutOfGas => { + Self::ValidationFailed(VmRevertReason::General { msg: String::from("Not enough gas for transaction validation"), data: vec![] }) + }, + BootloaderErrorCode::NotEnoughGasProvided => { + Self::NotEnoughGasProvided + }, + BootloaderErrorCode::AccountReturnedInvalidMagic => { + Self::ValidationFailed(VmRevertReason::General { msg: String::from("Account validation returned invalid magic value. 
Most often this means that the signature is incorrect"), data: vec![] })
+            },
+            BootloaderErrorCode::PaymasterReturnedInvalidMagic => {
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Paymaster validation returned invalid magic value. Please refer to the documentation of the paymaster for more details"), data: vec![] })
+            }
+            BootloaderErrorCode::Unknown => Self::UnexpectedVMBehavior(format!(
+                "Unsupported error code: {}. Revert reason: {}",
+                error_code[0], revert_reason
+            )),
+        }
+    }
+}
+
+impl Display for TxRevertReason {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match &self {
+            // EthCall reason is usually returned unchanged.
+            TxRevertReason::EthCall(reason) => write!(f, "{}", reason),
+            TxRevertReason::TxReverted(reason) => write!(f, "{}", reason),
+            TxRevertReason::ValidationFailed(reason) => {
+                write!(f, "Account validation error: {}", reason)
+            }
+            TxRevertReason::FailedToChargeFee(reason) => {
+                write!(f, "Failed to charge fee: {}", reason)
+            }
+            // Emitted when trying to call a transaction from an account that has not
+            // been deployed as an account (i.e. the `from` is just a contract).
+            TxRevertReason::FromIsNotAnAccount => write!(f, "Sender is not an account"),
+            TxRevertReason::InnerTxError => write!(f, "Bootloader-based tx failed"),
+            TxRevertReason::PaymasterValidationFailed(reason) => {
+                write!(f, "Paymaster validation error: {}", reason)
+            }
+            TxRevertReason::PrePaymasterPreparationFailed(reason) => {
+                write!(f, "Pre-paymaster preparation error: {}", reason)
+            }
+            TxRevertReason::Unknown(reason) => write!(f, "Unknown reason: {}", reason),
+            TxRevertReason::UnexpectedVMBehavior(problem) => {
+                write!(f,
+                    "virtual machine entered unexpected state. Please contact developers and provide transaction details \
+                    that caused this error. Error description: {problem}"
+                )
+            }
+            TxRevertReason::BootloaderOutOfGas => write!(f, "Bootloader out of gas"),
+            TxRevertReason::NotEnoughGasProvided => write!(
+                f,
+                "Bootloader did not have enough gas to start the transaction"
+            ),
+            TxRevertReason::FailedToMarkFactoryDependencies(reason) => {
+                write!(f, "Failed to mark factory dependencies: {}", reason)
+            }
+            TxRevertReason::PayForTxFailed(reason) => {
+                write!(f, "Failed to pay for the transaction: {}", reason)
+            }
+            TxRevertReason::TooBigGasLimit => {
+                write!(
+                    f,
+                    "Transaction has a too big ergs limit and will not be executed by the server"
+                )
+            }
+            TxRevertReason::MissingInvocationLimitReached => {
+                write!(f, "Tx produced too many cold storage accesses")
+            }
+        }
+    }
+}
diff --git a/core/multivm_deps/vm_m6/src/errors/vm_revert_reason.rs b/core/multivm_deps/vm_m6/src/errors/vm_revert_reason.rs
new file mode 100644
index 000000000000..93d6cd370705
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/errors/vm_revert_reason.rs
@@ -0,0 +1,250 @@
+use std::convert::TryFrom;
+use std::fmt::{Debug, Display};
+
+use zksync_types::U256;
+
+use crate::TxRevertReason;
+
+#[derive(Debug, thiserror::Error)]
+pub enum VmRevertReasonParsingError {
+    #[error("Incorrect data offset. Data: {0:?}")]
+    IncorrectDataOffset(Vec<u8>),
+    #[error("Input is too short. Data: {0:?}")]
+    InputIsTooShort(Vec<u8>),
+    #[error("Incorrect string length. Data: {0:?}")]
+    IncorrectStringLength(Vec<u8>),
+}
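The `Error(string)` layout that the parser below walks (4-byte selector, 32-byte data offset, 32-byte string length, then the UTF-8 bytes) can be exercised standalone. A deliberately simplified sketch that builds such a payload and parses the message back with plain slice arithmetic instead of `U256`:

```rust
// Builds a Solidity `Error(string)` revert payload and parses the message back,
// following the same offsets that the crate's parser checks.
fn encode_error_string(msg: &str) -> Vec<u8> {
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0]; // selector of Error(string)
    let mut offset_word = [0u8; 32];
    offset_word[31] = 32; // offset of the string data, relative to after the selector
    out.extend_from_slice(&offset_word);
    let mut len_word = [0u8; 32];
    len_word[24..].copy_from_slice(&(msg.len() as u64).to_be_bytes());
    out.extend_from_slice(&len_word);
    out.extend_from_slice(msg.as_bytes());
    // Real ABI encoding pads the tail to a 32-byte boundary; the parsing
    // below does not require it, so it is omitted for brevity.
    out
}

fn main() {
    let payload = encode_error_string("ERC20: transfer amount exceeds balance");
    let body = &payload[4..];
    let offset = u64::from_be_bytes(body[24..32].try_into().unwrap()) as usize;
    let len = u64::from_be_bytes(body[offset + 24..offset + 32].try_into().unwrap()) as usize;
    let msg = std::str::from_utf8(&body[offset + 32..offset + 32 + len]).unwrap();
    assert_eq!(msg, "ERC20: transfer amount exceeds balance");
}
```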
Data: {0:?}")] + IncorrectStringLength(Vec), +} + +/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +#[derive(Debug, Clone, PartialEq)] +pub enum VmRevertReason { + General { + msg: String, + data: Vec, + }, + InnerTxError, + VmError, + Unknown { + function_selector: Vec, + data: Vec, + }, +} + +impl VmRevertReason { + const GENERAL_ERROR_SELECTOR: &'static [u8] = &[0x08, 0xc3, 0x79, 0xa0]; + fn parse_general_error(original_bytes: &[u8]) -> Result { + let bytes = &original_bytes[4..]; + if bytes.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + } + let data_offset = U256::from_big_endian(&bytes[0..32]).as_usize(); + + // Data offset couldn't be less than 32 because data offset size is 32 bytes + // and data offset bytes are part of the offset. Also data offset couldn't be greater than + // data length + if data_offset > bytes.len() || data_offset < 32 { + return Err(VmRevertReasonParsingError::IncorrectDataOffset( + bytes.to_vec(), + )); + }; + + let data = &bytes[data_offset..]; + + if data.len() < 32 { + return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec())); + }; + + let string_length = U256::from_big_endian(&data[0..32]).as_usize(); + + if string_length + 32 > data.len() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_vec(), + )); + }; + + let raw_data = &data[32..32 + string_length]; + Ok(Self::General { + msg: String::from_utf8_lossy(raw_data).to_string(), + data: original_bytes.to_vec(), + }) + } + + pub fn to_user_friendly_string(&self) -> String { + match self { + // In case of `Unknown` reason we suppress it to prevent verbose Error function_selector = 0x{} + // message shown to user. + VmRevertReason::Unknown { .. } => "".to_owned(), + _ => self.to_string(), + } + } + + pub fn encoded_data(&self) -> Vec { + match self { + VmRevertReason::Unknown { data, .. } => data.clone(), + VmRevertReason::General { data, .. } => data.clone(), + _ => vec![], + } + } +} + +impl TryFrom<&[u8]> for VmRevertReason { + type Error = VmRevertReasonParsingError; + + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() < 4 { + // Note, that when the method reverts with no data + // the selector is empty as well. + // For now, we only accept errors with either no data or + // the data with complete selectors. + if !bytes.is_empty() { + return Err(VmRevertReasonParsingError::IncorrectStringLength( + bytes.to_owned(), + )); + } + + let result = VmRevertReason::Unknown { + function_selector: vec![], + data: bytes.to_vec(), + }; + + return Ok(result); + } + + let function_selector = &bytes[0..4]; + match function_selector { + VmRevertReason::GENERAL_ERROR_SELECTOR => Self::parse_general_error(bytes), + _ => { + let result = VmRevertReason::Unknown { + function_selector: function_selector.to_vec(), + data: bytes.to_vec(), + }; + vlog::warn!("Unsupported error type: {}", result); + Ok(result) + } + } + } +} + +impl Display for VmRevertReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use VmRevertReason::{General, InnerTxError, Unknown, VmError}; + + match self { + General { msg, .. 
} => write!(f, "{}", msg), + VmError => write!(f, "VM Error",), + InnerTxError => write!(f, "Bootloader-based tx failed"), + Unknown { + function_selector, + data, + } => write!( + f, + "Error function_selector = 0x{}, data = 0x{}", + hex::encode(function_selector), + hex::encode(data) + ), + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmRevertReasonParsingResult { + pub revert_reason: TxRevertReason, + pub original_data: Vec, +} + +impl VmRevertReasonParsingResult { + pub fn new(revert_reason: TxRevertReason, original_data: Vec) -> Self { + Self { + revert_reason, + original_data, + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use super::VmRevertReason; + + #[test] + fn revert_reason_parsing() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); + assert_eq!( + reason, + VmRevertReason::General { + msg: "ERC20: transfer amount exceeds balance".to_string(), + data: msg + } + ); + } + + #[test] + fn revert_reason_with_wrong_function_selector() { + let msg = vec![ + 8, 195, 121, 161, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); + assert!(matches!(reason, VmRevertReason::Unknown { .. 
})); + } + + #[test] + fn revert_reason_with_wrong_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_big_data_offset() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } + + #[test] + fn revert_reason_with_wrong_string_length() { + let msg = vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158, 69, 82, 67, 50, 48, 58, 32, 116, 114, 97, 110, + 115, 102, 101, 114, 32, 97, 109, 111, 117, 110, 116, 32, 101, 120, 99, 101, 101, 100, + 115, 32, 98, 97, 108, 97, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let reason = VmRevertReason::try_from(msg.as_slice()); + assert!(reason.is_err()); + } +} diff --git a/core/multivm_deps/vm_m6/src/event_sink.rs b/core/multivm_deps/vm_m6/src/event_sink.rs new file mode 100644 index 000000000000..868f06482e48 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/event_sink.rs @@ -0,0 +1,161 @@ +use crate::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, + utils::collect_log_queries_after_timestamp, +}; +use std::collections::HashMap; +use zk_evm::{ + abstractions::EventSink, + aux_structures::{LogQuery, Timestamp}, + reference_impls::event_sink::EventMessage, + zkevm_opcode_defs::system_params::{ + BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, + }, +}; + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct InMemoryEventSink { + pub frames_stack: AppDataFrameManagerWithHistory, +} + +impl OracleWithHistory for InMemoryEventSink { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.frames_stack.rollback_to_timestamp(timestamp); + } +} + +// as usual, if we rollback the current frame then we apply changes to storage immediately, +// otherwise we carry rollbacks to the parent's frames + +impl InMemoryEventSink { + pub fn flatten(&self) -> (Vec, Vec, Vec) { + assert_eq!( + self.frames_stack.len(), + 1, + "there must exist an initial keeper frame" + ); + // we forget rollbacks as we have 
finished the execution and can just apply them + let history = self.frames_stack.forward().current_frame(); + let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); + (history.to_vec(), events, l1_messages) + } + + pub fn get_log_queries(&self) -> usize { + let history = &self.frames_stack.forward().current_frame(); + history.len() + } + + pub fn get_events_and_l2_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec, Vec) { + let history = collect_log_queries_after_timestamp( + self.frames_stack.forward().current_frame(), + from_timestamp, + ); + Self::events_and_l1_messages_from_history(&history) + } + + fn events_and_l1_messages_from_history( + history: &[LogQuery], + ) -> (Vec, Vec) { + let mut tmp = HashMap::::with_capacity(history.len()); + + // note that we only use "forward" part and discard the rollbacks at the end, + // since if rollbacks of parents were not appended anywhere we just still keep them + for el in history { + // we are time ordered here in terms of rollbacks + if tmp.get(&el.timestamp.0).is_some() { + assert!(el.rollback); + tmp.remove(&el.timestamp.0); + } else { + assert!(!el.rollback); + tmp.insert(el.timestamp.0, *el); + } + } + + // naturally sorted by timestamp + let mut keys: Vec<_> = tmp.keys().cloned().collect(); + keys.sort_unstable(); + + let mut events = vec![]; + let mut l1_messages = vec![]; + + for k in keys.into_iter() { + let el = tmp.remove(&k).unwrap(); + let LogQuery { + shard_id, + is_service, + tx_number_in_block, + address, + key, + written_value, + aux_byte, + .. + } = el; + + let event = EventMessage { + shard_id, + is_first: is_service, + tx_number_in_block, + address, + key, + value: written_value, + }; + + if aux_byte == EVENT_AUX_BYTE { + events.push(event); + } else { + l1_messages.push(event); + } + } + + (events, l1_messages) + } + + pub fn get_size(&self) -> usize { + self.frames_stack.get_size() + } + + pub fn get_history_size(&self) -> usize { + self.frames_stack.get_history_size() + } + + pub fn delete_history(&mut self) { + self.frames_stack.delete_history(); + } +} + +impl EventSink for InMemoryEventSink { + // when we enter a new frame we should remember all our current applications and rollbacks + // when we exit the current frame then if we did panic we should concatenate all current + // forward and rollback cases + + fn add_partial_query(&mut self, _monotonic_cycle_counter: u32, mut query: LogQuery) { + assert!(query.rw_flag); + assert!(query.aux_byte == EVENT_AUX_BYTE || query.aux_byte == L1_MESSAGE_AUX_BYTE); + assert!(!query.rollback); + // just append to rollbacks and a full history + + self.frames_stack.push_forward(query, query.timestamp); + // we do not need it explicitly here, but let's be consistent with circuit counterpart + query.rollback = true; + self.frames_stack.push_rollback(query, query.timestamp); + } + + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp) + } + + fn finish_frame(&mut self, panicked: bool, timestamp: Timestamp) { + // if we panic then we append forward and rollbacks to the forward of parent, + // otherwise we place rollbacks of child before rollbacks of the parent + if panicked { + self.frames_stack.move_rollback_to_forward( + |q| q.address != *BOOTLOADER_FORMAL_ADDRESS || q.aux_byte != EVENT_AUX_BYTE, + timestamp, + ); + } + self.frames_stack.merge_frame(timestamp); + } +} diff --git a/core/multivm_deps/vm_m6/src/events.rs b/core/multivm_deps/vm_m6/src/events.rs new file mode 100644 index 
000000000000..0d11d9102ea2 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/events.rs @@ -0,0 +1,146 @@ +use zk_evm::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; +use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; + +#[derive(Clone)] +pub struct SolidityLikeEvent { + pub shard_id: u8, + pub tx_number_in_block: u16, + pub address: Address, + pub topics: Vec<[u8; 32]>, + pub data: Vec, +} + +impl SolidityLikeEvent { + pub fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { + VmEvent { + location: (block_number, self.tx_number_in_block as u32), + address: self.address, + indexed_topics: be_chunks_to_h256_words(self.topics), + value: self.data, + } + } +} + +fn merge_events_inner(events: Vec) -> Vec { + let mut result = vec![]; + let mut current: Option<(usize, u32, SolidityLikeEvent)> = None; + + for message in events.into_iter() { + if !message.is_first { + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + + if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = + current.take() + { + if event.address != address + || event.shard_id != shard_id + || event.tx_number_in_block != tx_number_in_block + { + continue; + } + let mut data_0 = [0u8; 32]; + let mut data_1 = [0u8; 32]; + key.to_big_endian(&mut data_0); + value.to_big_endian(&mut data_1); + for el in [data_0, data_1].iter() { + if remaining_topics != 0 { + event.topics.push(*el); + remaining_topics -= 1; + } else if remaining_data_length != 0 { + if remaining_data_length >= 32 { + event.data.extend_from_slice(el); + remaining_data_length -= 32; + } else { + event.data.extend_from_slice(&el[..remaining_data_length]); + remaining_data_length = 0; + } + } + } + + if remaining_data_length != 0 || remaining_topics != 0 { + current = Some((remaining_data_length, remaining_topics, event)) + } else { + result.push(event); + } + } + } else { + // start new one. First take the old one only if it's well formed + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event); + } + } + + let EventMessage { + shard_id, + is_first: _, + tx_number_in_block, + address, + key, + value, + } = message; + // split key as our internal marker. 
Ignore higher bits
+            let mut num_topics = key.0[0] as u32;
+            let mut data_length = (key.0[0] >> 32) as usize;
+            let mut buffer = [0u8; 32];
+            value.to_big_endian(&mut buffer);
+
+            let (topics, data) = if num_topics == 0 && data_length == 0 {
+                (vec![], vec![])
+            } else if num_topics == 0 {
+                data_length -= 32;
+                (vec![], buffer.to_vec())
+            } else {
+                num_topics -= 1;
+                (vec![buffer], vec![])
+            };
+
+            let new_event = SolidityLikeEvent {
+                shard_id,
+                tx_number_in_block,
+                address,
+                topics,
+                data,
+            };
+
+            current = Some((data_length, num_topics, new_event))
+        }
+    }
+
+    // add the last one
+    if let Some((remaining_data_length, remaining_topics, event)) = current.take() {
+        if remaining_data_length == 0 && remaining_topics == 0 {
+            result.push(event);
+        }
+    }
+
+    result
+}
+
+pub fn merge_events(events: Vec<EventMessage>) -> Vec<SolidityLikeEvent> {
+    let raw_events = merge_events_inner(events);
+
+    raw_events
+        .into_iter()
+        .filter(|e| e.address == EVENT_WRITER_ADDRESS)
+        .map(|event| {
+            // In the event writer's events, the first topic is the actual address of the
+            // event emitter, and the remaining topics are the real topics.
+            let address = h256_to_account_address(&H256(event.topics[0]));
+            let topics = event.topics.into_iter().skip(1).collect();
+
+            SolidityLikeEvent {
+                topics,
+                address,
+                ..event
+            }
+        })
+        .collect()
+}
diff --git a/core/multivm_deps/vm_m6/src/glue.rs b/core/multivm_deps/vm_m6/src/glue.rs
new file mode 100644
index 000000000000..5bf70ecc77d8
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/glue.rs
@@ -0,0 +1,116 @@
+pub trait GlueFrom<T>: Sized {
+    fn glue_from(value: T) -> Self;
+}
+
+/// See the description of [`GlueFrom`] trait above.
+pub trait GlueInto<T>: Sized {
+    fn glue_into(self) -> T;
+}
+
+// Blanket `GlueInto` impl for any type that implements `GlueFrom`.
+impl<T, U> GlueInto<U> for T
+where
+    U: GlueFrom<T>,
+{
+    fn glue_into(self) -> U {
+        U::glue_from(self)
+    }
+}
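The glue pattern is essentially `From`/`Into` under different names, so that conversions between structurally identical types from different crate versions don't clash with coherence rules. A self-contained toy version, with two local types standing in for the two `zk_evm` versions:

```rust
// Toy re-creation of the glue pattern: two structurally identical types
// (standing in for the same struct from two crate versions) plus a blanket impl.
pub trait GlueFrom<T>: Sized {
    fn glue_from(value: T) -> Self;
}

pub trait GlueInto<T>: Sized {
    fn glue_into(self) -> T;
}

impl<T, U> GlueInto<U> for T
where
    U: GlueFrom<T>,
{
    fn glue_into(self) -> U {
        U::glue_from(self)
    }
}

struct OldTimestamp(u32); // e.g. the pinned zk_evm version's type
struct NewTimestamp(u32); // e.g. the workspace's type

impl GlueFrom<OldTimestamp> for NewTimestamp {
    fn glue_from(ts: OldTimestamp) -> Self {
        NewTimestamp(ts.0)
    }
}

fn main() {
    let new: NewTimestamp = OldTimestamp(42).glue_into();
    assert_eq!(new.0, 42);
}
```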
+// Identity impl.
+impl<T> GlueFrom<T> for T {
+    fn glue_from(this: T) -> Self {
+        this
+    }
+}
+
+impl GlueFrom<zk_evm::aux_structures::Timestamp>
+    for zksync_types::zk_evm::aux_structures::Timestamp
+{
+    fn glue_from(timestamp: zk_evm::aux_structures::Timestamp) -> Self {
+        zksync_types::zk_evm::aux_structures::Timestamp(timestamp.0)
+    }
+}
+
+impl GlueFrom<zk_evm::aux_structures::LogQuery> for zksync_types::zk_evm::aux_structures::LogQuery {
+    fn glue_from(query: zk_evm::aux_structures::LogQuery) -> Self {
+        zksync_types::zk_evm::aux_structures::LogQuery {
+            address: query.address,
+            key: query.key,
+            written_value: query.written_value,
+            timestamp: query.timestamp.glue_into(),
+            shard_id: query.shard_id,
+            rollback: query.rollback,
+            tx_number_in_block: query.tx_number_in_block,
+            aux_byte: query.aux_byte,
+            read_value: query.read_value,
+            rw_flag: query.rw_flag,
+            is_service: query.is_service,
+        }
+    }
+}
+
+impl GlueFrom<zksync_types::zk_evm::aux_structures::Timestamp>
+    for zk_evm::aux_structures::Timestamp
+{
+    fn glue_from(timestamp: zksync_types::zk_evm::aux_structures::Timestamp) -> Self {
+        zk_evm::aux_structures::Timestamp(timestamp.0)
+    }
+}
+
+impl GlueFrom<zksync_types::zk_evm::aux_structures::LogQuery> for zk_evm::aux_structures::LogQuery {
+    fn glue_from(query: zksync_types::zk_evm::aux_structures::LogQuery) -> Self {
+        zk_evm::aux_structures::LogQuery {
+            address: query.address,
+            key: query.key,
+            written_value: query.written_value,
+            timestamp: query.timestamp.glue_into(),
+            shard_id: query.shard_id,
+            rollback: query.rollback,
+            tx_number_in_block: query.tx_number_in_block,
+            aux_byte: query.aux_byte,
+            read_value: query.read_value,
+            rw_flag: query.rw_flag,
+            is_service: query.is_service,
+        }
+    }
+}
+
+impl GlueFrom<zk_evm::reference_impls::event_sink::EventMessage>
+    for zksync_types::zk_evm::reference_impls::event_sink::EventMessage
+{
+    fn glue_from(event: zk_evm::reference_impls::event_sink::EventMessage) -> Self {
+        zksync_types::zk_evm::reference_impls::event_sink::EventMessage {
+            shard_id: event.shard_id,
+            is_first: event.is_first,
+            tx_number_in_block: event.tx_number_in_block,
+            address: event.address,
+            key: event.key,
+            value: event.value,
+        }
+    }
+}
+
+impl GlueFrom<zk_evm::zkevm_opcode_defs::FarCallOpcode>
+    for zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode
+{
+    fn glue_from(value: zk_evm::zkevm_opcode_defs::FarCallOpcode) -> Self {
+        match value {
+            zk_evm::zkevm_opcode_defs::FarCallOpcode::Normal => Self::Normal,
+            zk_evm::zkevm_opcode_defs::FarCallOpcode::Delegate => Self::Delegate,
+            zk_evm::zkevm_opcode_defs::FarCallOpcode::Mimic => Self::Mimic,
+        }
+    }
+}
+
+impl GlueFrom<zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode>
+    for zk_evm::zkevm_opcode_defs::FarCallOpcode
+{
+    fn glue_from(value: zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode) -> Self {
+        match value {
+            zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode::Normal => Self::Normal,
+            zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode::Delegate => Self::Delegate,
+            zksync_types::zk_evm::zkevm_opcode_defs::FarCallOpcode::Mimic => Self::Mimic,
+        }
+    }
+}
diff --git a/core/multivm_deps/vm_m6/src/history_recorder.rs b/core/multivm_deps/vm_m6/src/history_recorder.rs
new file mode 100644
index 000000000000..b3ae1e756765
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/history_recorder.rs
@@ -0,0 +1,719 @@
+use std::{
+    collections::HashMap,
+    fmt::Debug,
+    hash::{BuildHasherDefault, Hash, Hasher},
+};
+
+use crate::storage::StoragePtr;
+
+use zk_evm::{
+    aux_structures::Timestamp,
+    vm_state::PrimitiveValue,
+    zkevm_opcode_defs::{self},
+};
+
+use zksync_types::{StorageKey, U256};
+use zksync_utils::{h256_to_u256, u256_to_h256};
+
+pub type MemoryWithHistory<H> = HistoryRecorder<MemoryWrapper, H>;
+pub type IntFrameManagerWithHistory<T, H> = HistoryRecorder<FramedStack<T>, H>;
timestamp..timestamp+TIME_DELTA_PER_CYCLE-1 +// can be used. This can sometimes violate monotonicity of the timestamp within the +// same cycle, so it should be normalized. +#[inline] +fn normalize_timestamp(timestamp: Timestamp) -> Timestamp { + let timestamp = timestamp.0; + + // Making sure it is divisible by TIME_DELTA_PER_CYCLE + Timestamp(timestamp - timestamp % zkevm_opcode_defs::TIME_DELTA_PER_CYCLE) +} + +/// Accepts history item as its parameter and applies it. +pub trait WithHistory { + type HistoryRecord; + type ReturnValue; + + // Applies an action and returns the action that would + // rollback its effect as well as some returned value + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue); +} + +type EventList = Vec<(Timestamp, ::HistoryRecord)>; + +/// Controls if rolling back is possible or not. +/// Either [HistoryEnabled] or [HistoryDisabled]. +pub trait HistoryMode: private::Sealed + Debug + Clone + Default { + type History: Default; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone; + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ); + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + default: R, + ) -> R; +} + +mod private { + pub trait Sealed {} + impl Sealed for super::HistoryEnabled {} + impl Sealed for super::HistoryDisabled {} +} + +// derives require that all type parameters implement the trait, which is why +// HistoryEnabled/Disabled derive so many traits even though they mostly don't +// exist at runtime. + +/// A data structure with this parameter can be rolled back. +/// See also: [HistoryDisabled] +#[derive(Debug, Clone, Default, PartialEq)] +pub struct HistoryEnabled; + +/// A data structure with this parameter cannot be rolled back. +/// It won't even have rollback methods. +/// See also: [HistoryEnabled] +#[derive(Debug, Clone, Default)] +pub struct HistoryDisabled; + +impl HistoryMode for HistoryEnabled { + type History = EventList; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone, + { + history.clone() + } + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ) { + f(&mut recorder.history) + } + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + _: R, + ) -> R { + f(&recorder.history) + } +} + +impl HistoryMode for HistoryDisabled { + type History = (); + + fn clone_history(_: &Self::History) -> Self::History {} + fn mutate_history)>( + _: &mut HistoryRecorder, + _: F, + ) { + } + fn borrow_history) -> R, R>( + _: &HistoryRecorder, + _: F, + default: R, + ) -> R { + default + } +} + +/// A struct responsible for tracking history for +/// a component that is passed as a generic parameter to it (`inner`).
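// A minimal usage sketch (hypothetical, not part of this diff), assuming a
// `Vec<u32>`-backed recorder and timestamps that are multiples of
// TIME_DELTA_PER_CYCLE so normalization leaves them unchanged:
//
//     let mut v: HistoryRecorder<Vec<u32>, HistoryEnabled> = Default::default();
//     v.push(1, Timestamp(0));
//     v.push(2, Timestamp(1024));
//     // Undoes every record with a (normalized) timestamp >= 1024:
//     v.rollback_to_timestamp(Timestamp(1024));
//     assert_eq!(v.inner().len(), 1);
//
// With `HistoryDisabled` the same pushes mutate `inner` directly, nothing is
// recorded, and `rollback_to_timestamp` is simply not available.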
+#[derive(Default)] +pub struct HistoryRecorder { + inner: T, + history: H::History, +} + +impl PartialEq for HistoryRecorder +where + T::HistoryRecord: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.inner == other.inner + && self.borrow_history(|h1| other.borrow_history(|h2| h1 == h2, true), true) + } +} + +impl Debug for HistoryRecorder +where + T::HistoryRecord: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug_struct = f.debug_struct("HistoryRecorder"); + debug_struct.field("inner", &self.inner); + self.borrow_history( + |h| { + debug_struct.field("history", h); + }, + (), + ); + debug_struct.finish() + } +} + +impl Clone for HistoryRecorder +where + T::HistoryRecord: Clone, + H: HistoryMode, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + history: H::clone_history(&self.history), + } + } +} + +impl HistoryRecorder { + pub fn from_inner(inner: T) -> Self { + Self { + inner, + history: Default::default(), + } + } + + pub fn inner(&self) -> &T { + &self.inner + } + + /// If history exists, modify it using `f`. + pub fn mutate_history)>(&mut self, f: F) { + H::mutate_history(self, f); + } + + /// If history exists, feed it into `f`. Otherwise return `default`. + pub fn borrow_history) -> R, R>(&self, f: F, default: R) -> R { + H::borrow_history(self, f, default) + } + + pub fn apply_historic_record( + &mut self, + item: T::HistoryRecord, + timestamp: Timestamp, + ) -> T::ReturnValue { + let (reversed_item, return_value) = self.inner.apply_historic_record(item); + + self.mutate_history(|history| { + let last_recorded_timestamp = history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); + let timestamp = normalize_timestamp(timestamp); + assert!( + last_recorded_timestamp <= timestamp, + "Timestamps are not monotonic" + ); + history.push((timestamp, reversed_item)); + }); + + return_value + } + + /// Deletes all the history for its component, making + /// its current state irreversible + pub fn delete_history(&mut self) { + self.mutate_history(|h| h.clear()) + } +} + +impl HistoryRecorder { + pub fn history(&self) -> &Vec<(Timestamp, T::HistoryRecord)> { + &self.history + } + + pub fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + loop { + let should_undo = self + .history + .last() + .map(|(item_timestamp, _)| *item_timestamp >= timestamp) + .unwrap_or(false); + if !should_undo { + break; + } + + let (_, item_to_apply) = self.history.pop().unwrap(); + self.inner.apply_historic_record(item_to_apply); + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum VectorHistoryEvent { + Push(X), + Pop, +} + +impl WithHistory for Vec { + type HistoryRecord = VectorHistoryEvent; + type ReturnValue = Option; + fn apply_historic_record( + &mut self, + item: VectorHistoryEvent, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + match item { + VectorHistoryEvent::Pop => { + // Note, that here we assume that the users + // will check themselves whether this vector is empty + // prior to popping from it. 
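// In other words, every `apply_historic_record` hands back the event that undoes it.
// A sketch of that contract (hypothetical, not part of this diff):
//
//     let mut v = vec![1u32];
//     let (undo, ret) = v.apply_historic_record(VectorHistoryEvent::Push(2));
//     assert_eq!(ret, None);                       // Push returns no value
//     assert_eq!(undo, VectorHistoryEvent::Pop);   // ...but yields its inverse
//     v.apply_historic_record(undo);               // replaying the inverse restores [1]
//     assert_eq!(v, vec![1]);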
+ let popped_item = self.pop().unwrap(); + + (VectorHistoryEvent::Push(popped_item), Some(popped_item)) + } + VectorHistoryEvent::Push(x) => { + self.push(x); + + (VectorHistoryEvent::Pop, None) + } + } + } +} + +impl HistoryRecorder, H> { + pub fn push(&mut self, elem: T, timestamp: Timestamp) { + self.apply_historic_record(VectorHistoryEvent::Push(elem), timestamp); + } + + pub fn pop(&mut self, timestamp: Timestamp) -> T { + self.apply_historic_record(VectorHistoryEvent::Pop, timestamp) + .unwrap() + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct HashMapHistoryEvent { + pub key: K, + pub value: Option, +} + +impl WithHistory for HashMap { + type HistoryRecord = HashMapHistoryEvent; + type ReturnValue = Option; + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let HashMapHistoryEvent { key, value } = item; + + let prev_value = match value { + Some(x) => self.insert(key, x), + None => self.remove(&key), + }; + + ( + HashMapHistoryEvent { + key, + value: prev_value.clone(), + }, + prev_value, + ) + } +} + +impl HistoryRecorder, H> { + pub fn insert(&mut self, key: K, value: V, timestamp: Timestamp) -> Option { + self.apply_historic_record( + HashMapHistoryEvent { + key, + value: Some(value), + }, + timestamp, + ) + } +} + +/// A stack of stacks. The inner stacks are called frames. +/// +/// Does not support popping from the outer stack. Instead, the outer stack can +/// push its topmost frame's contents onto the previous frame. +#[derive(Debug, Clone, PartialEq)] +pub struct FramedStack { + data: Vec, + frame_start_indices: Vec, +} + +impl Default for FramedStack { + fn default() -> Self { + // We typically require at least the first frame to be there + // since the last user-provided frame might be reverted + Self { + data: vec![], + frame_start_indices: vec![0], + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum FramedStackEvent { + Push(T), + Pop, + PushFrame(usize), + MergeFrame, +} + +impl WithHistory for FramedStack { + type HistoryRecord = FramedStackEvent; + type ReturnValue = (); + + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + use FramedStackEvent::*; + match item { + Push(x) => { + self.data.push(x); + (Pop, ()) + } + Pop => { + let x = self.data.pop().unwrap(); + (Push(x), ()) + } + PushFrame(i) => { + self.frame_start_indices.push(i); + (MergeFrame, ()) + } + MergeFrame => { + let pos = self.frame_start_indices.pop().unwrap(); + (PushFrame(pos), ()) + } + } + } +} + +impl FramedStack { + fn push_frame(&self) -> FramedStackEvent { + FramedStackEvent::PushFrame(self.data.len()) + } + + pub fn current_frame(&self) -> &[T] { + &self.data[*self.frame_start_indices.last().unwrap()..self.data.len()] + } + + fn len(&self) -> usize { + self.frame_start_indices.len() + } + + /// Returns the amount of memory taken up by the stored items + pub fn get_size(&self) -> usize { + self.data.len() * std::mem::size_of::() + } +} + +impl HistoryRecorder, H> { + pub fn push_to_frame(&mut self, x: T, timestamp: Timestamp) { + self.apply_historic_record(FramedStackEvent::Push(x), timestamp); + } + pub fn clear_frame(&mut self, timestamp: Timestamp) { + let start = *self.inner.frame_start_indices.last().unwrap(); + while self.inner.data.len() > start { + self.apply_historic_record(FramedStackEvent::Pop, timestamp); + } + } + pub fn
extend_frame(&mut self, items: impl IntoIterator, timestamp: Timestamp) { + for x in items { + self.push_to_frame(x, timestamp); + } + } + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(self.inner.push_frame(), timestamp); + } + pub fn merge_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(FramedStackEvent::MergeFrame, timestamp); + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct AppDataFrameManagerWithHistory { + forward: HistoryRecorder, H>, + rollback: HistoryRecorder, H>, +} + +impl Default for AppDataFrameManagerWithHistory { + fn default() -> Self { + Self { + forward: Default::default(), + rollback: Default::default(), + } + } +} + +impl AppDataFrameManagerWithHistory { + pub fn delete_history(&mut self) { + self.forward.delete_history(); + self.rollback.delete_history(); + } + + pub fn push_forward(&mut self, item: T, timestamp: Timestamp) { + self.forward.push_to_frame(item, timestamp); + } + pub fn push_rollback(&mut self, item: T, timestamp: Timestamp) { + self.rollback.push_to_frame(item, timestamp); + } + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.forward.push_frame(timestamp); + self.rollback.push_frame(timestamp); + } + pub fn merge_frame(&mut self, timestamp: Timestamp) { + self.forward.merge_frame(timestamp); + self.rollback.merge_frame(timestamp); + } + + pub fn len(&self) -> usize { + self.forward.inner.len() + } + pub fn forward(&self) -> &FramedStack { + &self.forward.inner + } + pub fn rollback(&self) -> &FramedStack { + &self.rollback.inner + } + + /// Returns the amount of memory taken up by the stored items + pub fn get_size(&self) -> usize { + self.forward().get_size() + self.rollback().get_size() + } + + pub fn get_history_size(&self) -> usize { + (self.forward.borrow_history(|h| h.len(), 0) + self.rollback.borrow_history(|h| h.len(), 0)) + * std::mem::size_of::< as WithHistory>::HistoryRecord>() + } +} + +impl AppDataFrameManagerWithHistory { + pub fn move_rollback_to_forward bool>(&mut self, filter: F, timestamp: Timestamp) { + for x in self.rollback.inner.current_frame().iter().rev() { + if filter(x) { + self.forward.push_to_frame(x.clone(), timestamp); + } + } + self.rollback.clear_frame(timestamp); + } +} + +impl AppDataFrameManagerWithHistory { + pub fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.forward.rollback_to_timestamp(timestamp); + self.rollback.rollback_to_timestamp(timestamp); + } +} + +#[derive(Default)] +pub struct NoopHasher(u64); + +impl Hasher for NoopHasher { + fn write_usize(&mut self, value: usize) { + self.0 = value as u64; + } + + fn write(&mut self, _bytes: &[u8]) { + unreachable!("internal hasher only handles usize type"); + } + + fn finish(&self) -> u64 { + self.0 + } +} + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct MemoryWrapper { + pub memory: Vec>>, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MemoryHistoryRecord { + pub page: usize, + pub slot: usize, + pub set_value: PrimitiveValue, +} + +impl MemoryWrapper { + pub fn shrink_pages(&mut self) { + while self.memory.last().map(|h| h.is_empty()).unwrap_or(false) { + self.memory.pop(); + } + } + + pub fn ensure_page_exists(&mut self, page: usize) { + if self.memory.len() <= page { + // We don't need to record such events in history + // because all these vectors will be empty + self.memory.resize_with(page + 1, HashMap::default); + } + } + + pub fn dump_page_content_as_u256_words( + &self, + page_number: u32, + range: std::ops::Range, + ) -> Vec { + if let 
Some(page) = self.memory.get(page_number as usize) { + let mut result = vec![]; + for i in range { + if let Some(word) = page.get(&(i as usize)) { + result.push(*word); + } else { + result.push(PrimitiveValue::empty()); + } + } + + result + } else { + vec![PrimitiveValue::empty(); range.len()] + } + } + + const EMPTY: PrimitiveValue = PrimitiveValue::empty(); + + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory + .get(page) + .and_then(|page| page.get(&slot)) + .unwrap_or(&Self::EMPTY) + } +} + +impl WithHistory for MemoryWrapper { + type HistoryRecord = MemoryHistoryRecord; + type ReturnValue = PrimitiveValue; + + fn apply_historic_record( + &mut self, + item: MemoryHistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let MemoryHistoryRecord { + page, + slot, + set_value, + } = item; + + self.ensure_page_exists(page); + let page_handle = self.memory.get_mut(page).unwrap(); + let prev_value = if set_value == PrimitiveValue::empty() { + page_handle.remove(&slot) + } else { + page_handle.insert(slot, set_value) + } + .unwrap_or(PrimitiveValue::empty()); + self.shrink_pages(); + + let reserved_item = MemoryHistoryRecord { + page, + slot, + set_value: prev_value, + }; + + (reserved_item, prev_value) + } +} + +impl HistoryRecorder { + pub fn write_to_memory( + &mut self, + page: usize, + slot: usize, + value: PrimitiveValue, + timestamp: Timestamp, + ) -> PrimitiveValue { + self.apply_historic_record( + MemoryHistoryRecord { + page, + slot, + set_value: value, + }, + timestamp, + ) + } + + pub fn clear_page(&mut self, page: usize, timestamp: Timestamp) { + let slots_to_clear: Vec<_> = match self.inner.memory.get(page) { + None => return, + Some(x) => x.keys().copied().collect(), + }; + + // We manually clear the page to preserve correct history + for slot in slots_to_clear { + self.write_to_memory(page, slot, PrimitiveValue::empty(), timestamp); + } + } +} + +#[derive(Debug)] +pub struct StorageWrapper<'a> { + storage_ptr: StoragePtr<'a>, +} + +impl<'a> StorageWrapper<'a> { + pub fn new(storage_ptr: StoragePtr<'a>) -> Self { + Self { storage_ptr } + } + + pub fn get_ptr(&self) -> StoragePtr<'a> { + self.storage_ptr.clone() + } + + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + h256_to_u256(self.storage_ptr.borrow_mut().get_value(key)) + } +} + +#[derive(Debug, Clone)] +pub struct StorageHistoryRecord { + pub key: StorageKey, + pub value: U256, +} + +impl<'a> WithHistory for StorageWrapper<'a> { + type HistoryRecord = StorageHistoryRecord; + type ReturnValue = U256; + + fn apply_historic_record( + &mut self, + item: Self::HistoryRecord, + ) -> (Self::HistoryRecord, Self::ReturnValue) { + let prev_value = h256_to_u256( + self.storage_ptr + .borrow_mut() + .set_value(&item.key, u256_to_h256(item.value)), + ); + + let reverse_item = StorageHistoryRecord { + key: item.key, + value: prev_value, + }; + + (reverse_item, prev_value) + } +} + +impl<'a, H: HistoryMode> HistoryRecorder, H> { + pub fn read_from_storage(&self, key: &StorageKey) -> U256 { + self.inner.read_from_storage(key) + } + + pub fn write_to_storage(&mut self, key: StorageKey, value: U256, timestamp: Timestamp) -> U256 { + self.apply_historic_record(StorageHistoryRecord { key, value }, timestamp) + } + + /// Returns a pointer to the storage. + /// Note, that any changes done to the storage via this pointer + /// will NOT be recorded as its history. 
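// For instance (a hypothetical illustration, not part of this diff): writes that
// go through the recorder are reversible, while writes through the raw pointer
// are invisible to the history:
//
//     let old = storage.write_to_storage(key, U256::from(5), ts); // recorded
//     storage
//         .get_ptr()
//         .borrow_mut()
//         .set_value(&key, u256_to_h256(U256::from(6)));          // NOT recorded
//
// Rolling back past `ts` restores `old` for the first write, but the second
// write survives the rollback.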
+ pub fn get_ptr(&self) -> StoragePtr<'a> { + self.inner.get_ptr() + } +} diff --git a/core/multivm_deps/vm_m6/src/lib.rs b/core/multivm_deps/vm_m6/src/lib.rs new file mode 100644 index 000000000000..f88ae4a42b18 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/lib.rs @@ -0,0 +1,38 @@ +#![allow(clippy::derive_partial_eq_without_eq)] + +mod bootloader_state; +pub mod errors; +pub mod event_sink; +mod events; +pub(crate) mod glue; +mod history_recorder; +pub mod memory; +mod oracle_tools; +pub mod oracles; +mod pubdata_utils; +mod refunds; +pub mod storage; +pub mod test_utils; +pub mod transaction_data; +pub mod utils; +pub mod vm; +pub mod vm_with_bootloader; + +#[cfg(test)] +mod tests; + +pub use crate::errors::TxRevertReason; +pub use crate::history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}; +pub use crate::oracle_tools::OracleTools; +pub use crate::oracles::storage::StorageOracle; +pub use crate::vm::{VmBlockResult, VmExecutionResult, VmInstance}; +pub use zk_evm; +pub use zksync_types::vm_trace::VmExecutionTrace; + +pub type Word = zksync_types::U256; + +pub const MEMORY_SIZE: usize = 1 << 16; +pub const MAX_CALLS: usize = 65536; +pub const REGISTERS_COUNT: usize = 16; +pub const MAX_STACK_SIZE: usize = 256; +pub const MAX_CYCLES_FOR_TX: u32 = u32::MAX; diff --git a/core/multivm_deps/vm_m6/src/memory.rs b/core/multivm_deps/vm_m6/src/memory.rs new file mode 100644 index 000000000000..ecc8dc9c04c5 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/memory.rs @@ -0,0 +1,322 @@ +use zk_evm::abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}; +use zk_evm::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; +use zk_evm::vm_state::PrimitiveValue; +use zk_evm::zkevm_opcode_defs::FatPointer; +use zksync_types::U256; + +use crate::history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, +}; +use crate::oracles::OracleWithHistory; +use crate::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct SimpleMemory { + pub memory: MemoryWithHistory, + pub observable_pages: IntFrameManagerWithHistory, +} + +impl OracleWithHistory for SimpleMemory { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.memory.rollback_to_timestamp(timestamp); + self.observable_pages.rollback_to_timestamp(timestamp); + } +} + +impl SimpleMemory { + pub fn populate(&mut self, elements: Vec<(u32, Vec)>, timestamp: Timestamp) { + for (page, values) in elements.into_iter() { + // Resizing the pages array to fit the page. 
+ let len = values.len(); + assert!(len <= MEMORY_CELLS_OTHER_PAGES); + + for (i, value) in values.into_iter().enumerate() { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + self.memory + .write_to_memory(page as usize, i, value, timestamp); + } + } + } + + pub fn populate_page( + &mut self, + page: usize, + elements: Vec<(usize, U256)>, + timestamp: Timestamp, + ) { + elements.into_iter().for_each(|(offset, value)| { + let value = PrimitiveValue { + value, + is_pointer: false, + }; + + self.memory.write_to_memory(page, offset, value, timestamp); + }); + } + + pub fn dump_page_content_as_u256_words( + &self, + page: u32, + range: std::ops::Range, + ) -> Vec { + self.memory + .inner() + .dump_page_content_as_u256_words(page, range) + .into_iter() + .map(|v| v.value) + .collect() + } + + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory.inner().read_slot(page, slot) + } + + // This method should be used with relatively small lengths, since + // we don't heavily optimize here for cases with long lengths + pub fn read_unaligned_bytes(&self, page: usize, start: usize, length: usize) -> Vec { + if length == 0 { + return vec![]; + } + + let end = start + length - 1; + + let mut current_word = start / 32; + let mut result = vec![]; + while current_word * 32 <= end { + let word_value = self.read_slot(page, current_word).value; + let word_value = { + let mut bytes: Vec = vec![0u8; 32]; + word_value.to_big_endian(&mut bytes); + bytes + }; + + result.extend(extract_needed_bytes_from_word( + word_value, + current_word, + start, + end, + )); + + current_word += 1; + } + + assert_eq!(result.len(), length); + + result + } + + pub fn get_size(&self) -> usize { + // Hashmap memory overhead is neglected. + let memory_size = self + .memory + .inner() + .memory + .iter() + .map(|page| page.len() * std::mem::size_of::<(usize, PrimitiveValue)>()) + .sum::(); + let observable_pages_size = self.observable_pages.inner().get_size(); + + memory_size + observable_pages_size + } + + pub fn get_history_size(&self) -> usize { + let memory_size = self.memory.borrow_history(|h| h.len(), 0) + * std::mem::size_of::<::HistoryRecord>(); + let observable_pages_size = self.observable_pages.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + + memory_size + observable_pages_size + } + + pub fn delete_history(&mut self) { + self.memory.delete_history(); + self.observable_pages.delete_history(); + } +} + +impl Memory for SimpleMemory { + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + match query.location.memory_type { + MemoryType::Stack => {} + MemoryType::Heap | MemoryType::AuxHeap => { + // The following assertion works fine even when doing a read + // from heap through pointer, since `value_is_pointer` can only be set to + // `true` during memory writes. 
+ assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::FatPointer => { + assert!(!query.rw_flag); + assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::Code => { + unreachable!("code should be through specialized query"); + } + } + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + + if query.rw_flag { + self.memory.write_to_memory( + page, + slot, + PrimitiveValue { + value: query.value, + is_pointer: query.value_is_pointer, + }, + query.timestamp, + ); + } else { + let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + } + + query + } + + fn specialized_code_query( + &mut self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); + assert!( + !query.value_is_pointer, + "Pointers are not used for decommits" + ); + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + + if query.rw_flag { + self.memory.write_to_memory( + page, + slot, + PrimitiveValue { + value: query.value, + is_pointer: query.value_is_pointer, + }, + query.timestamp, + ); + } else { + let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + } + + query + } + + fn read_code_query( + &self, + _monotonic_cycle_counter: u32, + mut query: MemoryQuery, + ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); + assert!( + !query.value_is_pointer, + "Pointers are not used for decommits" + ); + assert!(!query.rw_flag, "Only read queries can be processed"); + + let page = query.location.page.0 as usize; + let slot = query.location.index.0 as usize; + + let current_value = self.read_slot(page, slot); + query.value = current_value.value; + query.value_is_pointer = current_value.is_pointer; + + query + } + + fn start_global_frame( + &mut self, + _current_base_page: MemoryPage, + new_base_page: MemoryPage, + calldata_fat_pointer: FatPointer, + timestamp: Timestamp, + ) { + // Besides the calldata page, we also formally include the current stack + // page, heap page and aux heap page. + // The code page will always be left observable, so we don't include it here. + self.observable_pages.push_frame(timestamp); + self.observable_pages.extend_frame( + vec![ + calldata_fat_pointer.memory_page, + stack_page_from_base(new_base_page).0, + heap_page_from_base(new_base_page).0, + aux_heap_page_from_base(new_base_page).0, + ], + timestamp, + ); + } + + fn finish_global_frame( + &mut self, + base_page: MemoryPage, + returndata_fat_pointer: FatPointer, + timestamp: Timestamp, + ) { + // Safe to unwrap here, since `finish_global_frame` is never called with empty stack + let current_observable_pages = self.observable_pages.inner().current_frame(); + let returndata_page = returndata_fat_pointer.memory_page; + + for &page in current_observable_pages { + // If the page's number is greater than or equal to the base_page, + // it means that it was created by the internal calls of this contract. + // We need to add this check as the calldata pointer is also part of the + // observable pages.
+ if page >= base_page.0 && page != returndata_page { + self.memory.clear_page(page as usize, timestamp); + } + } + + self.observable_pages.clear_frame(timestamp); + self.observable_pages.merge_frame(timestamp); + + self.observable_pages + .push_to_frame(returndata_page, timestamp); + } +} + +// It is expected that there is some intersection between [word_number*32..word_number*32+31] and [start, end] +fn extract_needed_bytes_from_word( + word_value: Vec, + word_number: usize, + start: usize, + end: usize, +) -> Vec { + let word_start = word_number * 32; + let word_end = word_start + 31; // Note, that at word_start + 32 a new word already starts + + let intersection_left = std::cmp::max(word_start, start); + let intersection_right = std::cmp::min(word_end, end); + + if intersection_right < intersection_left { + vec![] + } else { + let start_bytes = intersection_left - word_start; + let to_take = intersection_right - intersection_left + 1; + + word_value + .into_iter() + .skip(start_bytes) + .take(to_take) + .collect() + } +} diff --git a/core/multivm_deps/vm_m6/src/oracle_tools.rs b/core/multivm_deps/vm_m6/src/oracle_tools.rs new file mode 100644 index 000000000000..b1f8c4de2913 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracle_tools.rs @@ -0,0 +1,45 @@ +use crate::memory::SimpleMemory; +use std::cell::RefCell; + +use std::fmt::Debug; +use std::rc::Rc; + +use crate::event_sink::InMemoryEventSink; +use crate::history_recorder::HistoryMode; +use crate::oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, +}; +use crate::storage::Storage; +use zk_evm::witness_trace::DummyTracer; + +/// zkEVM requires a bunch of objects implementing given traits to work. +/// For example: Storage, Memory, PrecompilesProcessor, etc. +/// (you can find all these traits in the zk_evm crate -> src/abstractions/mod.rs) +/// For each of these traits, we have a local implementation (for example StorageOracle) +/// that also supports additional features (like rollbacks & history). +/// The OracleTools struct holds all these things together in one place.
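// A construction sketch (hypothetical usage, not part of this diff):
//
//     // `storage_view` is anything implementing the local `Storage` trait.
//     let mut tools = OracleTools::new(&mut storage_view, HistoryEnabled);
//     // `tools.storage`, `tools.memory`, `tools.event_sink`, etc. are then
//     // plugged into the zk_evm state; each keeps its own history so the VM
//     // can roll back to a timestamp on reverts.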
+#[derive(Debug)] +pub struct OracleTools<'a, const B: bool, H: HistoryMode> { + pub storage: StorageOracle<'a, H>, + pub memory: SimpleMemory, + pub event_sink: InMemoryEventSink, + pub precompiles_processor: PrecompilesProcessorWithHistory, + pub decommittment_processor: DecommitterOracle<'a, B, H>, + pub witness_tracer: DummyTracer, +} + +impl<'a, H: HistoryMode> OracleTools<'a, false, H> { + pub fn new(storage_view: &'a mut dyn Storage, _: H) -> Self { + let pointer: Rc> = Rc::new(RefCell::new(storage_view)); + + Self { + storage: StorageOracle::new(pointer.clone()), + memory: Default::default(), + event_sink: Default::default(), + precompiles_processor: Default::default(), + decommittment_processor: DecommitterOracle::new(pointer.clone()), + witness_tracer: DummyTracer {}, + } + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/decommitter.rs b/core/multivm_deps/vm_m6/src/oracles/decommitter.rs new file mode 100644 index 000000000000..8001f7df69a1 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/decommitter.rs @@ -0,0 +1,229 @@ +use std::collections::HashMap; + +use crate::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}; +use crate::storage::StoragePtr; + +use zk_evm::abstractions::MemoryType; +use zk_evm::aux_structures::Timestamp; +use zk_evm::{ + abstractions::{DecommittmentProcessor, Memory}, + aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, +}; +use zksync_types::U256; +use zksync_utils::bytecode::bytecode_len_in_words; +use zksync_utils::{bytes_to_be_words, u256_to_h256}; + +use super::OracleWithHistory; + +/// The main job of the DecommitterOracle is to implement the DecommittmentProcessor trait - that is +/// used by the VM to 'load' bytecodes into memory. +#[derive(Debug)] +pub struct DecommitterOracle<'a, const B: bool, H: HistoryMode> { + /// Pointer that enables reading contract bytecodes from the database. + storage: StoragePtr<'a>, + /// The cache of bytecodes that the bootloader "knows", but that are not necessarily in the database. + /// And it is also used as a database cache. + pub known_bytecodes: HistoryRecorder>, H>, + /// Stores pages of memory where certain code hashes have already been decommitted. + /// It is expected that they all are present in the DB. + // `decommitted_code_hashes` history is necessary + pub decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, + /// Stores history of decommitment requests. + decommitment_requests: HistoryRecorder, H>, +} + +impl<'a, const B: bool, H: HistoryMode> DecommitterOracle<'a, B, H> { + pub fn new(storage: StoragePtr<'a>) -> Self { + Self { + storage, + known_bytecodes: Default::default(), + decommitted_code_hashes: Default::default(), + decommitment_requests: Default::default(), + } + } + + /// Gets the bytecode for a given hash (either from storage, or from 'known_bytecodes' that were populated by `populate` method). + /// Panics if bytecode doesn't exist. + pub fn get_bytecode(&mut self, hash: U256, timestamp: Timestamp) -> Vec { + let entry = self.known_bytecodes.inner().get(&hash); + + match entry { + Some(x) => x.clone(), + None => { + // It is ok to panic here, since the decommitter is never called directly by + // the users and always called by the VM. The VM will never ask to decommit a + // code hash which we didn't previously claim to know the preimage of.
+ let value = self + .storage + .borrow_mut() + .load_factory_dep(u256_to_h256(hash)) + .expect("Trying to decode nonexistent hash"); + + let value = bytes_to_be_words(value); + self.known_bytecodes.insert(hash, value.clone(), timestamp); + value + } + } + } + + /// Adds additional bytecodes. They will take precedence over the bytecodes from storage. + pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { + for (hash, bytecode) in bytecodes { + self.known_bytecodes.insert(hash, bytecode, timestamp); + } + } + + pub fn get_used_bytecode_hashes(&self) -> Vec { + self.decommitted_code_hashes + .inner() + .iter() + .map(|item| *item.0) + .collect() + } + + pub fn get_decommitted_bytecodes_after_timestamp(&self, timestamp: Timestamp) -> usize { + // Note, that here we rely on the fact that for each used bytecode + // there is one and only one corresponding event in the history of it. + self.decommitted_code_hashes + .history() + .iter() + .rev() + .take_while(|(t, _)| *t >= timestamp) + .count() + } + + pub fn get_decommitted_code_hashes_with_history( + &self, + ) -> &HistoryRecorder, HistoryEnabled> { + &self.decommitted_code_hashes + } + + /// Returns the storage handle. Used only in tests. + pub fn get_storage(&self) -> StoragePtr<'a> { + self.storage.clone() + } + + /// Measures the amount of memory used by this Oracle (used for metrics only). + pub fn get_size(&self) -> usize { + // Hashmap memory overhead is neglected. + let known_bytecodes_size = self + .known_bytecodes + .inner() + .iter() + .map(|(_, value)| value.len() * std::mem::size_of::()) + .sum::(); + let decommitted_code_hashes_size = + self.decommitted_code_hashes.inner().len() * std::mem::size_of::<(U256, u32)>(); + + known_bytecodes_size + decommitted_code_hashes_size + } + + pub fn get_history_size(&self) -> usize { + let known_bytecodes_stack_size = self.known_bytecodes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); + let known_bytecodes_heap_size = self.known_bytecodes.borrow_history( + |h| { + h.iter() + .map(|(_, event)| { + if let Some(bytecode) = event.value.as_ref() { + bytecode.len() * std::mem::size_of::() + } else { + 0 + } + }) + .sum::() + }, + 0, + ); + let decommitted_code_hashes_size = + self.decommitted_code_hashes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + + known_bytecodes_stack_size + known_bytecodes_heap_size + decommitted_code_hashes_size + } + + pub fn delete_history(&mut self) { + self.decommitted_code_hashes.delete_history(); + self.known_bytecodes.delete_history(); + self.decommitment_requests.delete_history(); + } +} + +impl<'a, const B: bool> OracleWithHistory for DecommitterOracle<'a, B, HistoryEnabled> { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.decommitted_code_hashes + .rollback_to_timestamp(timestamp); + self.known_bytecodes.rollback_to_timestamp(timestamp); + self.decommitment_requests.rollback_to_timestamp(timestamp); + } +} + +impl<'a, const B: bool, H: HistoryMode> DecommittmentProcessor for DecommitterOracle<'a, B, H> { + /// Loads a given bytecode hash into memory (see trait description for more details). + fn decommit_into_memory( + &mut self, + monotonic_cycle_counter: u32, + mut partial_query: DecommittmentQuery, + memory: &mut M, + ) -> (DecommittmentQuery, Option>) { + self.decommitment_requests.push((), partial_query.timestamp); + // First - check if we didn't fetch this bytecode in the past.
+ // If we did - we can just return the page that we used before (as the memory is read only). + if let Some(memory_page) = self + .decommitted_code_hashes + .inner() + .get(&partial_query.hash) + .copied() + { + partial_query.is_fresh = false; + partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = + bytecode_len_in_words(&u256_to_h256(partial_query.hash)); + + (partial_query, None) + } else { + // We are fetching a fresh bytecode that we didn't read before. + let values = self.get_bytecode(partial_query.hash, partial_query.timestamp); + let page_to_use = partial_query.memory_page; + let timestamp = partial_query.timestamp; + partial_query.decommitted_length = values.len() as u16; + partial_query.is_fresh = true; + + // Create a template query, that we'll use for writing into memory. + // value & index are set to 0 - as they will be updated in the inner loop below. + let mut tmp_q = MemoryQuery { + timestamp, + location: MemoryLocation { + memory_type: MemoryType::Code, + page: page_to_use, + index: MemoryIndex(0), + }, + value: U256::zero(), + value_is_pointer: false, + rw_flag: true, + is_pended: false, + }; + self.decommitted_code_hashes + .insert(partial_query.hash, page_to_use.0, timestamp); + + // Copy the bytecode (that is stored in 'values' Vec) into the memory page. + if B { + for (i, value) in values.iter().enumerate() { + tmp_q.location.index = MemoryIndex(i as u32); + tmp_q.value = *value; + memory.specialized_code_query(monotonic_cycle_counter, tmp_q); + } + // If we're in the witness mode - we also have to return the values. + (partial_query, Some(values)) + } else { + for (i, value) in values.into_iter().enumerate() { + tmp_q.location.index = MemoryIndex(i as u32); + tmp_q.value = value; + memory.specialized_code_query(monotonic_cycle_counter, tmp_q); + } + + (partial_query, None) + } + } + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/mod.rs b/core/multivm_deps/vm_m6/src/oracles/mod.rs new file mode 100644 index 000000000000..d219216b25f8 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/mod.rs @@ -0,0 +1,18 @@ +use zk_evm::aux_structures::Timestamp; +// We will discard RAM as soon as the execution of a tx ends, so +// it is ok for now to use SimpleMemory +pub use zk_evm::reference_impls::memory::SimpleMemory as RamOracle; +// All the changes to the events in the DB will be applied after the tx is executed, +// so for now it is fine. +pub use zk_evm::reference_impls::event_sink::InMemoryEventSink as EventSinkOracle; + +pub use zk_evm::testing::simple_tracer::NoopTracer; + +pub mod decommitter; +pub mod precompile; +pub mod storage; +pub mod tracer; + +pub trait OracleWithHistory { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp); +} diff --git a/core/multivm_deps/vm_m6/src/oracles/precompile.rs b/core/multivm_deps/vm_m6/src/oracles/precompile.rs new file mode 100644 index 000000000000..c75a8899f8f5 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/precompile.rs @@ -0,0 +1,75 @@ +use zk_evm::{ + abstractions::Memory, + abstractions::PrecompileCyclesWitness, + abstractions::PrecompilesProcessor, + aux_structures::{LogQuery, MemoryQuery, Timestamp}, + precompiles::DefaultPrecompilesProcessor, +}; + +use crate::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; + +use super::OracleWithHistory; + +/// A wrapper over DefaultPrecompilesProcessor that stores the queue +/// of timestamps at which precompiles were called.
+/// The number of precompiles per block is strictly limited; +/// saving timestamps allows us to check the exact number +/// of log queries that were used during the tx execution. +#[derive(Debug, Clone)] +pub struct PrecompilesProcessorWithHistory { + pub timestamp_history: HistoryRecorder, H>, + pub default_precompiles_processor: DefaultPrecompilesProcessor, +} + +impl Default for PrecompilesProcessorWithHistory { + fn default() -> Self { + Self { + timestamp_history: Default::default(), + default_precompiles_processor: DefaultPrecompilesProcessor, + } + } +} + +impl OracleWithHistory for PrecompilesProcessorWithHistory { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.timestamp_history.rollback_to_timestamp(timestamp); + } +} + +impl PrecompilesProcessorWithHistory { + pub fn get_timestamp_history(&self) -> &Vec { + self.timestamp_history.inner() + } + + pub fn delete_history(&mut self) { + self.timestamp_history.delete_history(); + } +} + +impl PrecompilesProcessor for PrecompilesProcessorWithHistory { + fn start_frame(&mut self) { + self.default_precompiles_processor.start_frame(); + } + fn execute_precompile( + &mut self, + monotonic_cycle_counter: u32, + query: LogQuery, + memory: &mut M, + ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { + // In the next line we save `query.timestamp` as both + // an operation in the history of precompiles processor and + // the time when this operation occurred. + // While slightly weird, it is done for consistency with other oracles + // where operations and timestamp have different types. + self.timestamp_history + .push(query.timestamp, query.timestamp); + self.default_precompiles_processor.execute_precompile( + monotonic_cycle_counter, + query, + memory, + ) + } + fn finish_frame(&mut self, _panicked: bool) { + self.default_precompiles_processor.finish_frame(_panicked); + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/storage.rs b/core/multivm_deps/vm_m6/src/oracles/storage.rs new file mode 100644 index 000000000000..a3391025b0f1 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/storage.rs @@ -0,0 +1,306 @@ +use std::collections::HashMap; + +use crate::glue::GlueInto; +use crate::storage::StoragePtr; + +use crate::history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, +}; + +use zk_evm::abstractions::RefundedAmounts; +use zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; +use zk_evm::{ + abstractions::{RefundType, Storage as VmStorageOracle}, + aux_structures::{LogQuery, Timestamp}, +}; +use zksync_types::utils::storage_key_for_eth_balance; +use zksync_types::{ + AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, + U256, +}; +use zksync_utils::u256_to_h256; + +use super::OracleWithHistory; + +// While the storage does not support different shards, it was decided to write the +// code of the StorageOracle with the shard parameters in mind. +pub fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey { + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)) +} + +pub fn storage_key_of_log(query: &LogQuery) -> StorageKey { + triplet_to_storage_key(query.shard_id, query.address, query.key) +} + +#[derive(Debug)] +pub struct StorageOracle<'a, H: HistoryMode> { + // Access to the persistent storage. Please note that it + // is used only for read access. All the actual writes happen + // after the execution has ended.
+ pub storage: HistoryRecorder, H>, + + pub frames_stack: AppDataFrameManagerWithHistory, + + // The changes that have been paid for in previous transactions. + // It is a mapping from storage key to the number of *bytes* that was paid by the user + // to cover this slot. + // `paid_changes` history is necessary + pub paid_changes: HistoryRecorder, HistoryEnabled>, +} + +impl OracleWithHistory for StorageOracle<'_, HistoryEnabled> { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.frames_stack.rollback_to_timestamp(timestamp); + self.storage.rollback_to_timestamp(timestamp); + self.paid_changes.rollback_to_timestamp(timestamp); + } +} + +impl<'a, H: HistoryMode> StorageOracle<'a, H> { + pub fn new(storage: StoragePtr<'a>) -> Self { + Self { + storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)), + frames_stack: Default::default(), + paid_changes: Default::default(), + } + } + + pub fn delete_history(&mut self) { + self.frames_stack.delete_history(); + self.storage.delete_history(); + self.paid_changes.delete_history(); + } + + fn is_storage_key_free(&self, key: &StorageKey) -> bool { + key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS + || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS) + } + + pub fn read_value(&mut self, mut query: LogQuery) -> LogQuery { + let key = triplet_to_storage_key(query.shard_id, query.address, query.key); + let current_value = self.storage.read_from_storage(&key); + + query.read_value = current_value; + + self.frames_stack.push_forward( + StorageLogQuery { + log_query: query.glue_into(), + log_type: StorageLogQueryType::Read, + }, + query.timestamp, + ); + + query + } + + pub fn write_value(&mut self, mut query: LogQuery) -> LogQuery { + let key = triplet_to_storage_key(query.shard_id, query.address, query.key); + let current_value = + self.storage + .write_to_storage(key, query.written_value, query.timestamp); + + let log_query_type = if self.storage.get_ptr().borrow_mut().is_write_initial(&key) { + StorageLogQueryType::InitialWrite + } else { + StorageLogQueryType::RepeatedWrite + }; + + query.read_value = current_value; + + let mut storage_log_query = StorageLogQuery { + log_query: query.glue_into(), + log_type: log_query_type, + }; + self.frames_stack + .push_forward(storage_log_query, query.timestamp); + storage_log_query.log_query.rollback = true; + self.frames_stack + .push_rollback(storage_log_query, query.timestamp); + storage_log_query.log_query.rollback = false; + + query + } + + // Returns the amount of funds that has been already paid for writes into the storage slot + fn prepaid_for_write(&self, storage_key: &StorageKey) -> u32 { + self.paid_changes + .inner() + .get(storage_key) + .copied() + .unwrap_or_default() + } + + pub(crate) fn base_price_for_write(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + if self.is_storage_key_free(&storage_key) { + return 0; + } + + let is_initial = self + .storage + .get_ptr() + .borrow_mut() + .is_write_initial(&storage_key); + + get_pubdata_price_bytes(query, is_initial) + } + + // Returns the price of the update in terms of pubdata bytes. 
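// A worked example of the prepaid accounting (a sketch; the concrete byte costs
// come from zk_evm's system params, assumed here to be 64 for an initial write
// and 40 for a repeated one):
//
//     - Tx A performs the first-ever write to a slot: base cost = 64 bytes,
//       prepaid = 0, so the user pays 64 and `paid_changes[key]` becomes 64.
//     - Tx B later in the same batch writes the same slot again: the write is
//       now repeated, base cost = 40, prepaid = 64, so `value_update_price`
//       below returns 0 and no extra pubdata is charged.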
+ fn value_update_price(&self, query: &LogQuery) -> u32 { + let storage_key = storage_key_of_log(query); + + let base_cost = self.base_price_for_write(query); + + let already_paid = self.prepaid_for_write(&storage_key); + + if base_cost <= already_paid { + // Some other transaction has already paid for this slot, no need to pay anything + 0u32 + } else { + base_cost - already_paid + } + } + + pub fn get_size(&self) -> usize { + let frames_stack_size = self.frames_stack.get_size(); + let paid_changes_size = + self.paid_changes.inner().len() * std::mem::size_of::<(StorageKey, u32)>(); + + frames_stack_size + paid_changes_size + } + + pub fn get_history_size(&self) -> usize { + let storage_size = self.storage.borrow_history(|h| h.len(), 0) + * std::mem::size_of::<::HistoryRecord>(); + let frames_stack_size = self.frames_stack.get_history_size(); + let paid_changes_size = self.paid_changes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + storage_size + frames_stack_size + paid_changes_size + } +} + +impl VmStorageOracle for StorageOracle<'_, H> { + // Perform a storage read/write access by taking a partially filled query + // and returning the filled query and a cold/warm marker for pricing purposes + fn execute_partial_query( + &mut self, + _monotonic_cycle_counter: u32, + query: LogQuery, + ) -> LogQuery { + // vlog::trace!( + // "execute partial query cyc {:?} addr {:?} key {:?}, rw {:?}, wr {:?}, tx {:?}", + // _monotonic_cycle_counter, + // query.address, + // query.key, + // query.rw_flag, + // query.written_value, + // query.tx_number_in_block + // ); + assert!(!query.rollback); + if query.rw_flag { + // The number of bytes that have been compensated by the user to perform this write + let storage_key = storage_key_of_log(&query); + + // It is considered that the user has paid the whole base price for the writes + let to_pay_by_user = self.base_price_for_write(&query); + let prepaid = self.prepaid_for_write(&storage_key); + + if to_pay_by_user > prepaid { + self.paid_changes.apply_historic_record( + HashMapHistoryEvent { + key: storage_key, + value: Some(to_pay_by_user), + }, + query.timestamp, + ); + } + self.write_value(query) + } else { + self.read_value(query) + } + } + + // We can return the size of the refund before each storage query. + // Note, that while the `RefundType` allows providing refunds both in + // `ergs` and `pubdata`, only refunds in pubdata will be compensated to the users + fn estimate_refunds_for_write( + &mut self, // to avoid any hacks inside, like prefetch + _monotonic_cycle_counter: u32, + partial_query: &LogQuery, + ) -> RefundType { + let price_to_pay = self.value_update_price(partial_query); + + RefundType::RepeatedWrite(RefundedAmounts { + ergs: 0, + // `INITIAL_STORAGE_WRITE_PUBDATA_BYTES` is the default amount of pubdata bytes the user pays for.
+ pubdata_bytes: (INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32) - price_to_pay, + }) + } + + // Indicate a start of execution frame for rollback purposes + fn start_frame(&mut self, timestamp: Timestamp) { + self.frames_stack.push_frame(timestamp); + } + + // Indicate that the execution frame went out of scope, so we can + // log the history and either rollback immediately or keep records to rollback later + fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) { + // If we panic then we append the forward and rollback entries to the forward of the parent, + // otherwise we place the rollbacks of the child before the rollbacks of the parent + if panicked { + // perform actual rollback + for query in self.frames_stack.rollback().current_frame().iter().rev() { + let read_value = match query.log_type { + StorageLogQueryType::Read => { + // Having Read logs in rollback is not possible + vlog::warn!("Read log in rollback queue {:?}", query); + continue; + } + StorageLogQueryType::InitialWrite | StorageLogQueryType::RepeatedWrite => { + query.log_query.read_value + } + }; + + let LogQuery { written_value, .. } = query.log_query.glue_into(); + let key = triplet_to_storage_key( + query.log_query.shard_id, + query.log_query.address, + query.log_query.key, + ); + let current_value = self.storage.write_to_storage( + key, + // NOTE, that since it is a rollback query, + // the `read_value` is being set + read_value, timestamp, + ); + + // Additional validation that the current value was correct + // Unwrap is safe because the return value from write_inner is the previous value in this leaf. + // It is impossible to set leaf value to `None` + assert_eq!(current_value, written_value); + } + + self.frames_stack + .move_rollback_to_forward(|_| true, timestamp); + } + self.frames_stack.merge_frame(timestamp); + } +} + +/// Returns the number of bytes needed to publish a slot. +// Since we need to publish the state diffs onchain, for each of the updated storage slots +// we basically need to publish the following pair: (key, new_value). +// While new_value is always 32 bytes long, for key we use the following optimization: +// - The first time we publish it, we use 32 bytes. +// Then, we remember an 8-byte id for this slot and assign it to it. We call this an initial write. +// - The second time we publish it, we will use this 8-byte id instead of the 32 bytes of the entire key. +// So the total size of the published pubdata is 40 bytes.
We call this kind of write a repeated one. +fn get_pubdata_price_bytes(_query: &LogQuery, is_initial: bool) -> u32 { + if is_initial { + zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32 + } else { + zk_evm::zkevm_opcode_defs::system_params::REPEATED_STORAGE_WRITE_PUBDATA_BYTES as u32 + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/bootloader.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/bootloader.rs new file mode 100644 index 000000000000..c2a02a5690bb --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/tracer/bootloader.rs @@ -0,0 +1,110 @@ +use std::marker::PhantomData; + +use crate::history_recorder::HistoryMode; +use crate::memory::SimpleMemory; +use crate::oracles::tracer::{ + utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, + PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, +}; + +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::{ErrorFlags, VmLocalState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{Opcode, RetOpcode}, +}; + +/// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. +/// Also, records whether this `ret` was caused by an "out of gas" panic. +#[derive(Debug, Clone, Default)] +pub struct BootloaderTracer { + is_bootloader_out_of_gas: bool, + ret_from_the_bootloader: Option, + gas_spent_on_bytecodes_and_long_messages: u32, + _marker: PhantomData, +} + +impl Tracer for BootloaderTracer { + const CALL_AFTER_DECODING: bool = true; + const CALL_BEFORE_EXECUTION: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + state: VmLocalStateData<'_>, + data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + // We should check not only for the `NOT_ENOUGH_ERGS` flag but also that the current frame is the bootloader. + if Self::current_frame_is_bootloader(state.vm_local_state) + && data + .error_flags_accumulated + .contains(ErrorFlags::NOT_ENOUGH_ERGS) + { + self.is_bootloader_out_of_gas = true; + } + } + + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + _memory: &Self::SupportedMemory, + ) { + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + // Decodes next opcode. + // `self` is passed as `tracer`, so `self.after_decoding` will be called and it will catch "out of gas".
+ let (next_opcode, _, _) = + zk_evm::vm_state::read_and_decode(state.vm_local_state, memory, &mut DummyTracer, self); + if Self::current_frame_is_bootloader(state.vm_local_state) { + if let Opcode::Ret(ret) = next_opcode.inner.variant.opcode { + self.ret_from_the_bootloader = Some(ret); + } + } + } +} + +impl ExecutionEndTracer for BootloaderTracer { + fn should_stop_execution(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Ok) + } +} + +impl PendingRefundTracer for BootloaderTracer {} +impl StorageInvocationTracer for BootloaderTracer {} + +impl PubdataSpentTracer for BootloaderTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} + +impl BootloaderTracer { + fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { + // The current frame is the bootloader if the callstack depth is 1. + // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior + // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. + local_state.callstack.inner.len() == 1 + } + + pub fn is_bootloader_out_of_gas(&self) -> bool { + self.is_bootloader_out_of_gas + } + + pub fn bootloader_panicked(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Panic) + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/call.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/call.rs new file mode 100644 index 000000000000..fc345a64e482 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/tracer/call.rs @@ -0,0 +1,316 @@ +use crate::errors::VmRevertReason; +use crate::glue::GlueInto; +use crate::history_recorder::HistoryMode; +use crate::memory::SimpleMemory; +use std::convert::TryFrom; +use std::marker::PhantomData; +use std::mem; +use zk_evm::abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, +}; +use zk_evm::zkevm_opcode_defs::{ + FarCallABI, FarCallOpcode, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +}; +use zkevm_assembly::zkevm_opcode_defs::FatPointer; +use zksync_config::constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_types::vm_trace::{Call, CallType}; +use zksync_types::U256; + +/// NOTE: Auto-implementing Clone for this tracer can cause a stack overflow. +/// This is because of the `stack` field, which is a Vec with nested vecs inside. +/// If you need to implement Clone for this tracer, consider not copying the `stack` field. +/// The `extract_calls` method will extract the necessary stack for you.
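// The end result is a tree of `Call` values (a hypothetical shape, not part of
// this diff): each far/near call pushes an entry onto `stack`, and the matching
// `Ret` pops it and attaches it to its parent's `calls` vector, e.g.
//
//     Call { r#type: CallType::Call(..), from: user, to: contract_a, gas_used: ..,
//            calls: vec![Call { r#type: CallType::Call(..), to: contract_b, .. }] }
//
// so after execution the remaining top-level entries describe the whole trace.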
+#[derive(Debug, Default)] +pub struct CallTracer { + stack: Vec, + _phantom: PhantomData, +} + +impl CallTracer { + pub fn new() -> Self { + Self { + stack: vec![], + _phantom: PhantomData, + } + } +} + +impl Tracer for CallTracer { + const CALL_AFTER_EXECUTION: bool = true; + + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + + fn before_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: BeforeExecutionData, + _memory: &Self::SupportedMemory, + ) { + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + let call_type = match data.opcode.variant.opcode { + Opcode::NearCall(_) => CallType::NearCall, + Opcode::FarCall(far_call) => CallType::Call(far_call.glue_into()), + Opcode::Ret(ret_code) => { + self.handle_ret_op_code(state, data, memory, ret_code); + return; + } + _ => { + return; + } + }; + + let mut current_call = Call { + r#type: call_type, + gas: 0, + ..Default::default() + }; + match call_type { + CallType::Call(_) | CallType::Create => { + self.handle_far_call_op_code(state, data, memory, &mut current_call) + } + CallType::NearCall => { + self.handle_near_call_op_code(state, data, memory, &mut current_call); + } + } + self.stack.push(current_call); + } +} + +impl CallTracer { + /// We use parent gas for proper calculation of gas used in the trace. + /// This method updates parent gas for the current call. + fn update_parent_gas(&mut self, state: &VmLocalStateData<'_>, current_call: &mut Call) { + let current = state.vm_local_state.callstack.current; + let parent_gas = state + .vm_local_state + .callstack + .inner + .last() + .map(|call| call.ergs_remaining + current.ergs_remaining) + .unwrap_or(current.ergs_remaining); + current_call.parent_gas = parent_gas; + } + + fn handle_near_call_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &SimpleMemory, + current_call: &mut Call, + ) { + self.update_parent_gas(&state, current_call); + } + + fn handle_far_call_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &SimpleMemory, + current_call: &mut Call, + ) { + self.update_parent_gas(&state, current_call); + let current = state.vm_local_state.callstack.current; + // All calls from the actual users are mimic calls, + // so we need to check whether the previous call was to the deployer. + // In that case it is actually a constructor call: + // at this stage the caller is the user and the callee is the deployed contract.
+        let call_type = if let CallType::Call(far_call) = current_call.r#type {
+            if matches!(far_call.glue_into(), FarCallOpcode::Mimic) {
+                let previous_caller = state
+                    .vm_local_state
+                    .callstack
+                    .inner
+                    .last()
+                    .map(|call| call.this_address)
+                    // It is actually safe to just unwrap here, because there is at least
+                    // one call in the stack, but we want to be robust against future changes.
+                    .unwrap_or(current.this_address);
+                if previous_caller == CONTRACT_DEPLOYER_ADDRESS {
+                    CallType::Create
+                } else {
+                    CallType::Call(far_call)
+                }
+            } else {
+                CallType::Call(far_call)
+            }
+        } else {
+            unreachable!()
+        };
+        let calldata = if current.code_page.0 == 0 || current.ergs_remaining == 0 {
+            vec![]
+        } else {
+            let packed_abi =
+                state.vm_local_state.registers[CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER as usize];
+            assert!(packed_abi.is_pointer);
+            let far_call_abi = FarCallABI::from_u256(packed_abi.value);
+            memory.read_unaligned_bytes(
+                far_call_abi.memory_quasi_fat_pointer.memory_page as usize,
+                far_call_abi.memory_quasi_fat_pointer.start as usize,
+                far_call_abi.memory_quasi_fat_pointer.length as usize,
+            )
+        };
+
+        current_call.input = calldata;
+        current_call.r#type = call_type;
+        current_call.from = current.msg_sender;
+        current_call.to = current.this_address;
+        current_call.value = U256::from(current.context_u128_value);
+        current_call.gas = current.ergs_remaining;
+    }
+
+    fn save_output(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        memory: &SimpleMemory<H>,
+        ret_opcode: RetOpcode,
+        current_call: &mut Call,
+    ) {
+        let fat_data_pointer =
+            state.vm_local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize];
+
+        // If `fat_data_pointer` is not a pointer, then there is no output.
+        let output = if fat_data_pointer.is_pointer {
+            let fat_data_pointer = FatPointer::from_u256(fat_data_pointer.value);
+            if !fat_data_pointer.is_trivial() {
+                Some(memory.read_unaligned_bytes(
+                    fat_data_pointer.memory_page as usize,
+                    fat_data_pointer.start as usize,
+                    fat_data_pointer.length as usize,
+                ))
+            } else {
+                None
+            }
+        } else {
+            None
+        };
+
+        match ret_opcode {
+            RetOpcode::Ok => {
+                current_call.output = output.unwrap_or_default();
+            }
+            RetOpcode::Revert => {
+                if let Some(output) = output {
+                    match VmRevertReason::try_from(output.as_slice()) {
+                        Ok(rev) => {
+                            current_call.revert_reason = Some(rev.to_string());
+                        }
+                        Err(_) => {
+                            current_call.revert_reason = Some(format!("{:?}", hex::encode(output)));
+                        }
+                    }
+                } else {
+                    current_call.revert_reason = Some("Unknown revert reason".to_string());
+                }
+            }
+            RetOpcode::Panic => {
+                current_call.error = Some("Panic".to_string());
+            }
+        }
+    }
+
+    fn handle_ret_op_code(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        _data: AfterExecutionData,
+        memory: &SimpleMemory<H>,
+        ret_opcode: RetOpcode,
+    ) {
+        // It is safe to unwrap here because we are sure that there is at least one call in the stack.
+        let mut current_call = self.stack.pop().unwrap();
+        current_call.gas_used =
+            current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining;
+
+        if current_call.r#type != CallType::NearCall {
+            self.save_output(state, memory, ret_opcode, &mut current_call);
+        }
+
+        // If there is a parent call, push the current call to it;
+        // otherwise push the current call onto the stack, since it is the top-level call.
+        if let Some(parent_call) = self.stack.last_mut() {
+            parent_call.calls.push(current_call);
+        } else {
+            self.stack.push(current_call);
+        }
+    }
+
+    // Filters all near calls out of the call stack.
+    // Importantly, the very first call is a near call that wraps several normal
+    // or mimic calls, so we return all children of that near call.
+    pub fn extract_calls(&mut self) -> Vec<Call> {
+        if let Some(current_call) = self.stack.pop() {
+            filter_near_call(current_call)
+        } else {
+            vec![]
+        }
+    }
+}
+
+// Filters all near calls out of the call stack.
+// Normally we are not interested in `NearCall`s, because they are just wrappers for internal calls.
+fn filter_near_call(mut call: Call) -> Vec<Call> {
+    let mut calls = vec![];
+    let original_calls = std::mem::take(&mut call.calls);
+    for call in original_calls {
+        calls.append(&mut filter_near_call(call));
+    }
+    call.calls = calls;
+
+    if call.r#type == CallType::NearCall {
+        mem::take(&mut call.calls)
+    } else {
+        vec![call]
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{
+        glue::GlueInto,
+        oracles::tracer::call::{filter_near_call, Call, CallType},
+    };
+    use zk_evm::zkevm_opcode_defs::FarCallOpcode;
+
+    #[test]
+    fn test_filter_near_calls() {
+        let mut call = Call::default();
+        let filtered_call = filter_near_call(call.clone());
+        assert_eq!(filtered_call.len(), 1);
+
+        let mut near_call = call.clone();
+        near_call.r#type = CallType::NearCall;
+        let filtered_call = filter_near_call(near_call.clone());
+        assert_eq!(filtered_call.len(), 0);
+
+        call.r#type = CallType::Call(FarCallOpcode::Mimic.glue_into());
+        call.calls = vec![Call::default(), Call::default(), near_call.clone()];
+        let filtered_call = filter_near_call(call.clone());
+        assert_eq!(filtered_call.len(), 1);
+        assert_eq!(filtered_call[0].calls.len(), 2);
+
+        let mut near_call = near_call;
+        near_call.calls = vec![Call::default(), Call::default(), near_call.clone()];
+        call.calls = vec![Call::default(), Call::default(), near_call];
+        let filtered_call = filter_near_call(call);
+        assert_eq!(filtered_call.len(), 1);
+        assert_eq!(filtered_call[0].calls.len(), 4);
+    }
+}
diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/mod.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/mod.rs
new file mode 100644
index 000000000000..f4a3dcda1b5f
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/oracles/tracer/mod.rs
@@ -0,0 +1,52 @@
+use zk_evm::abstractions::Tracer;
+use zk_evm::vm_state::VmLocalState;
+
+mod bootloader;
+mod call;
+mod one_tx;
+mod transaction_result;
+mod utils;
+mod validation;
+
+pub use bootloader::BootloaderTracer;
+pub use call::CallTracer;
+pub use one_tx::OneTxTracer;
+pub use validation::{ValidationError, ValidationTracer, ValidationTracerParams};
+
+pub(crate) use transaction_result::TransactionResultTracer;
+
+use crate::history_recorder::HistoryMode;
+use crate::memory::SimpleMemory;
+
+pub trait ExecutionEndTracer<H: HistoryMode>: Tracer<SupportedMemory = SimpleMemory<H>> {
+    // Returns whether the vm execution should stop.
+    fn should_stop_execution(&self) -> bool;
+}
+
+pub trait PendingRefundTracer<H: HistoryMode>: Tracer<SupportedMemory = SimpleMemory<H>> {
+    /// Some(x) means that the bootloader has asked the operator to provide the refund for the
+    /// transaction, where `x` is the refund that the bootloader has suggested on its own.
+    fn requested_refund(&self) -> Option<u32> {
+        None
+    }
+
+    /// Set the current request for refund as fulfilled.
+    fn set_refund_as_done(&mut self) {}
+}
+
+pub trait PubdataSpentTracer<H: HistoryMode>: Tracer<SupportedMemory = SimpleMemory<H>> {
+    /// Returns how much gas was spent on pubdata.
+    fn gas_spent_on_pubdata(&self, _vm_local_state: &VmLocalState) -> u32 {
+        0
+    }
+}
+
+pub trait StorageInvocationTracer<H: HistoryMode>:
+    Tracer<SupportedMemory = SimpleMemory<H>>
+{
+    /// Set how many invocations of the storage oracle were missed.
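+    /// Implementations may stop execution once the limit is exceeded; see
+    /// `is_limit_reached` and its use in `TransactionResultTracer` below.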
+    fn set_missed_storage_invocations(&mut self, _missed_storage_invocation: usize) {}
+    fn is_limit_reached(&self) -> bool {
+        false
+    }
+}
diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/one_tx.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/one_tx.rs
new file mode 100644
index 000000000000..b2b04b13181c
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/oracles/tracer/one_tx.rs
@@ -0,0 +1,166 @@
+use super::utils::{computational_gas_price, print_debug_if_needed};
+use crate::{
+    history_recorder::HistoryMode,
+    memory::SimpleMemory,
+    oracles::tracer::{
+        utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook},
+        BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer,
+    },
+    vm::get_vm_hook_params,
+};
+
+use crate::oracles::tracer::{CallTracer, StorageInvocationTracer};
+use zk_evm::{
+    abstractions::{
+        AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData,
+    },
+    vm_state::VmLocalState,
+};
+use zksync_types::vm_trace::Call;
+
+/// Allows any opcodes, but tells the VM to end the execution once the tx is over.
+// Internally depends on the bootloader's VM hooks to get notified once the transaction is finished.
+#[derive(Debug)]
+pub struct OneTxTracer<H: HistoryMode> {
+    tx_has_been_processed: bool,
+
+    // Some(x) means that the bootloader has asked the operator
+    // to provide the refund to the user, where `x` is the refund proposed
+    // by the bootloader itself.
+    pending_operator_refund: Option<u32>,
+
+    pub refund_gas: u32,
+    pub gas_spent_on_bytecodes_and_long_messages: u32,
+
+    // Amount of gas used during account validation.
+    computational_gas_used: u32,
+    // Maximum amount of gas that we're allowed to use during account validation.
+    computational_gas_limit: u32,
+    in_account_validation: bool,
+
+    bootloader_tracer: BootloaderTracer<H>,
+    call_tracer: Option<CallTracer<H>>,
+}
+
+impl<H: HistoryMode> Tracer for OneTxTracer<H> {
+    const CALL_BEFORE_EXECUTION: bool = true;
+    const CALL_AFTER_EXECUTION: bool = true;
+    type SupportedMemory = SimpleMemory<H>;
+
+    fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {}
+
+    fn after_decoding(
+        &mut self,
+        _state: VmLocalStateData<'_>,
+        _data: AfterDecodingData,
+        _memory: &Self::SupportedMemory,
+    ) {
+    }
+
+    fn before_execution(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: BeforeExecutionData,
+        memory: &Self::SupportedMemory,
+    ) {
+        if self.in_account_validation {
+            self.computational_gas_used = self
+                .computational_gas_used
+                .saturating_add(computational_gas_price(state, &data));
+        }
+
+        let hook = VmHook::from_opcode_memory(&state, &data);
+        print_debug_if_needed(&hook, &state, memory);
+
+        match hook {
+            VmHook::TxHasEnded => self.tx_has_been_processed = true,
+            VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(),
+            VmHook::AskOperatorForRefund => {
+                self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32())
+            }
+            VmHook::NoValidationEntered => self.in_account_validation = false,
+            VmHook::AccountValidationEntered => self.in_account_validation = true,
+            _ => {}
+        }
+
+        self.gas_spent_on_bytecodes_and_long_messages +=
+            gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data);
+    }
+
+    fn after_execution(
+        &mut self,
+        state: VmLocalStateData<'_>,
+        data: AfterExecutionData,
+        memory: &Self::SupportedMemory,
+    ) {
+        self.bootloader_tracer.after_execution(state, data, memory);
+        if let Some(call_tracer) = self.call_tracer.as_mut() {
+            call_tracer.after_execution(state, data, memory);
+        }
+    }
+}
+
+impl<H: HistoryMode>
ExecutionEndTracer for OneTxTracer { + fn should_stop_execution(&self) -> bool { + self.tx_has_been_processed + || self.bootloader_tracer.should_stop_execution() + || self.validation_run_out_of_gas() + } +} + +impl PendingRefundTracer for OneTxTracer { + fn requested_refund(&self) -> Option { + self.pending_operator_refund + } + + fn set_refund_as_done(&mut self) { + self.pending_operator_refund = None; + } +} + +impl PubdataSpentTracer for OneTxTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} + +impl StorageInvocationTracer for OneTxTracer {} + +impl OneTxTracer { + pub fn new(computational_gas_limit: u32, with_call_tracer: bool) -> Self { + let call_tracer = if with_call_tracer { + Some(CallTracer::new()) + } else { + None + }; + Self { + tx_has_been_processed: false, + pending_operator_refund: None, + refund_gas: 0, + gas_spent_on_bytecodes_and_long_messages: 0, + computational_gas_used: 0, + computational_gas_limit, + in_account_validation: false, + bootloader_tracer: BootloaderTracer::default(), + call_tracer, + } + } + + pub fn is_bootloader_out_of_gas(&self) -> bool { + self.bootloader_tracer.is_bootloader_out_of_gas() + } + + pub fn tx_has_been_processed(&self) -> bool { + self.tx_has_been_processed + } + + pub fn validation_run_out_of_gas(&self) -> bool { + self.computational_gas_used > self.computational_gas_limit + } + + pub fn call_traces(&mut self) -> Vec { + self.call_tracer + .as_mut() + .map_or(vec![], |call_tracer| call_tracer.extract_calls()) + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/transaction_result.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/transaction_result.rs new file mode 100644 index 000000000000..0adbf36a9bed --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/tracer/transaction_result.rs @@ -0,0 +1,129 @@ +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::VmLocalState, + zkevm_opcode_defs::FatPointer, +}; +use zksync_types::{vm_trace, U256}; + +use crate::memory::SimpleMemory; +use crate::oracles::tracer::{ + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, +}; +use crate::vm::get_vm_hook_params; +use crate::{ + history_recorder::HistoryMode, + oracles::tracer::utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, read_pointer, + VmHook, + }, +}; + +#[derive(Debug)] +pub(crate) struct TransactionResultTracer { + pub(crate) revert_reason: Option>, + gas_spent_on_bytecodes_and_long_messages: u32, + pub(crate) call_tracer: Option>, + missed_storage_invocation_limit: usize, + missed_storage_invocation: usize, +} + +impl TransactionResultTracer { + pub(crate) fn new(missed_storage_invocation_limit: usize, with_call_tracer: bool) -> Self { + let call_tracer = if with_call_tracer { + Some(CallTracer::new()) + } else { + None + }; + Self { + missed_storage_invocation_limit, + revert_reason: None, + gas_spent_on_bytecodes_and_long_messages: 0, + missed_storage_invocation: 0, + call_tracer, + } + } + pub fn call_trace(&mut self) -> Option> { + self.call_tracer + .as_mut() + .map(|call_tracer| call_tracer.extract_calls()) + } +} + +impl Tracer for TransactionResultTracer { + const CALL_BEFORE_EXECUTION: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: 
VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + if matches!(hook, VmHook::ExecutionResult) { + let vm_hook_params = get_vm_hook_params(memory); + + let success = vm_hook_params[0]; + let returndata_ptr = FatPointer::from_u256(vm_hook_params[1]); + let returndata = read_pointer(memory, returndata_ptr); + + if success == U256::zero() { + self.revert_reason = Some(returndata); + } else { + self.revert_reason = None; + } + } + + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + if let Some(call_tracer) = self.call_tracer.as_mut() { + call_tracer.after_execution(state, data, memory); + } + } +} + +impl ExecutionEndTracer for TransactionResultTracer { + // If we reach the limit of memory invocations, we stop the execution and return the error to user + fn should_stop_execution(&self) -> bool { + self.is_limit_reached() + } +} + +impl PendingRefundTracer for TransactionResultTracer {} + +impl PubdataSpentTracer for TransactionResultTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} + +impl StorageInvocationTracer for TransactionResultTracer { + fn set_missed_storage_invocations(&mut self, missed_storage_invocation: usize) { + self.missed_storage_invocation = missed_storage_invocation; + } + fn is_limit_reached(&self) -> bool { + self.missed_storage_invocation > self.missed_storage_invocation_limit + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/utils.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/utils.rs new file mode 100644 index 000000000000..c500a213cced --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/tracer/utils.rs @@ -0,0 +1,205 @@ +use crate::history_recorder::HistoryMode; +use crate::memory::SimpleMemory; +use crate::utils::{aux_heap_page_from_base, heap_page_from_base}; +use crate::vm::{get_vm_hook_params, VM_HOOK_POSITION}; +use crate::vm_with_bootloader::BOOTLOADER_HEAP_PAGE; + +use zk_evm::aux_structures::MemoryPage; +use zk_evm::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; +use zk_evm::{ + abstractions::{BeforeExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, +}; +use zksync_config::constants::{ + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, + L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, +}; +use zksync_types::U256; +use zksync_utils::u256_to_h256; + +#[derive(Clone, Debug, Copy)] +pub(crate) enum VmHook { + AccountValidationEntered, + PaymasterValidationEntered, + NoValidationEntered, + ValidationStepEndeded, + TxHasEnded, + DebugLog, + DebugReturnData, + NoHook, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + ExecutionResult, +} + +impl VmHook { + pub fn from_opcode_memory(state: &VmLocalStateData<'_>, data: &BeforeExecutionData) -> Self { + let opcode_variant = data.opcode.variant; + let heap_page = + 
heap_page_from_base(state.vm_local_state.callstack.current.base_memory_page).0;
+
+        let src0_value = data.src0_value.value;
+
+        let fat_ptr = FatPointer::from_u256(src0_value);
+
+        let value = data.src1_value.value;
+
+        // Only UMA opcodes in the bootloader serve as VM hooks.
+        if !matches!(opcode_variant.opcode, Opcode::UMA(UMAOpcode::HeapWrite))
+            || heap_page != BOOTLOADER_HEAP_PAGE
+            || fat_ptr.offset != VM_HOOK_POSITION * 32
+        {
+            return Self::NoHook;
+        }
+
+        match value.as_u32() {
+            0 => Self::AccountValidationEntered,
+            1 => Self::PaymasterValidationEntered,
+            2 => Self::NoValidationEntered,
+            3 => Self::ValidationStepEndeded,
+            4 => Self::TxHasEnded,
+            5 => Self::DebugLog,
+            6 => Self::DebugReturnData,
+            7 => Self::NearCallCatch,
+            8 => Self::AskOperatorForRefund,
+            9 => Self::NotifyAboutRefund,
+            10 => Self::ExecutionResult,
+            _ => panic!("Unknown hook"),
+        }
+    }
+}
+
+pub(crate) fn get_debug_log<H: HistoryMode>(
+    state: &VmLocalStateData<'_>,
+    memory: &SimpleMemory<H>,
+) -> String {
+    let vm_hook_params: Vec<_> = get_vm_hook_params(memory)
+        .into_iter()
+        .map(u256_to_h256)
+        .collect();
+    let msg = vm_hook_params[0].as_bytes().to_vec();
+    let data = vm_hook_params[1].as_bytes().to_vec();
+
+    let msg = String::from_utf8(msg).expect("Invalid debug message");
+    let data = U256::from_big_endian(&data);
+
+    // For long data, it is better to use hex encoding for greater readability.
+    let data_str = if data > U256::from(u64::max_value()) {
+        let mut bytes = [0u8; 32];
+        data.to_big_endian(&mut bytes);
+        format!("0x{}", hex::encode(bytes))
+    } else {
+        data.to_string()
+    };
+
+    let tx_id = state.vm_local_state.tx_number_in_block;
+
+    format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str)
+}
+
+/// Reads the memory slice represented by the fat pointer.
+/// Note that the fat pointer must point to accessible memory (i.e. not cleared up yet).
+pub(crate) fn read_pointer<H: HistoryMode>(
+    memory: &SimpleMemory<H>,
+    pointer: FatPointer,
+) -> Vec<u8> {
+    let FatPointer {
+        offset,
+        length,
+        start,
+        memory_page,
+    } = pointer;
+
+    // The actual bounds of the returndata ptr are [start+offset..start+length].
+    let mem_region_start = start + offset;
+    let mem_region_length = length - offset;
+
+    memory.read_unaligned_bytes(
+        memory_page as usize,
+        mem_region_start as usize,
+        mem_region_length as usize,
+    )
+}
+
+/// Outputs the returndata for the latest call.
+/// This is usually used to output the revert reason.
+pub(crate) fn get_debug_returndata<H: HistoryMode>(memory: &SimpleMemory<H>) -> String {
+    let vm_hook_params: Vec<_> = get_vm_hook_params(memory);
+    let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]);
+    let returndata = read_pointer(memory, returndata_ptr);
+
+    format!("0x{}", hex::encode(returndata))
+}
+
+/// Accepts a VM hook and, if it requires some debug log output, prints it.
+pub(crate) fn print_debug_if_needed<H: HistoryMode>(
+    hook: &VmHook,
+    state: &VmLocalStateData<'_>,
+    memory: &SimpleMemory<H>,
+) {
+    let log = match hook {
+        VmHook::DebugLog => get_debug_log(state, memory),
+        VmHook::DebugReturnData => get_debug_returndata(memory),
+        _ => return,
+    };
+
+    vlog::trace!("{}", log);
+}
+
+pub(crate) fn computational_gas_price(
+    state: VmLocalStateData<'_>,
+    data: &BeforeExecutionData,
+) -> u32 {
+    // We calculate the computational gas used as the raw opcode price plus the cost of precompile calls.
+    // This calculation is incomplete, as it misses the decommitment and memory growth costs.
+    // To calculate the decommitment cost we would need access to the decommitter oracle, which is not available to the tracer right now.
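+    // (What the code below does count: the opcode's base ergs price, plus the ergs
+    // passed to the keccak256/sha256/ecrecover precompiles.)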
+ // Memory growth calculation is complex and it will require different logic for different opcodes (`FarCall`, `Ret`, `UMA`). + let base_price = data.opcode.inner.variant.ergs_price(); + let precompile_price = match data.opcode.variant.opcode { + Opcode::Log(LogOpcode::PrecompileCall) => { + let address = state.vm_local_state.callstack.current.this_address; + + if address == KECCAK256_PRECOMPILE_ADDRESS + || address == SHA256_PRECOMPILE_ADDRESS + || address == ECRECOVER_PRECOMPILE_ADDRESS + { + data.src1_value.value.low_u32() + } else { + 0 + } + } + _ => 0, + }; + base_price + precompile_price +} + +pub(crate) fn gas_spent_on_bytecodes_and_long_messages_this_opcode( + state: &VmLocalStateData<'_>, + data: &BeforeExecutionData, +) -> u32 { + if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) { + let current_stack = state.vm_local_state.callstack.get_current_stack(); + // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas. + // Note, that if there is less gas left than requested to burn it will be burnt anyway. + if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS + || current_stack.this_address == L1_MESSENGER_ADDRESS + { + std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining) + } else { + 0 + } + } else { + 0 + } +} + +pub(crate) fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: MemoryPage) -> u32 { + match far_call_abi.forwarding_mode { + FarCallForwardPageType::ForwardFatPointer => { + far_call_abi.memory_quasi_fat_pointer.memory_page + } + FarCallForwardPageType::UseAuxHeap => aux_heap_page_from_base(base_page).0, + FarCallForwardPageType::UseHeap => heap_page_from_base(base_page).0, + } +} diff --git a/core/multivm_deps/vm_m6/src/oracles/tracer/validation.rs b/core/multivm_deps/vm_m6/src/oracles/tracer/validation.rs new file mode 100644 index 000000000000..0c338b20fb95 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/oracles/tracer/validation.rs @@ -0,0 +1,482 @@ +use std::fmt; +use std::fmt::Display; +use std::{collections::HashSet, marker::PhantomData}; + +use crate::{ + errors::VmRevertReasonParsingResult, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{computational_gas_price, print_debug_if_needed, VmHook}, + ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + }, +}; + +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, +}; + +use crate::oracles::tracer::{utils::get_calldata_page_via_abi, StorageInvocationTracer}; +use crate::storage::StoragePtr; +use zksync_config::constants::{ + ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, + KECCAK256_PRECOMPILE_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, + SYSTEM_CONTEXT_ADDRESS, +}; +use zksync_types::{ + get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, +}; + +#[derive(Debug, Clone, Eq, PartialEq, Copy)] +#[allow(clippy::enum_variant_names)] +pub enum ValidationTracerMode { + // Should be activated when the transaction is being validated by user. + UserTxValidation, + // Should be activated when the transaction is being validated by the paymaster. 
+    PaymasterTxValidation,
+    // A state in which there are no restrictions on the execution.
+    NoValidation,
+}
+
+#[derive(Debug, Clone)]
+pub enum ViolatedValidationRule {
+    TouchedUnallowedStorageSlots(Address, U256),
+    CalledContractWithNoCode(Address),
+    TouchedUnallowedContext,
+    TookTooManyComputationalGas(u32),
+}
+
+pub enum ValidationError {
+    FailedTx(VmRevertReasonParsingResult),
+    VioalatedRule(ViolatedValidationRule),
+}
+
+impl Display for ViolatedValidationRule {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!(
+                f,
+                "Touched unallowed storage slots: address {}, key: {}",
+                hex::encode(contract),
+                hex::encode(u256_to_h256(*key))
+            ),
+            ViolatedValidationRule::CalledContractWithNoCode(contract) => {
+                write!(f, "Called contract with no code: {}", hex::encode(contract))
+            }
+            ViolatedValidationRule::TouchedUnallowedContext => {
+                write!(f, "Touched unallowed context")
+            }
+            ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => {
+                write!(
+                    f,
+                    "Took too many computational gas, allowed limit: {}",
+                    gas_limit
+                )
+            }
+        }
+    }
+}
+
+impl Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::FailedTx(revert_reason) => {
+                write!(f, "Validation revert: {}", revert_reason.revert_reason)
+            }
+            Self::VioalatedRule(rule) => {
+                write!(f, "Violated validation rules: {}", rule)
+            }
+        }
+    }
+}
+
+fn touches_allowed_context(address: Address, key: U256) -> bool {
+    // The context is not touched at all.
+    if address != SYSTEM_CONTEXT_ADDRESS {
+        return false;
+    }
+
+    // Only `chain_id` is allowed to be touched.
+    key == U256::from(0u32)
+}
+
+fn is_constant_code_hash(address: Address, key: U256, storage: StoragePtr<'_>) -> bool {
+    if address != ACCOUNT_CODE_STORAGE_ADDRESS {
+        // Not a code hash.
+        return false;
+    }
+
+    let value = storage.borrow_mut().get_value(&StorageKey::new(
+        AccountTreeId::new(address),
+        u256_to_h256(key),
+    ));
+
+    value != H256::zero()
+}
+
+fn valid_eth_token_call(address: Address, msg_sender: Address) -> bool {
+    let is_valid_caller = msg_sender == MSG_VALUE_SIMULATOR_ADDRESS
+        || msg_sender == CONTRACT_DEPLOYER_ADDRESS
+        || msg_sender == BOOTLOADER_ADDRESS;
+    address == L2_ETH_TOKEN_ADDRESS && is_valid_caller
+}
+
+/// Tracer that is used to ensure that the validation adheres to all the rules
+/// to prevent DDoS attacks on the server.
+#[derive(Clone)]
+pub struct ValidationTracer<'a, H> {
+    // A copy of this should be used in the storage oracle.
+    pub storage: StoragePtr<'a>,
+    pub validation_mode: ValidationTracerMode,
+    pub auxilary_allowed_slots: HashSet<H256>,
+    pub validation_error: Option<ViolatedValidationRule>,
+
+    user_address: Address,
+    paymaster_address: Address,
+    should_stop_execution: bool,
+    trusted_slots: HashSet<(Address, U256)>,
+    trusted_addresses: HashSet<Address>,
+    trusted_address_slots: HashSet<(Address, U256)>,
+    computational_gas_used: u32,
+    computational_gas_limit: u32,
+
+    _marker: PhantomData<H>,
+}
+
+impl<H> fmt::Debug for ValidationTracer<'_, H> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ValidationTracer")
+            .field("storage", &"StoragePtr")
+            .field("validation_mode", &self.validation_mode)
+            .field("auxilary_allowed_slots", &self.auxilary_allowed_slots)
+            .field("validation_error", &self.validation_error)
+            .field("user_address", &self.user_address)
+            .field("paymaster_address", &self.paymaster_address)
+            .field("should_stop_execution", &self.should_stop_execution)
+            .field("trusted_slots", &self.trusted_slots)
+            .field("trusted_addresses", &self.trusted_addresses)
+            .field("trusted_address_slots", &self.trusted_address_slots)
+            .finish()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct ValidationTracerParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e. the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted and whose values are treated as new trusted addresses.
+    /// They are needed to work correctly with the beacon proxy pattern, where the address of the
+    /// implementation is stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+    /// Amount of computational gas that the validation step is allowed to use.
+    pub computational_gas_limit: u32,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct NewTrustedValidationItems {
+    pub new_allowed_slots: Vec<H256>,
+    pub new_trusted_addresses: Vec<Address>,
+}
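For orientation, here is a minimal sketch of how these parameters might be wired up. The storage handle, the addresses, and the gas limit below are illustrative placeholders, not values taken from this patch:

use std::collections::HashSet;

// Hypothetical helper; assumes the vm_m6 types above are in scope and that the
// caller supplies the storage pointer and the two addresses.
fn build_validation_tracer<'a, H: HistoryMode>(
    storage: StoragePtr<'a>,
    user_address: Address,
    paymaster_address: Address,
) -> ValidationTracer<'a, H> {
    let params = ValidationTracerParams {
        user_address,
        paymaster_address,
        trusted_slots: HashSet::new(),
        trusted_addresses: HashSet::new(),
        trusted_address_slots: HashSet::new(),
        computational_gas_limit: 300_000, // illustrative limit, not a protocol constant
    };
    ValidationTracer::new(storage, params)
}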
+type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>;
+
+impl<'a, H: HistoryMode> ValidationTracer<'a, H> {
+    pub fn new(storage: StoragePtr<'a>, params: ValidationTracerParams) -> Self {
+        ValidationTracer {
+            storage,
+            validation_error: None,
+            validation_mode: ValidationTracerMode::NoValidation,
+            auxilary_allowed_slots: Default::default(),
+
+            should_stop_execution: false,
+            user_address: params.user_address,
+            paymaster_address: params.paymaster_address,
+            trusted_slots: params.trusted_slots,
+            trusted_addresses: params.trusted_addresses,
+            trusted_address_slots: params.trusted_address_slots,
+            computational_gas_used: 0,
+            computational_gas_limit: params.computational_gas_limit,
+
+            _marker: PhantomData,
+        }
+    }
+
+    fn process_validation_round_result(&mut self, result: ValidationRoundResult) {
+        match result {
+            Ok(NewTrustedValidationItems {
+                new_allowed_slots,
+                new_trusted_addresses,
+            }) => {
+                self.auxilary_allowed_slots.extend(new_allowed_slots);
+                self.trusted_addresses.extend(new_trusted_addresses);
+            }
+            Err(err) => {
+                self.validation_error = Some(err);
+            }
+        }
+    }
+
+    // Checks whether such a storage access is acceptable.
+    fn is_allowed_storage_read(&self, address: Address, key: U256, msg_sender: Address) -> bool {
+        // If there are no restrictions, all storage reads are valid.
+        // We also don't support the paymaster validation for now.
+        if matches!(
+            self.validation_mode,
+            ValidationTracerMode::NoValidation | ValidationTracerMode::PaymasterTxValidation
+        ) {
+            return true;
+        }
+
+        // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior
+        // of transferring ETH, which is safe under the DDoS protection rules.
+        if valid_eth_token_call(address, msg_sender) {
+            return true;
+        }
+
+        if self.trusted_slots.contains(&(address, key))
+            || self.trusted_addresses.contains(&address)
+            || self.trusted_address_slots.contains(&(address, key))
+        {
+            return true;
+        }
+
+        if touches_allowed_context(address, key) {
+            return true;
+        }
+
+        // The user is allowed to touch its own slots or slots semantically related to it.
+        let valid_users_slot = address == self.user_address
+            || u256_to_account_address(&key) == self.user_address
+            || self.auxilary_allowed_slots.contains(&u256_to_h256(key));
+        if valid_users_slot {
+            return true;
+        }
+
+        if is_constant_code_hash(address, key, self.storage.clone()) {
+            return true;
+        }
+
+        false
+    }
+
+    // Used to remember user-related fields (their balance/allowance/etc.).
+    // Note that it assumes that the length of the calldata is 64 bytes.
+    fn slot_to_add_from_keccak_call(
+        &self,
+        calldata: &[u8],
+        validated_address: Address,
+    ) -> Option<H256> {
+        assert_eq!(calldata.len(), 64);
+
+        let (potential_address_bytes, potential_position_bytes) = calldata.split_at(32);
+        let potential_address = be_bytes_to_safe_address(potential_address_bytes);
+
+        // If the validated_address is equal to the potential_address,
+        // then this is a request that could be used for a mapping of kind mapping(address => ...).
+        //
+        // If the potential_position_bytes were already allowed before, then this keccak call
+        // might be used for an ERC-20 allowance or any other mapping(address => mapping(...)).
+        if potential_address == Some(validated_address)
+            || self
+                .auxilary_allowed_slots
+                .contains(&H256::from_slice(potential_position_bytes))
+        {
+            // This is a request that could be used for a mapping of kind mapping(address => ...).
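+            // (For reference: Solidity stores mapping(address => ...) entries at
+            // keccak256(abi.encode(key, base_slot)), which is exactly the 64-byte
+            // preimage inspected here.)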
+ + // We could theoretically wait for the slot number to be returned by the + // keccak256 precompile itself, but this would complicate the code even further + // so let's calculate it here. + let slot = keccak256(calldata); + + // Adding this slot to the allowed ones + Some(H256(slot)) + } else { + None + } + } + + pub fn check_user_restrictions( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &SimpleMemory, + ) -> ValidationRoundResult { + if self.computational_gas_used > self.computational_gas_limit { + return Err(ViolatedValidationRule::TookTooManyComputationalGas( + self.computational_gas_limit, + )); + } + + let opcode_variant = data.opcode.variant; + match opcode_variant.opcode { + Opcode::FarCall(_) => { + let packed_abi = data.src0_value.value; + let call_destination_value = data.src1_value.value; + + let called_address = u256_to_account_address(&call_destination_value); + let far_call_abi = FarCallABI::from_u256(packed_abi); + + if called_address == KECCAK256_PRECOMPILE_ADDRESS + && far_call_abi.memory_quasi_fat_pointer.length == 64 + { + let calldata_page = get_calldata_page_via_abi( + &far_call_abi, + state.vm_local_state.callstack.current.base_memory_page, + ); + let calldata = memory.read_unaligned_bytes( + calldata_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + 64, + ); + + let slot_to_add = + self.slot_to_add_from_keccak_call(&calldata, self.user_address); + + if let Some(slot) = slot_to_add { + return Ok(NewTrustedValidationItems { + new_allowed_slots: vec![slot], + ..Default::default() + }); + } + } else if called_address != self.user_address { + let code_key = get_code_key(&called_address); + let code = self.storage.borrow_mut().get_value(&code_key); + + if code == H256::zero() { + // The users are not allowed to call contracts with no code + return Err(ViolatedValidationRule::CalledContractWithNoCode( + called_address, + )); + } + } + } + Opcode::Context(context) => { + match context { + ContextOpcode::Meta => { + return Err(ViolatedValidationRule::TouchedUnallowedContext); + } + ContextOpcode::ErgsLeft => { + } + _ => {} + } + } + Opcode::Log(LogOpcode::StorageRead) => { + let key = data.src0_value.value; + let this_address = state.vm_local_state.callstack.current.this_address; + let msg_sender = state.vm_local_state.callstack.current.msg_sender; + + if !self.is_allowed_storage_read(this_address, key, msg_sender) { + return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + this_address, + key, + )); + } + + if self.trusted_address_slots.contains(&(this_address, key)) { + let storage_key = + StorageKey::new(AccountTreeId::new(this_address), u256_to_h256(key)); + + let value = self.storage.borrow_mut().get_value(&storage_key); + + return Ok(NewTrustedValidationItems { + new_trusted_addresses: vec![h256_to_account_address(&value)], + ..Default::default() + }); + } + } + _ => {} + } + + Ok(Default::default()) + } +} + +impl Tracer for ValidationTracer<'_, H> { + const CALL_BEFORE_EXECUTION: bool = true; + + type SupportedMemory = SimpleMemory; + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + // For now, we support only validations for users. 
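+        // (Paymaster validation is entered as a separate mode below, but no
+        // restrictions are enforced for it yet; see `is_allowed_storage_read` above.)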
+ if let ValidationTracerMode::UserTxValidation = self.validation_mode { + self.computational_gas_used = self + .computational_gas_used + .saturating_add(computational_gas_price(state, &data)); + + let validation_round_result = self.check_user_restrictions(state, data, memory); + self.process_validation_round_result(validation_round_result); + } + + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + let current_mode = self.validation_mode; + match (current_mode, hook) { + (ValidationTracerMode::NoValidation, VmHook::AccountValidationEntered) => { + // Account validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::UserTxValidation; + } + (ValidationTracerMode::NoValidation, VmHook::PaymasterValidationEntered) => { + // Paymaster validation can be entered when there is no prior validation (i.e. "nested" validations are not allowed) + self.validation_mode = ValidationTracerMode::PaymasterTxValidation; + } + (_, VmHook::AccountValidationEntered | VmHook::PaymasterValidationEntered) => { + panic!( + "Unallowed transition inside the validation tracer. Mode: {:#?}, hook: {:#?}", + self.validation_mode, hook + ); + } + (_, VmHook::NoValidationEntered) => { + // Validation can be always turned off + self.validation_mode = ValidationTracerMode::NoValidation; + } + (_, VmHook::ValidationStepEndeded) => { + // The validation step has ended. + self.should_stop_execution = true; + } + (_, _) => { + // The hook is not relevant to the validation tracer. Ignore. + } + } + } + fn after_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &Self::SupportedMemory, + ) { + } +} + +impl ExecutionEndTracer for ValidationTracer<'_, H> { + fn should_stop_execution(&self) -> bool { + self.should_stop_execution || self.validation_error.is_some() + } +} + +impl PendingRefundTracer for ValidationTracer<'_, H> {} +impl PubdataSpentTracer for ValidationTracer<'_, H> {} + +impl StorageInvocationTracer for ValidationTracer<'_, H> {} diff --git a/core/multivm_deps/vm_m6/src/pubdata_utils.rs b/core/multivm_deps/vm_m6/src/pubdata_utils.rs new file mode 100644 index 000000000000..815514ebf0b1 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/pubdata_utils.rs @@ -0,0 +1,98 @@ +use crate::glue::GlueInto; +use crate::history_recorder::HistoryMode; +use crate::oracles::storage::storage_key_of_log; +use crate::utils::collect_storage_log_queries_after_timestamp; +use crate::VmInstance; +use std::collections::HashMap; +use zk_evm::aux_structures::Timestamp; +use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; +use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; +use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; +use zksync_utils::bytecode::bytecode_len_in_bytes; + +impl VmInstance<'_, H> { + pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 { + let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); + + let (events, l2_to_l1_logs) = + self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + // For the first transaction in L1 batch there may be (it depends on the execution mode) an L2->L1 log + // that is sent by `SystemContext` in `setNewBlock`. It's a part of the L1 batch pubdata overhead and not the transaction itself. 
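+        // Total pubdata here = storage writes + per-log bytes for L2->L1 logs
+        // (excluding the `SystemContext` log mentioned above) + long L2->L1
+        // messages + newly published bytecodes; each component is summed below.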
+ let l2_l1_logs_bytes = (l2_to_l1_logs + .iter() + .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) + .count() as u32) + * zk_evm::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; + let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + .iter() + .map(|event| event.len() as u32) + .sum(); + + let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + .iter() + .map(|bytecodehash| { + bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD + }) + .sum(); + + storage_writes_pubdata_published + + l2_l1_logs_bytes + + l2_l1_long_messages_bytes + + published_bytecode_bytes + } + + fn pubdata_published_for_writes(&self, from_timestamp: Timestamp) -> u32 { + // This `HashMap` contains how much was already paid for every slot that was paid during the last tx execution. + // For the slots that weren't paid during the last tx execution we can just use + // `self.state.storage.paid_changes.inner().get(&key)` to get how much it was paid before. + let pre_paid_before_tx_map: HashMap = self + .state + .storage + .paid_changes + .history() + .iter() + .rev() + .take_while(|history_elem| history_elem.0 >= from_timestamp) + .map(|history_elem| (history_elem.1.key, history_elem.1.value.unwrap_or(0))) + .collect(); + let pre_paid_before_tx = |key: &StorageKey| -> u32 { + if let Some(pre_paid) = pre_paid_before_tx_map.get(key) { + *pre_paid + } else { + self.state + .storage + .paid_changes + .inner() + .get(key) + .copied() + .unwrap_or(0) + } + }; + + let storage_logs = collect_storage_log_queries_after_timestamp( + self.state.storage.frames_stack.forward().current_frame(), + from_timestamp, + ); + let (_, deduplicated_logs) = + sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); + + deduplicated_logs + .into_iter() + .filter_map(|log| { + if log.rw_flag { + let key = storage_key_of_log(&log.glue_into()); + let pre_paid = pre_paid_before_tx(&key); + let to_pay_by_user = self.state.storage.base_price_for_write(&log.glue_into()); + + if to_pay_by_user > pre_paid { + Some(to_pay_by_user - pre_paid) + } else { + None + } + } else { + None + } + }) + .sum() + } +} diff --git a/core/multivm_deps/vm_m6/src/refunds.rs b/core/multivm_deps/vm_m6/src/refunds.rs new file mode 100644 index 000000000000..f811274f0fc9 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/refunds.rs @@ -0,0 +1,202 @@ +use crate::history_recorder::HistoryMode; +use crate::vm_with_bootloader::{ + eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, +}; +use crate::VmInstance; +use zk_evm::aux_structures::Timestamp; +use zksync_types::U256; +use zksync_utils::ceil_div_u256; + +impl VmInstance<'_, H> { + pub(crate) fn tx_body_refund( + &self, + from_timestamp: Timestamp, + bootloader_refund: u32, + gas_spent_on_pubdata: u32, + ) -> u32 { + let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + let tx_gas_limit = self.get_tx_gas_limit(current_tx_index); + let total_gas_spent = tx_gas_limit - bootloader_refund; + + let gas_spent_on_computation = total_gas_spent + .checked_sub(gas_spent_on_pubdata) + .unwrap_or_else(|| { + vlog::error!( + "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", + gas_spent_on_pubdata, + total_gas_spent + ); + 0 + }); + + let pubdata_published = self.pubdata_published(from_timestamp); + + // For now, bootloader charges only for base fee. 
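+        // Roughly: refund_gas ~ tx_gas_limit - fair_fee_eth / effective_gas_price, where
+        // fair_fee_eth = gas_spent_on_computation * fair_l2_gas_price
+        //              + pubdata_published * min(bootloader, fair) ETH price per pubdata byte.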
+ let effective_gas_price = self.block_context.base_fee; + + let bootloader_eth_price_per_pubdata_byte = U256::from(effective_gas_price) + * U256::from(self.state.local_state.current_ergs_per_pubdata_byte); + let fair_eth_price_per_pubdata_byte = U256::from(eth_price_per_pubdata_byte( + self.block_context.context.l1_gas_price, + )); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(self.block_context.context.fair_l2_gas_price) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + vlog::error!( + "Fair fee is greater than pre paid. Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u32() + } + + /// Calculates the refund for the block overhead. + /// This refund is the difference between how much user paid in advance for the block overhead + /// and how much he should pay based on actual tx execution result. + pub(crate) fn block_overhead_refund( + &self, + _from_timestamp: Timestamp, + _gas_remaining_before: u32, + _gas_spent_on_pubdata: u32, + ) -> u32 { + 0 + + // let pubdata_published = self.pubdata_published(from_timestamp); + // + // let total_gas_spent = gas_remaining_before - self.gas_remaining(); + // let gas_spent_on_computation = total_gas_spent.checked_sub(gas_spent_on_pubdata).unwrap_or_else(|| { + // vlog::error!("Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", gas_spent_on_pubdata, total_gas_spent); + // 0 + // }); + // let (_, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + // let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + // + // let actual_overhead = Self::actual_overhead_gas( + // self.state.local_state.current_ergs_per_pubdata_byte, + // self.bootloader_state.get_tx_size(current_tx_index), + // pubdata_published, + // gas_spent_on_computation, + // self.state + // .decommittment_processor + // .get_number_of_decommitment_requests_after_timestamp(from_timestamp), + // l2_to_l1_logs.len(), + // ); + // + // let predefined_overhead = self + // .state + // .memory + // .read_slot( + // BOOTLOADER_HEAP_PAGE as usize, + // TX_OVERHEAD_OFFSET + current_tx_index, + // ) + // .value + // .as_u32(); + // + // if actual_overhead <= predefined_overhead { + // predefined_overhead - actual_overhead + // } else { + // // This should never happen but potential mistakes at the early stage should not bring the server down. + // + // // to make debugging easier. 
+ // vlog::error!( + // "Actual overhead is greater than predefined one, actual: {}, predefined: {}", + // actual_overhead, + // predefined_overhead + // ); + // 0 + // } + } + + #[allow(dead_code)] + fn actual_overhead_gas( + _gas_per_pubdata_byte_limit: u32, + _encoded_len: usize, + _pubdata_published: u32, + _gas_spent_on_computation: u32, + _number_of_decommitment_requests: usize, + _l2_l1_logs: usize, + ) -> u32 { + 0 + + // let overhead_for_block_gas = U256::from(crate::transaction_data::block_overhead_gas( + // gas_per_pubdata_byte_limit, + // )); + + // let encoded_len = U256::from(encoded_len); + // let pubdata_published = U256::from(pubdata_published); + // let gas_spent_on_computation = U256::from(gas_spent_on_computation); + // let number_of_decommitment_requests = U256::from(number_of_decommitment_requests); + // let l2_l1_logs = U256::from(l2_l1_logs); + + // let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); + + // let overhead_for_length = ceil_div_u256( + // encoded_len * overhead_for_block_gas, + // BOOTLOADER_TX_ENCODING_SPACE.into(), + // ); + + // let actual_overhead_for_pubdata = ceil_div_u256( + // pubdata_published * overhead_for_block_gas, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + + // let actual_gas_limit_overhead = ceil_div_u256( + // gas_spent_on_computation * overhead_for_block_gas, + // MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT.into(), + // ); + + // let code_decommitter_sorter_circuit_overhead = ceil_div_u256( + // number_of_decommitment_requests * overhead_for_block_gas, + // GEOMETRY_CONFIG.limit_for_code_decommitter_sorter.into(), + // ); + + // let l1_l2_logs_overhead = ceil_div_u256( + // l2_l1_logs * overhead_for_block_gas, + // std::cmp::min( + // GEOMETRY_CONFIG.limit_for_l1_messages_merklizer, + // GEOMETRY_CONFIG.limit_for_l1_messages_pudata_hasher, + // ) + // .into(), + // ); + + // let overhead = vec![ + // tx_slot_overhead, + // overhead_for_length, + // actual_overhead_for_pubdata, + // actual_gas_limit_overhead, + // code_decommitter_sorter_circuit_overhead, + // l1_l2_logs_overhead, + // ] + // .into_iter() + // .max() + // .unwrap(); + + // overhead.as_u32() + } + + /// Returns the given transactions' gas limit - by reading it directly from the VM memory. + pub(crate) fn get_tx_gas_limit(&self, tx_index: usize) -> u32 { + let tx_description_offset = self.bootloader_state.get_tx_description_offset(tx_index); + self.state + .memory + .read_slot( + BOOTLOADER_HEAP_PAGE as usize, + tx_description_offset + TX_GAS_LIMIT_OFFSET, + ) + .value + .as_u32() + } +} diff --git a/core/multivm_deps/vm_m6/src/storage.rs b/core/multivm_deps/vm_m6/src/storage.rs new file mode 100644 index 000000000000..868904c3bbd9 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/storage.rs @@ -0,0 +1,62 @@ +use std::cell::RefCell; +use std::collections::HashMap; +use std::fmt::Debug; +use std::rc::Rc; + +use zksync_state::{ReadStorage, StorageView, WriteStorage}; +use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256}; + +pub trait Storage: Debug + Sync + Send { + /// Returns a value from a given key. If value never existed, returns 0. + fn get_value(&mut self, key: &StorageKey) -> StorageValue; + // Sets the new value under a given key - returns the original value. + fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue; + /// The function returns true if it's the first time writing to this storage slot. + /// The initial write uses 64 gas, while subsequent writes use only 40. 
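+    /// (In other words, `is_write_initial` reports whether the slot has never been
+    /// written to before, which is what distinguishes the two prices above.)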
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool;
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>>;
+
+    fn number_of_updated_storage_slots(&self) -> usize {
+        self.get_modified_storage_keys().len()
+    }
+
+    fn get_modified_storage_keys(&self) -> &HashMap<StorageKey, StorageValue>;
+
+    /// Returns whether a bytecode hash is "known", i.e. whether
+    /// it has been published on L1.
+    fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool {
+        let code_key = get_known_code_key(bytecode_hash);
+        self.get_value(&code_key) != H256::zero()
+    }
+
+    fn missed_storage_invocations(&self) -> usize;
+}
+
+impl<S: ReadStorage + Debug + Send + Sync> Storage for StorageView<S> {
+    fn get_value(&mut self, key: &StorageKey) -> StorageValue {
+        ReadStorage::read_value(self, key)
+    }
+
+    /// Returns the original value.
+    fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue {
+        WriteStorage::set_value(self, *key, value)
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        ReadStorage::is_write_initial(self, key)
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        ReadStorage::load_factory_dep(self, hash)
+    }
+
+    fn get_modified_storage_keys(&self) -> &HashMap<StorageKey, StorageValue> {
+        WriteStorage::modified_storage_keys(self)
+    }
+
+    fn missed_storage_invocations(&self) -> usize {
+        WriteStorage::missed_storage_invocations(self)
+    }
+}
+
+pub type StoragePtr<'a> = Rc<RefCell<&'a mut dyn Storage>>;
diff --git a/core/multivm_deps/vm_m6/src/test_utils.rs b/core/multivm_deps/vm_m6/src/test_utils.rs
new file mode 100644
index 000000000000..9404f4359b22
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/test_utils.rs
@@ -0,0 +1,331 @@
+//!
+//! This file contains various utilities
+//! that could be used for testing, but are not needed anywhere else.
+//!
+//! They are not put into the `cfg(test)` folder to allow easy sharing of the content
+//! of this file with other crates.
+//!
+
+use std::collections::HashMap;
+
+use itertools::Itertools;
+use zk_evm::{aux_structures::Timestamp, vm_state::VmLocalState};
+use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract};
+use zksync_types::{
+    ethabi::{Address, Token},
+    fee::Fee,
+    l2::L2Tx,
+    web3::signing::keccak256,
+    Execute, L2ChainId, Nonce, StorageKey, StorageLogQuery, StorageValue,
+    CONTRACT_DEPLOYER_ADDRESS, H256, U256,
+};
+use zksync_utils::{
+    address_to_h256, bytecode::hash_bytecode, h256_to_account_address,
+    test_utils::LoadnextContractExecutionParams, u256_to_h256,
+};
+
+/// The utilities here help us with testing the VM.
+use crate::{
+    event_sink::InMemoryEventSink,
+    history_recorder::{
+        AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode, HistoryRecorder,
+    },
+    memory::SimpleMemory,
+    VmInstance,
+};
+
+#[derive(Clone, Debug)]
+pub struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
+
+// We consider hashmaps to be equal even if there is a key
+// that is not present in one but has a zero value in the other.
+impl PartialEq for ModifiedKeysMap {
+    fn eq(&self, other: &Self) -> bool {
+        for (key, value) in self.0.iter() {
+            if *value != other.0.get(key).cloned().unwrap_or_default() {
+                return false;
+            }
+        }
+        for (key, value) in other.0.iter() {
+            if *value != self.0.get(key).cloned().unwrap_or_default() {
+                return false;
+            }
+        }
+        true
+    }
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct DecommitterTestInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
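+    /// (Two maps are considered equal when they differ only in explicitly stored
+    /// zero values; see `ModifiedKeysMap`'s `PartialEq` implementation above.)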
+    pub modified_storage_keys: ModifiedKeysMap,
+    pub known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
+    pub decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct StorageOracleInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub modified_storage_keys: ModifiedKeysMap,
+
+    pub frames_stack: AppDataFrameManagerWithHistory<StorageLogQuery, H>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct PrecompileProcessorTestInnerState<H: HistoryMode> {
+    pub timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
+}
+
+/// A struct that encapsulates the state of the VM's oracles.
+/// The state is to be used in tests.
+#[derive(Clone, PartialEq, Debug)]
+pub struct VmInstanceInnerState<H: HistoryMode> {
+    event_sink: InMemoryEventSink<H>,
+    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
+    memory: SimpleMemory<H>,
+    decommitter_state: DecommitterTestInnerState<H>,
+    storage_oracle_state: StorageOracleInnerState<H>,
+    local_state: VmLocalState,
+}
+
+impl<H: HistoryMode> VmInstance<'_, H> {
+    /// This method is mostly to be used in tests. It dumps the inner state of all the oracles and the VM itself.
+    pub fn dump_inner_state(&self) -> VmInstanceInnerState<H> {
+        let event_sink = self.state.event_sink.clone();
+        let precompile_processor_state = PrecompileProcessorTestInnerState {
+            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
+        };
+        let memory = self.state.memory.clone();
+        let decommitter_state = DecommitterTestInnerState {
+            modified_storage_keys: ModifiedKeysMap(
+                self.state
+                    .decommittment_processor
+                    .get_storage()
+                    .borrow()
+                    .get_modified_storage_keys()
+                    .clone(),
+            ),
+            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
+            decommitted_code_hashes: self
+                .state
+                .decommittment_processor
+                .get_decommitted_code_hashes_with_history()
+                .clone(),
+        };
+        let storage_oracle_state = StorageOracleInnerState {
+            modified_storage_keys: ModifiedKeysMap(
+                self.state
+                    .storage
+                    .storage
+                    .get_ptr()
+                    .borrow()
+                    .get_modified_storage_keys()
+                    .clone(),
+            ),
+            frames_stack: self.state.storage.frames_stack.clone(),
+        };
+        let local_state = self.state.local_state.clone();
+
+        VmInstanceInnerState {
+            event_sink,
+            precompile_processor_state,
+            memory,
+            decommitter_state,
+            storage_oracle_state,
+            local_state,
+        }
+    }
+}
+
+// This one is used only for tests, but it is in this folder to
+// be able to share it among crates
+pub fn mock_loadnext_test_call(
+    eth_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+    execution_params: LoadnextContractExecutionParams,
+) -> L2Tx {
+    let loadnext_contract = get_loadnext_contract();
+
+    let contract_function = loadnext_contract.contract.function("execute").unwrap();
+
+    let params = vec![
+        Token::Uint(U256::from(execution_params.reads)),
+        Token::Uint(U256::from(execution_params.writes)),
+        Token::Uint(U256::from(execution_params.hashes)),
+        Token::Uint(U256::from(execution_params.events)),
+        Token::Uint(U256::from(execution_params.recursive_calls)),
+        Token::Uint(U256::from(execution_params.deploys)),
+    ];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let mut l2_tx = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        Default::default(),
+        L2ChainId(270),
+        &eth_private_key,
+        None,
+        Default::default(),
+    )
+    .unwrap();
+    // Input means all transaction data (NOT calldata, but all tx fields) that came from the API.
+    // This input will be used for the derivation of the tx hash, so put some random data into it
+    // to be sure that the transaction hash is unique.
+    l2_tx.set_input(H256::random().0.to_vec(), H256::random());
+    l2_tx
+}
+
+// This one is used only for tests, but it is in this folder to
+// be able to share it among crates
+pub fn mock_loadnext_gas_burn_call(
+    eth_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+    gas: u32,
+) -> L2Tx {
+    let loadnext_contract = get_loadnext_contract();
+
+    let contract_function = loadnext_contract.contract.function("burnGas").unwrap();
+
+    let params = vec![Token::Uint(U256::from(gas))];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let mut l2_tx = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        Default::default(),
+        L2ChainId(270),
+        &eth_private_key,
+        None,
+        Default::default(),
+    )
+    .unwrap();
+    // Input means all transaction data (NOT calldata, but all tx fields) that came from the API.
+    // This input will be used for the derivation of the tx hash, so put some random data into it
+    // to be sure that the transaction hash is unique.
+    l2_tx.set_input(H256::random().0.to_vec(), H256::random());
+    l2_tx
+}
+
+pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute {
+    let deployer = deployer_contract();
+
+    let contract_function = deployer.function("create").unwrap();
+
+    let params = [
+        Token::FixedBytes(vec![0u8; 32]),
+        Token::FixedBytes(hash_bytecode(code).0.to_vec()),
+        Token::Bytes(calldata.to_vec()),
+    ];
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    Execute {
+        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        calldata,
+        factory_deps: Some(vec![code.to_vec()]),
+        value: U256::zero(),
+    }
+}
+
+fn get_execute_error_calldata() -> Vec<u8> {
+    let test_contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    );
+
+    let function = test_contract.function("require_short").unwrap();
+
+    function
+        .encode_input(&[])
+        .expect("failed to encode parameters")
+}
+
+pub fn get_deploy_tx(
+    account_private_key: H256,
+    nonce: Nonce,
+    code: &[u8],
+    factory_deps: Vec<Vec<u8>>,
+    calldata: &[u8],
+    fee: Fee,
+) -> L2Tx {
+    let factory_deps = factory_deps
+        .into_iter()
+        .chain(vec![code.to_vec()])
+        .collect();
+    let execute = get_create_execute(code, calldata);
+
+    let mut signed = L2Tx::new_signed(
+        CONTRACT_DEPLOYER_ADDRESS,
+        execute.calldata,
+        nonce,
+        fee,
+        U256::zero(),
+        L2ChainId(270),
+        &account_private_key,
+        Some(factory_deps),
+        Default::default(),
+    )
+    .expect("should create a signed execute transaction");
+
+    signed.set_input(H256::random().as_bytes().to_vec(), H256::random());
+
+    signed
+}
+
+pub fn get_error_tx(
+    account_private_key: H256,
+    nonce: Nonce,
+    contract_address: Address,
+    fee: Fee,
+) -> L2Tx {
+    let factory_deps = vec![];
+    let calldata = get_execute_error_calldata();
+
+    let mut signed = L2Tx::new_signed(
+        contract_address,
+        calldata,
+        nonce,
+        fee,
+        U256::zero(),
+        L2ChainId(270),
+        &account_private_key,
+        Some(factory_deps),
+        Default::default(),
+    )
+    .expect("should create a signed execute transaction");
+
+    signed.set_input(H256::random().as_bytes().to_vec(), H256::random());
+
+    signed
+}
+
+pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) -> Address {
+    let prefix = keccak256("zksyncCreate".as_bytes());
+    let address = address_to_h256(&sender_address);
+    let nonce = u256_to_h256(U256::from(sender_nonce.0));
+
+    let digest = prefix
+        .iter()
+        .chain(address.0.iter())
+        .chain(nonce.0.iter())
+        .copied()
+        .collect_vec();
+
+    let hash = keccak256(&digest);
+
+    h256_to_account_address(&H256(hash))
+}
diff --git a/core/multivm_deps/vm_m6/src/tests/bootloader.rs b/core/multivm_deps/vm_m6/src/tests/bootloader.rs
new file mode 100644
index 000000000000..e37b4259f2cb
--- /dev/null
+++ b/core/multivm_deps/vm_m6/src/tests/bootloader.rs
@@ -0,0 +1,2174 @@
+// //!
+// //! Tests for the bootloader
+// //! The description for each of the tests can be found in the corresponding `.yul` file.
+// //!
+// use itertools::Itertools;
+// use std::{
+//     collections::{HashMap, HashSet},
+//     convert::TryFrom,
+// };
+// use tempfile::TempDir;
+
+// use crate::{
+//     errors::VmRevertReason,
+//     history_recorder::HistoryMode,
+//     oracles::tracer::{StorageInvocationTracer, TransactionResultTracer},
+//     storage::{Storage, StoragePtr},
+//     test_utils::{
+//         get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx,
+//         mock_loadnext_test_call,
+//     },
+//     transaction_data::TransactionData,
+//     utils::{
+//         create_test_block_params, insert_system_contracts, read_bootloader_test_code,
+//         BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT,
+//     },
+//     vm::{tx_has_failed, VmExecutionStopReason, ZkSyncVmState},
+//     vm_with_bootloader::{
+//         bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx,
+//         push_raw_transaction_to_bootloader_memory, BlockContext, BlockContextMode,
+//         BootloaderJobType, TxExecutionMode,
+//     },
+//     vm_with_bootloader::{
+//         init_vm_inner, push_transaction_to_bootloader_memory, DerivedBlockContext,
+//         BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET,
+//     },
+//     HistoryEnabled, OracleTools, TxRevertReason, VmBlockResult, VmExecutionResult, VmInstance,
+// };
+
+// use zk_evm::{
+//     aux_structures::Timestamp, block_properties::BlockProperties, zkevm_opcode_defs::FarCallOpcode,
+// };
+
+// use zksync_types::{
+//     block::DeployedContract,
+//     ethabi::encode,
+//     get_is_account_key,
+//     storage_writes_deduplicator::StorageWritesDeduplicator,
+//     system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT},
+//     tx::tx_execution_info::TxExecutionStatus,
+//     utils::{
+//         deployed_address_create, storage_key_for_eth_balance,
+//         storage_key_for_standard_token_balance,
+//     },
+//     vm_trace::{Call, CallType},
+//     Execute, L1BatchNumber, L1TxCommonData, StorageKey, StorageLog, L1_MESSENGER_ADDRESS,
+//     {ethabi::Token, AccountTreeId, Address, ExecuteTransactionCommon, Transaction, H256, U256},
+//     {fee::Fee, l2_to_l1_log::L2ToL1Log},
+//     {
+//         get_code_key, get_known_code_key, get_nonce_key, Nonce, BOOTLOADER_ADDRESS, H160,
+//         L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS,
+//     },
+// };
+
+// use zksync_utils::{
+//     bytecode::CompressedBytecodeInfo,
+//     test_utils::LoadnextContractExecutionParams,
+//     {bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256},
+// };
+
+// use zksync_contracts::{
+//     get_loadnext_contract, load_contract, read_bytecode, SystemContractCode,
+//     PLAYGROUND_BLOCK_BOOTLOADER_CODE,
+// };
+
+// use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView};
+// use zksync_storage::{db::Database, RocksDB};
+
+// fn run_vm_with_custom_factory_deps<'a, H: HistoryMode>(
+//     oracle_tools: &'a mut OracleTools<'a, false, H>,
+//     block_context: BlockContext,
+//     block_properties: &'a BlockProperties,
+//     encoded_tx: Vec<U256>,
+//
predefined_overhead: u32, +// expected_error: Option, +// ) { +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); +// let mut vm = init_vm_inner( +// oracle_tools, +// BlockContextMode::OverrideCurrent(block_context.into()), +// block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// vm.bootloader_state.add_tx_data(encoded_tx.len()); +// vm.state.memory.populate_page( +// BOOTLOADER_HEAP_PAGE as usize, +// get_bootloader_memory_for_encoded_tx( +// encoded_tx, +// 0, +// TxExecutionMode::VerifyExecute, +// 0, +// 0, +// predefined_overhead, +// u32::MAX, +// 0, +// vec![], +// ), +// Timestamp(0), +// ); + +// let result = vm.execute_next_tx(u32::MAX, false).err(); + +// assert_eq!(expected_error, result); +// } + +// fn get_balance(token_id: AccountTreeId, account: &Address, main_storage: StoragePtr<'_>) -> U256 { +// let key = storage_key_for_standard_token_balance(token_id, account); +// h256_to_u256(main_storage.borrow_mut().get_value(&key)) +// } + +// #[test] +// fn test_dummy_bootloader() { +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); +// let (block_context, block_properties) = create_test_block_params(); +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// let bootloader_code = read_bootloader_test_code("dummy"); +// let bootloader_hash = hash_bytecode(&bootloader_code); + +// base_system_contracts.bootloader = SystemContractCode { +// code: bytes_to_be_words(bootloader_code), +// hash: bootloader_hash, +// }; + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// let VmBlockResult { +// full_result: res, .. 
+// } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing); + +// // Dummy bootloader should not panic +// assert!(res.revert_reason.is_none()); + +// let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + +// verify_required_memory( +// &vm.state, +// vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], +// ); +// } + +// #[test] +// fn test_bootloader_out_of_gas() { +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); +// let (block_context, block_properties) = create_test_block_params(); + +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + +// let bootloader_code = read_bootloader_test_code("dummy"); +// let bootloader_hash = hash_bytecode(&bootloader_code); + +// base_system_contracts.bootloader = SystemContractCode { +// code: bytes_to_be_words(bootloader_code), +// hash: bootloader_hash, +// }; + +// // init vm with only 10 ergs +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// 10, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// let res = vm.execute_block_tip(); + +// assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); +// } + +// fn verify_required_storage( +// state: &ZkSyncVmState<'_, H>, +// required_values: Vec<(H256, StorageKey)>, +// ) { +// for (required_value, key) in required_values { +// let current_value = state.storage.storage.read_from_storage(&key); + +// assert_eq!( +// u256_to_h256(current_value), +// required_value, +// "Invalid value at key {key:?}" +// ); +// } +// } + +// fn verify_required_memory( +// state: &ZkSyncVmState<'_, H>, +// required_values: Vec<(U256, u32, u32)>, +// ) { +// for (required_value, memory_page, cell) in required_values { +// let current_value = state +// .memory +// .read_slot(memory_page as usize, cell as usize) +// .value; +// assert_eq!(current_value, required_value); +// } +// } + +// #[test] +// fn test_default_aa_interaction() { +// // In this test, we aim to test whether a simple account interaction (without any fee logic) +// // will work. The account will try to deploy a simple contract from integration tests. 
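The `verify_required_storage`/`verify_required_memory` helpers defined above implement a simple assertion pattern: read each required slot back from VM state and compare it to the expected value. A standalone sketch of the same pattern against a plain map (illustrative only, not part of this diff; the `H256`/`StorageKey` aliases stand in for the zksync_types equivalents):

    use std::collections::HashMap;

    type StorageKey = [u8; 32];
    type H256 = [u8; 32];

    // Compare every (expected value, key) pair against a map standing in for VM storage;
    // a missing key reads as the zero word, mirroring untouched storage slots.
    fn verify_slots(storage: &HashMap<StorageKey, H256>, required: &[(H256, StorageKey)]) {
        for (expected, key) in required {
            let actual = storage.get(key).copied().unwrap_or([0u8; 32]);
            assert_eq!(&actual, expected, "Invalid value at key {key:?}");
        }
    }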
+ +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let operator_address = block_context.context.operator_address; +// let base_fee = block_context.base_fee; +// // We deploy here counter contract, because its logic is trivial +// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(20000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let tx_data: TransactionData = tx.clone().into(); + +// let maximal_fee = tx_data.gas_limit * tx_data.max_fee_per_gas; +// let sender_address = tx_data.from(); +// // set balance + +// let key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let tx_execution_result = vm +// .execute_next_tx(u32::MAX, false) +// .expect("Bootloader failed while processing transaction"); + +// assert_eq!( +// tx_execution_result.status, +// TxExecutionStatus::Success, +// "Transaction wasn't successful" +// ); + +// let VmBlockResult { +// full_result: res, .. +// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); +// // Should not panic +// assert!( +// res.revert_reason.is_none(), +// "Bootloader was not expected to revert: {:?}", +// res.revert_reason +// ); + +// // Both deployment and ordinary nonce should be incremented by one. +// let account_nonce_key = get_nonce_key(&sender_address); +// let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + +// // The code hash of the deployed contract should be marked as republished. +// let known_codes_key = get_known_code_key(&contract_code_hash); + +// // The contract should be deployed successfully. 
+//     let deployed_address = deployed_address_create(sender_address, U256::zero());
+//     let account_code_key = get_code_key(&deployed_address);
+
+//     let expected_slots = vec![
+//         (u256_to_h256(expected_nonce), account_nonce_key),
+//         (u256_to_h256(U256::from(1u32)), known_codes_key),
+//         (contract_code_hash, account_code_key),
+//     ];
+
+//     verify_required_storage(&vm.state, expected_slots);
+
+//     assert!(!tx_has_failed(&vm.state, 0));
+
+//     let expected_fee =
+//         maximal_fee - U256::from(tx_execution_result.gas_refunded) * U256::from(base_fee);
+//     let operator_balance = get_balance(
+//         AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
+//         &operator_address,
+//         vm.state.storage.storage.get_ptr(),
+//     );
+
+//     assert!(
+//         operator_balance == expected_fee,
+//         "Operator did not receive its fee"
+//     );
+// }
+
+// fn execute_vm_with_predetermined_refund(
+//     txs: Vec<Transaction>,
+//     refunds: Vec<u32>,
+//     compressed_bytecodes: Vec<Vec<CompressedBytecodeInfo>>,
+// ) -> VmBlockResult {
+//     let (block_context, block_properties) = create_test_block_params();
+//     let block_context: DerivedBlockContext = block_context.into();
+
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage);
+
+//     // set balance
+//     for tx in txs.iter() {
+//         let sender_address = tx.initiator_account();
+//         let key = storage_key_for_eth_balance(&sender_address);
+//         storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0])));
+//     }
+
+//     let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled);
+
+//     let mut vm = init_vm_inner(
+//         &mut oracle_tools,
+//         BlockContextMode::NewBlock(block_context, Default::default()),
+//         &block_properties,
+//         BLOCK_GAS_LIMIT,
+//         &BASE_SYSTEM_CONTRACTS,
+//         TxExecutionMode::VerifyExecute,
+//     );
+
+//     let codes_for_decommiter = txs
+//         .iter()
+//         .flat_map(|tx| {
+//             tx.execute
+//                 .factory_deps
+//                 .clone()
+//                 .unwrap_or_default()
+//                 .iter()
+//                 .map(|dep| bytecode_to_factory_dep(dep.clone()))
+//                 .collect::<Vec<(U256, Vec<U256>)>>()
+//         })
+//         .collect();
+
+//     vm.state.decommittment_processor.populate(
+//         codes_for_decommiter,
+//         Timestamp(vm.state.local_state.timestamp),
+//     );
+
+//     let memory_with_suggested_refund = get_bootloader_memory(
+//         txs.into_iter().map(Into::into).collect(),
+//         refunds,
+//         compressed_bytecodes,
+//         TxExecutionMode::VerifyExecute,
+//         BlockContextMode::NewBlock(block_context, Default::default()),
+//     );
+
+//     vm.state.memory.populate_page(
+//         BOOTLOADER_HEAP_PAGE as usize,
+//         memory_with_suggested_refund,
+//         Timestamp(0),
+//     );
+
+//     vm.execute_till_block_end(BootloaderJobType::TransactionExecution)
+// }
+
+// #[test]
+// fn test_predetermined_refunded_gas() {
+//     // In this test, we compare the execution of the bootloader with the predefined
+//     // refunded gas and without it.
+
+//     let (block_context, block_properties) = create_test_block_params();
+//     let block_context: DerivedBlockContext = block_context.into();
+
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage);
+
+//     let base_fee = block_context.base_fee;
+
+//     // We
deploy here counter contract, because its logic is trivial +// let contract_code = read_test_contract(); +// let published_bytecode = CompressedBytecodeInfo::from_original(contract_code.clone()).unwrap(); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(20000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); + +// let sender_address = tx.initiator_account(); + +// // set balance +// let key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let tx_execution_result = vm +// .execute_next_tx(u32::MAX, false) +// .expect("Bootloader failed while processing transaction"); + +// assert_eq!( +// tx_execution_result.status, +// TxExecutionStatus::Success, +// "Transaction wasn't successful" +// ); + +// // If the refund provided by the operator or the final refund are the 0 +// // there is no impact of the operator's refund at all and so this test does not +// // make much sense. +// assert!( +// tx_execution_result.operator_suggested_refund > 0, +// "The operator's refund is 0" +// ); +// assert!( +// tx_execution_result.gas_refunded > 0, +// "The final refund is 0" +// ); + +// let mut result = vm.execute_till_block_end(BootloaderJobType::TransactionExecution); +// assert!( +// result.full_result.revert_reason.is_none(), +// "Bootloader was not expected to revert: {:?}", +// result.full_result.revert_reason +// ); + +// let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( +// vec![tx], +// vec![tx_execution_result.operator_suggested_refund], +// vec![vec![published_bytecode]], +// ); +// // We need to sort these lists as those are flattened from HashMaps +// result.full_result.used_contract_hashes.sort(); +// result_with_predetermined_refund +// .full_result +// .used_contract_hashes +// .sort(); + +// assert_eq!( +// result.full_result.events, +// result_with_predetermined_refund.full_result.events +// ); +// assert_eq!( +// result.full_result.l2_to_l1_logs, +// result_with_predetermined_refund.full_result.l2_to_l1_logs +// ); +// assert_eq!( +// result.full_result.storage_log_queries, +// result_with_predetermined_refund +// .full_result +// .storage_log_queries +// ); +// assert_eq!( +// result.full_result.used_contract_hashes, +// result_with_predetermined_refund +// .full_result +// .used_contract_hashes +// ); +// } + +// #[derive(Debug, Clone)] +// enum TransactionRollbackTestInfo { +// Rejected(Transaction, TxRevertReason), +// Processed(Transaction, bool, TxExecutionStatus), +// } + +// impl TransactionRollbackTestInfo { +// fn new_rejected(transaction: Transaction, revert_reason: TxRevertReason) -> Self { +// Self::Rejected(transaction, revert_reason) +// } + +// fn new_processed( +// transaction: Transaction, +// should_be_rollbacked: bool, +// expected_status: TxExecutionStatus, +// ) -> Self { +// Self::Processed(transaction, 
should_be_rollbacked, expected_status)
+//     }
+
+//     fn get_transaction(&self) -> &Transaction {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(tx, _) => tx,
+//             TransactionRollbackTestInfo::Processed(tx, _, _) => tx,
+//         }
+//     }
+
+//     fn rejection_reason(&self) -> Option<TxRevertReason> {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, revert_reason) => Some(revert_reason.clone()),
+//             TransactionRollbackTestInfo::Processed(_, _, _) => None,
+//         }
+//     }
+
+//     fn should_rollback(&self) -> bool {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, _) => true,
+//             TransactionRollbackTestInfo::Processed(_, x, _) => *x,
+//         }
+//     }
+
+//     fn expected_status(&self) -> TxExecutionStatus {
+//         match self {
+//             TransactionRollbackTestInfo::Rejected(_, _) => {
+//                 panic!("There is no execution status for a rejected transaction")
+//             }
+//             TransactionRollbackTestInfo::Processed(_, _, status) => *status,
+//         }
+//     }
+// }
+
+// // Accepts the address of the sender as well as the list of pairs of its transactions
+// // and whether these transactions should succeed.
+// fn execute_vm_with_possible_rollbacks(
+//     sender_address: Address,
+//     transactions: Vec<TransactionRollbackTestInfo>,
+//     block_context: DerivedBlockContext,
+//     block_properties: BlockProperties,
+// ) -> VmExecutionResult {
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage);
+
+//     // Setting infinite balance for the sender.
+//     let key = storage_key_for_eth_balance(&sender_address);
+//     storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0])));
+
+//     let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled);
+
+//     let mut vm = init_vm_inner(
+//         &mut oracle_tools,
+//         BlockContextMode::NewBlock(block_context, Default::default()),
+//         &block_properties,
+//         BLOCK_GAS_LIMIT,
+//         &BASE_SYSTEM_CONTRACTS,
+//         TxExecutionMode::VerifyExecute,
+//     );
+
+//     for test_info in transactions {
+//         vm.save_current_vm_as_snapshot();
+//         let vm_state_before_tx = vm.dump_inner_state();
+//         push_transaction_to_bootloader_memory(
+//             &mut vm,
+//             test_info.get_transaction(),
+//             TxExecutionMode::VerifyExecute,
+//             None,
+//         );
+
+//         match vm.execute_next_tx(u32::MAX, false) {
+//             Err(reason) => {
+//                 assert_eq!(test_info.rejection_reason(), Some(reason));
+//             }
+//             Ok(res) => {
+//                 assert_eq!(test_info.rejection_reason(), None);
+//                 assert_eq!(
+//                     res.status,
+//                     test_info.expected_status(),
+//                     "Transaction status is not correct"
+//                 );
+//             }
+//         };
+
+//         if test_info.should_rollback() {
+//             // Some error has occurred; we should reject the transaction
+//             vm.rollback_to_latest_snapshot();
+
+//             let state_after_rollback = vm.dump_inner_state();
+//             assert_eq!(
+//                 vm_state_before_tx, state_after_rollback,
+//                 "Did not roll back VM state correctly"
+//             );
+//         }
+//     }
+
+//     let VmBlockResult {
+//         full_result: mut result,
+//         ..
+//     } = vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing);
+//     // Used contract hashes are retrieved in an unordered manner.
+//     // However, they must be sorted for the comparisons in tests to work.
+//     result.used_contract_hashes.sort();
+
+//     result
+// }
+
+// // Sets the signature for an L2 transaction and returns the same transaction
+// // but with a different signature.
+// fn change_signature(mut tx: Transaction, signature: Vec<u8>) -> Transaction {
+//     tx.common_data = match tx.common_data {
+//         ExecuteTransactionCommon::L2(mut data) => {
+//             data.signature = signature;
+//             ExecuteTransactionCommon::L2(data)
+//         }
+//         _ => unreachable!(),
+//     };
+
+//     tx
+// }
+
+// #[test]
+// fn test_vm_rollbacks() {
+//     let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = {
+//         let (block_context, block_properties) = create_test_block_params();
+//         (block_context.into(), block_properties)
+//     };
+
+//     let base_fee = U256::from(block_context.base_fee);
+
+//     let sender_private_key = H256::random();
+//     let contract_code = read_test_contract();
+
+//     let tx_nonce_0: Transaction = get_deploy_tx(
+//         sender_private_key,
+//         Nonce(0),
+//         &contract_code,
+//         vec![],
+//         &[],
+//         Fee {
+//             gas_limit: U256::from(12000000u32),
+//             max_fee_per_gas: base_fee,
+//             max_priority_fee_per_gas: U256::zero(),
+//             gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE),
+//         },
+//     )
+//     .into();
+//     let tx_nonce_1: Transaction = get_deploy_tx(
+//         sender_private_key,
+//         Nonce(1),
+//         &contract_code,
+//         vec![],
+//         &[],
+//         Fee {
+//             gas_limit: U256::from(12000000u32),
+//             max_fee_per_gas: base_fee,
+//             max_priority_fee_per_gas: U256::zero(),
+//             gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE),
+//         },
+//     )
+//     .into();
+//     let tx_nonce_2: Transaction = get_deploy_tx(
+//         sender_private_key,
+//         Nonce(2),
+//         &contract_code,
+//         vec![],
+//         &[],
+//         Fee {
+//             gas_limit: U256::from(12000000u32),
+//             max_fee_per_gas: base_fee,
+//             max_priority_fee_per_gas: U256::zero(),
+//             gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE),
+//         },
+//     )
+//     .into();
+
+//     let wrong_signature_length_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 32]);
+//     let wrong_v_tx = change_signature(tx_nonce_0.clone(), vec![1u8; 65]);
+//     let wrong_signature_tx = change_signature(tx_nonce_0.clone(), vec![27u8; 65]);
+
+//     let sender_address = tx_nonce_0.initiator_account();
+
+//     let result_without_rollbacks = execute_vm_with_possible_rollbacks(
+//         sender_address,
+//         vec![
+//             // The nonces are ordered correctly, all the transactions should succeed.
+// TransactionRollbackTestInfo::new_processed( +// tx_nonce_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_2.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// ], +// block_context, +// block_properties, +// ); + +// let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Incorrect nonce".to_string(), +// data: vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, +// 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// ], +// }); +// let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Reusing the same nonce twice".to_string(), +// data: vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, +// 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, +// 0, 0, 0, +// ], +// }); +// let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Signature length is incorrect".to_string(), +// data: vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, +// 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, +// 116, 0, 0, 0, +// ], +// }); +// let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "v is neither 27 nor 28".to_string(), +// data: vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, +// 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// ], +// }); +// let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { +// msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), +// data: vec![], +// }); + +// let result_with_rollbacks = execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// TransactionRollbackTestInfo::new_rejected( +// wrong_signature_length_tx, +// signature_length_is_incorrect, +// ), +// TransactionRollbackTestInfo::new_rejected(wrong_v_tx, v_is_incorrect), +// TransactionRollbackTestInfo::new_rejected(wrong_signature_tx, signature_is_incorrect), +// // The correct nonce is 0, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce.clone()), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The correct nonce is 1, this tx will fail +// TransactionRollbackTestInfo::new_rejected( +// tx_nonce_0.clone(), +// reusing_nonce_twice.clone(), +// ), +// // The correct nonce is 1, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2.clone(), incorrect_nonce), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_1, +// false, +// TxExecutionStatus::Success, +// ), +// // The correct nonce is 2, this tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_0, reusing_nonce_twice.clone()), +// // This tx will succeed +// TransactionRollbackTestInfo::new_processed( +// tx_nonce_2.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // This tx will fail +// TransactionRollbackTestInfo::new_rejected(tx_nonce_2, reusing_nonce_twice.clone()), +// ], +// block_context, +// block_properties, +// ); + +// assert_eq!(result_without_rollbacks, result_with_rollbacks); + +// let loadnext_contract = get_loadnext_contract(); + +// let loadnext_constructor_data = encode(&[Token::Uint(U256::from(100))]); +// let loadnext_deploy_tx: Transaction = get_deploy_tx( +// sender_private_key, +// Nonce(0), +// &loadnext_contract.bytecode, +// loadnext_contract.factory_deps, +// &loadnext_constructor_data, +// Fee { +// gas_limit: U256::from(70000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); +// let loadnext_contract_address = +// get_create_zksync_address(loadnext_deploy_tx.initiator_account(), Nonce(0)); +// let deploy_loadnext_tx_info = TransactionRollbackTestInfo::new_processed( +// loadnext_deploy_tx, +// false, +// TxExecutionStatus::Success, +// ); + +// let get_load_next_tx = |params: LoadnextContractExecutionParams, nonce: Nonce| { +// // Here we test loadnext with various kinds of operations +// let tx: Transaction = mock_loadnext_test_call( +// sender_private_key, +// nonce, +// loadnext_contract_address, +// Fee { +// gas_limit: U256::from(100000000u32), +// max_fee_per_gas: base_fee, +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// params, +// ) +// .into(); + +// tx +// }; + +// let loadnext_tx_0 = get_load_next_tx( +// LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// }, +// Nonce(1), +// ); +// let loadnext_tx_1 = get_load_next_tx( +// LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// }, +// Nonce(2), +// ); + +// let result_without_rollbacks = 
execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// deploy_loadnext_tx_info.clone(), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// ], +// block_context, +// block_properties, +// ); + +// let result_with_rollbacks = execute_vm_with_possible_rollbacks( +// sender_address, +// vec![ +// deploy_loadnext_tx_info, +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_0.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The nonce has been bumped up, this transaction should now fail +// TransactionRollbackTestInfo::new_rejected(loadnext_tx_0, reusing_nonce_twice.clone()), +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// true, +// TxExecutionStatus::Success, +// ), +// // After the previous tx has been rolled back, this one should succeed +// TransactionRollbackTestInfo::new_processed( +// loadnext_tx_1.clone(), +// false, +// TxExecutionStatus::Success, +// ), +// // The nonce has been bumped up, this transaction should now fail +// TransactionRollbackTestInfo::new_rejected(loadnext_tx_1, reusing_nonce_twice), +// ], +// block_context, +// block_properties, +// ); + +// assert_eq!(result_without_rollbacks, result_with_rollbacks); +// } + +// // Inserts the contracts into the test environment, bypassing the +// // deployer system contract. Besides the reference to storage +// // it accepts a `contracts` tuple of information about the contract +// // and whether or not it is an account. 
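For readers following `get_create_zksync_address` (used above to predict the loadnext contract address), the derivation is: keccak256 over the constant prefix keccak256("zksyncCreate"), the sender address padded to a 32-byte word, and the nonce as a 32-byte big-endian word, taking the low 20 bytes of the result. A self-contained sketch, not part of this diff, using the tiny-keccak crate purely for illustration:

    use tiny_keccak::{Hasher, Keccak};

    fn keccak256(data: &[u8]) -> [u8; 32] {
        let mut hasher = Keccak::v256();
        let mut out = [0u8; 32];
        hasher.update(data);
        hasher.finalize(&mut out);
        out
    }

    fn create_zksync_address(sender: [u8; 20], nonce: u64) -> [u8; 20] {
        let prefix = keccak256(b"zksyncCreate");
        let mut digest = Vec::with_capacity(96);
        digest.extend_from_slice(&prefix);
        digest.extend_from_slice(&[0u8; 12]); // sender left-padded to a 32-byte word
        digest.extend_from_slice(&sender);
        let mut nonce_word = [0u8; 32];
        nonce_word[24..].copy_from_slice(&nonce.to_be_bytes()); // nonce as a 32-byte BE word
        digest.extend_from_slice(&nonce_word);
        let hash = keccak256(&digest);
        let mut address = [0u8; 20];
        address.copy_from_slice(&hash[12..]); // take the low 20 bytes
        address
    }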
+// fn insert_contracts( +// raw_storage: &mut SecondaryStateStorage, +// contracts: Vec<(DeployedContract, bool)>, +// ) { +// let logs: Vec = contracts +// .iter() +// .flat_map(|(contract, is_account)| { +// let mut new_logs = vec![]; + +// let deployer_code_key = get_code_key(contract.account_id.address()); +// new_logs.push(StorageLog::new_write_log( +// deployer_code_key, +// hash_bytecode(&contract.bytecode), +// )); + +// if *is_account { +// let is_account_key = get_is_account_key(contract.account_id.address()); +// new_logs.push(StorageLog::new_write_log( +// is_account_key, +// u256_to_h256(1u32.into()), +// )); +// } + +// new_logs +// }) +// .collect(); +// raw_storage.process_transaction_logs(&logs); + +// for (contract, _) in contracts { +// raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); +// } +// raw_storage.save(L1BatchNumber(0)); +// } + +// enum NonceHolderTestMode { +// SetValueUnderNonce, +// IncreaseMinNonceBy5, +// IncreaseMinNonceTooMuch, +// LeaveNonceUnused, +// IncreaseMinNonceBy1, +// SwitchToArbitraryOrdering, +// } + +// impl From for u8 { +// fn from(mode: NonceHolderTestMode) -> u8 { +// match mode { +// NonceHolderTestMode::SetValueUnderNonce => 0, +// NonceHolderTestMode::IncreaseMinNonceBy5 => 1, +// NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, +// NonceHolderTestMode::LeaveNonceUnused => 3, +// NonceHolderTestMode::IncreaseMinNonceBy1 => 4, +// NonceHolderTestMode::SwitchToArbitraryOrdering => 5, +// } +// } +// } + +// fn get_nonce_holder_test_tx( +// nonce: U256, +// account_address: Address, +// test_mode: NonceHolderTestMode, +// block_context: &DerivedBlockContext, +// ) -> TransactionData { +// TransactionData { +// tx_type: 113, +// from: account_address, +// to: account_address, +// gas_limit: U256::from(10000000u32), +// pubdata_price_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// max_fee_per_gas: U256::from(block_context.base_fee), +// max_priority_fee_per_gas: U256::zero(), +// nonce, +// // The reserved fields that are unique for different types of transactions. +// // E.g. nonce is currently used in all transaction, but it should not be mandatory +// // in the long run. +// reserved: [U256::zero(); 4], +// data: vec![12], +// signature: vec![test_mode.into()], + +// ..Default::default() +// } +// } + +// fn run_vm_with_raw_tx<'a, H: HistoryMode>( +// oracle_tools: &'a mut OracleTools<'a, false, H>, +// block_context: DerivedBlockContext, +// block_properties: &'a BlockProperties, +// tx: TransactionData, +// ) -> (VmExecutionResult, bool) { +// let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); +// base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); +// let mut vm = init_vm_inner( +// oracle_tools, +// BlockContextMode::OverrideCurrent(block_context), +// block_properties, +// BLOCK_GAS_LIMIT, +// &base_system_contracts, +// TxExecutionMode::VerifyExecute, +// ); + +// let block_gas_price_per_pubdata = block_context.context.block_gas_price_per_pubdata(); + +// let overhead = tx.overhead_gas(block_gas_price_per_pubdata as u32); +// push_raw_transaction_to_bootloader_memory( +// &mut vm, +// tx, +// TxExecutionMode::VerifyExecute, +// overhead, +// None, +// ); +// let VmBlockResult { +// full_result: result, +// .. 
+// } = vm.execute_till_block_end(BootloaderJobType::TransactionExecution);
+
+//     (result, tx_has_failed(&vm.state, 0))
+// }
+
+// #[test]
+// fn test_nonce_holder() {
+//     let (block_context, block_properties): (DerivedBlockContext, BlockProperties) = {
+//         let (block_context, block_properties) = create_test_block_params();
+//         (block_context.into(), block_properties)
+//     };
+
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+
+//     let account_address = H160::random();
+//     let account = DeployedContract {
+//         account_id: AccountTreeId::new(account_address),
+//         bytecode: read_nonce_holder_tester(),
+//     };
+
+//     insert_contracts(&mut raw_storage, vec![(account, true)]);
+
+//     let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage);
+
+//     let key = storage_key_for_eth_balance(&account_address);
+//     storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0])));
+
+//     let mut run_nonce_test = |nonce: U256,
+//                               test_mode: NonceHolderTestMode,
+//                               error_message: Option<String>,
+//                               comment: &'static str| {
+//         let tx = get_nonce_holder_test_tx(nonce, account_address, test_mode, &block_context);
+
+//         let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled);
+//         let (result, tx_has_failed) =
+//             run_vm_with_raw_tx(&mut oracle_tools, block_context, &block_properties, tx);
+//         if let Some(msg) = error_message {
+//             let expected_error =
+//                 TxRevertReason::ValidationFailed(VmRevertReason::General { msg, data: vec![] });
+//             assert_eq!(
+//                 result
+//                     .revert_reason
+//                     .expect("No revert reason")
+//                     .revert_reason
+//                     .to_string(),
+//                 expected_error.to_string(),
+//                 "{}",
+//                 comment
+//             );
+//         } else {
+//             assert!(!tx_has_failed, "{}", comment);
+//         }
+//     };
+
+//     // Test 1: trying to set a value under a non-sequential nonce value.
+//     run_nonce_test(
+//         1u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         Some("Previous nonce has not been used".to_string()),
+//         "Allowed to set value under non sequential value",
+//     );
+
+//     // Test 2: increase min nonce by 1 with sequential nonce ordering:
+//     run_nonce_test(
+//         0u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy1,
+//         None,
+//         "Failed to increment nonce by 1 for sequential account",
+//     );
+
+//     // Test 3: correctly set value under nonce with sequential nonce ordering:
+//     run_nonce_test(
+//         1u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         None,
+//         "Failed to set value under nonce sequential value",
+//     );
+
+//     // Test 4: migrate to arbitrary nonce ordering:
+//     run_nonce_test(
+//         2u32.into(),
+//         NonceHolderTestMode::SwitchToArbitraryOrdering,
+//         None,
+//         "Failed to switch to arbitrary ordering",
+//     );
+
+//     // Test 5: increase min nonce by 5
+//     run_nonce_test(
+//         6u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         None,
+//         "Failed to increase min nonce by 5",
+//     );
+
+//     // Test 6: since the nonces in range [6,10] are no longer allowed, the
+//     // tx with nonce 10 should not be allowed
+//     run_nonce_test(
+//         10u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         Some("Reusing the same nonce twice".to_string()),
+//         "Allowed to reuse nonce below the minimal one",
+//     );
+
+//     // Test 7: we should be able to use nonce 13
+//     run_nonce_test(
+//         13u32.into(),
+//         NonceHolderTestMode::SetValueUnderNonce,
+//         None,
+//         "Did not allow to use unused nonce 13",
+//     );
+
+//     // Test 8: we should not be able to reuse nonce 13
+//     run_nonce_test(
+//         13u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         Some("Reusing the same nonce twice".to_string()),
+//         "Allowed to reuse the same nonce twice",
+//     );
+
+//     // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
+//     run_nonce_test(
+//         14u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceBy5,
+//         None,
+//         "Did not allow to use a bumped nonce",
+//     );
+
+//     // Test 10: do not allow bumping the nonce by too much
+//     run_nonce_test(
+//         16u32.into(),
+//         NonceHolderTestMode::IncreaseMinNonceTooMuch,
+//         Some("The value for incrementing the nonce is too high".to_string()),
+//         "Allowed for incrementing min nonce too much",
+//     );
+
+//     // Test 11: do not allow leaving a nonce unmarked as used
+//     run_nonce_test(
+//         16u32.into(),
+//         NonceHolderTestMode::LeaveNonceUnused,
+//         Some("The nonce was not set as used".to_string()),
+//         "Allowed to leave nonce as unused",
+//     );
+// }
+
+// #[test]
+// fn test_l1_tx_execution() {
+//     // In this test, we try to execute a contract deployment from L1.
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let mut storage_accessor = StorageView::new(&raw_storage);
+//     let storage_ptr: &mut dyn Storage = &mut storage_accessor;
+
+//     let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled);
+//     let (block_context, block_properties) = create_test_block_params();
+
+//     // Here, instead of marking the code hash as known via the bootloader,
+//     // we mark it using L1->L2 communication, the same way it would likely
+//     // be done in priority mode.
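Before the L1 execution test continues below, here is a toy model (not part of this diff) of the NonceHolder semantics the nonce tests above exercise: a minimum nonce below which everything is rejected, set-wise tracking of used nonces, and a cap on how far the minimum can be bumped. The error strings are the ones asserted above; the cap value is illustrative, not the real contract's bound.

    use std::collections::HashSet;

    struct NonceHolder {
        min_nonce: u64,
        used: HashSet<u64>,
    }

    impl NonceHolder {
        // A nonce below the minimum, or one already recorded, counts as reuse.
        fn use_nonce(&mut self, nonce: u64) -> Result<(), &'static str> {
            if nonce < self.min_nonce || !self.used.insert(nonce) {
                return Err("Reusing the same nonce twice");
            }
            Ok(())
        }

        // Bumping the minimum invalidates every nonce below it, but the jump is bounded.
        fn increase_min_nonce(&mut self, by: u64) -> Result<(), &'static str> {
            if by > 1 << 32 {
                return Err("The value for incrementing the nonce is too high");
            }
            self.min_nonce += by;
            Ok(())
        }
    }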
+// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); +// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); + +// let required_l2_to_l1_logs = vec![ +// L2ToL1Log { +// shard_id: 0, +// is_service: false, +// tx_number_in_block: 0, +// sender: SYSTEM_CONTEXT_ADDRESS, +// key: u256_to_h256(U256::from(block_context.block_timestamp)), +// value: Default::default(), +// }, +// L2ToL1Log { +// shard_id: 0, +// is_service: true, +// tx_number_in_block: 0, +// sender: BOOTLOADER_ADDRESS, +// key: l1_deploy_tx_data.canonical_l1_tx_hash(), +// value: u256_to_h256(U256::from(1u32)), +// }, +// ]; + +// let sender_address = l1_deploy_tx_data.from(); + +// oracle_tools.decommittment_processor.populate( +// vec![( +// h256_to_u256(contract_code_hash), +// bytes_to_be_words(contract_code), +// )], +// Timestamp(0), +// ); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory( +// &mut vm, +// &l1_deploy_tx, +// TxExecutionMode::VerifyExecute, +// None, +// ); + +// let res = vm.execute_next_tx(u32::MAX, false).unwrap(); + +// // The code hash of the deployed contract should be marked as republished. +// let known_codes_key = get_known_code_key(&contract_code_hash); + +// // The contract should be deployed successfully. +// let deployed_address = deployed_address_create(sender_address, U256::zero()); +// let account_code_key = get_code_key(&deployed_address); + +// let expected_slots = vec![ +// (u256_to_h256(U256::from(1u32)), known_codes_key), +// (contract_code_hash, account_code_key), +// ]; +// assert!(!tx_has_failed(&vm.state, 0)); + +// verify_required_storage(&vm.state, expected_slots); + +// assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); + +// let tx = get_l1_execute_test_contract_tx(deployed_address, true); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let res = StorageWritesDeduplicator::apply_on_empty_state( +// &vm.execute_next_tx(u32::MAX, false) +// .unwrap() +// .result +// .logs +// .storage_logs, +// ); +// assert_eq!(res.initial_storage_writes, 0); + +// let tx = get_l1_execute_test_contract_tx(deployed_address, false); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); +// let res = StorageWritesDeduplicator::apply_on_empty_state( +// &vm.execute_next_tx(u32::MAX, false) +// .unwrap() +// .result +// .logs +// .storage_logs, +// ); +// assert_eq!(res.initial_storage_writes, 2); + +// let repeated_writes = res.repeated_storage_writes; + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); +// let res = StorageWritesDeduplicator::apply_on_empty_state( +// &vm.execute_next_tx(u32::MAX, false) +// .unwrap() +// .result +// .logs +// .storage_logs, +// ); +// assert_eq!(res.initial_storage_writes, 1); +// // We do the same storage write, so it will be deduplicated +// assert_eq!(res.repeated_storage_writes, repeated_writes); + +// let mut tx = get_l1_execute_test_contract_tx(deployed_address, false); +// tx.execute.value = U256::from(1); +// match &mut tx.common_data { +// ExecuteTransactionCommon::L1(l1_data) => { +// l1_data.to_mint = U256::from(4); +// } +// _ => 
unreachable!(),
+//     }
+//     push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None);
+//     let execution_result = vm.execute_next_tx(u32::MAX, false).unwrap();
+//     // The method is not payable, so the transaction with a non-zero value should fail.
+//     assert_eq!(
+//         execution_result.status,
+//         TxExecutionStatus::Failure,
+//         "The transaction should fail"
+//     );
+
+//     let res =
+//         StorageWritesDeduplicator::apply_on_empty_state(&execution_result.result.logs.storage_logs);
+
+//     // There are 2 initial writes here:
+//     // - totalSupply of ETH token
+//     // - balance of the refund recipient
+//     assert_eq!(res.initial_storage_writes, 2);
+// }
+
+// #[test]
+// fn test_invalid_bytecode() {
+//     let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB");
+//     let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false);
+//     let mut raw_storage = SecondaryStateStorage::new(db);
+//     insert_system_contracts(&mut raw_storage);
+//     let (block_context, block_properties) = create_test_block_params();
+//     let block_gas_per_pubdata = block_context.block_gas_price_per_pubdata();
+
+//     let test_vm_with_custom_bytecode_hash =
+//         |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| {
+//             let mut storage_accessor = StorageView::new(&raw_storage);
+//             let storage_ptr: &mut dyn Storage = &mut storage_accessor;
+//             let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled);
+
+//             let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash(
+//                 h256_to_u256(bytecode_hash),
+//                 block_gas_per_pubdata as u32,
+//             );
+
+//             run_vm_with_custom_factory_deps(
+//                 &mut oracle_tools,
+//                 block_context,
+//                 &block_properties,
+//                 encoded_tx,
+//                 predefined_overhead,
+//                 expected_revert_reason,
+//             );
+//         };
+
+//     let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| {
+//         TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General {
+//             msg: msg.to_string(),
+//             data,
+//         })
+//     };
+
+//     // Here we provide a correctly formatted bytecode hash with an odd length
+//     // in words, so it should work.
+//     test_vm_with_custom_bytecode_hash(
+//         H256([
+//             1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+//             0, 0, 0,
+//         ]),
+//         None,
+//     );
+
+//     // Here we provide a correctly formatted bytecode hash with an even length
+//     // in words, so it should fail.
+//     test_vm_with_custom_bytecode_hash(
+//         H256([
+//             1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+//             0, 0, 0,
+//         ]),
+//         Some(failed_to_mark_factory_deps(
+//             "Code length in words must be odd",
+//             vec![
+//                 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+//                 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+//                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110,
+//                 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116,
+//                 32, 98, 101, 32, 111, 100, 100,
+//             ],
+//         )),
+//     );
+
+//     // Here we provide an incorrectly formatted bytecode hash with an odd length
+//     // in words, so it should fail.
+// test_vm_with_custom_bytecode_hash( +// H256([ +// 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// Some(failed_to_mark_factory_deps( +// "Incorrectly formatted bytecodeHash", +// vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, +// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, +// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// ], +// )), +// ); + +// // Here we provide incorrectly formatted bytecode of odd length, so +// // it should fail. +// test_vm_with_custom_bytecode_hash( +// H256([ +// 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, +// ]), +// Some(failed_to_mark_factory_deps( +// "Incorrectly formatted bytecodeHash", +// vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, +// 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, +// 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// ], +// )), +// ); +// } + +// #[test] +// fn test_tracing_of_execution_errors() { +// // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
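The raw `data` vectors asserted throughout these tests are standard Solidity `Error(string)` revert payloads: the 4-byte selector 0x08c379a0 (the recurring `8, 195, 121, 160` prefix), a 32-byte offset word (always 32 here), a 32-byte length word, then the UTF-8 message padded to a 32-byte boundary. A minimal encoder reproducing that layout (a sketch, not part of this diff):

    fn encode_error_string(msg: &str) -> Vec<u8> {
        let mut out = vec![0x08, 0xc3, 0x79, 0xa0]; // bytes4(keccak256("Error(string)"))
        let mut offset = [0u8; 32];
        offset[31] = 32; // offset of the string data within the ABI payload
        out.extend_from_slice(&offset);
        let mut len = [0u8; 32];
        len[24..].copy_from_slice(&(msg.len() as u64).to_be_bytes());
        out.extend_from_slice(&len);
        let mut data = msg.as_bytes().to_vec();
        data.resize((msg.len() + 31) / 32 * 32, 0); // pad to a 32-byte boundary
        out.extend_from_slice(&data);
        out
    }

For example, `encode_error_string("short")` yields the `msg: "short"` payload checked in the tracing test below; decoding is the reverse, which is what `VmRevertReason::try_from(&revert_reason as &[u8])` performs on the tracer output.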
+// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let private_key = H256::random(); + +// let contract_address = Address::random(); +// let error_contract = DeployedContract { +// account_id: AccountTreeId::new(contract_address), +// bytecode: read_error_contract(), +// }; + +// let tx = get_error_tx( +// private_key, +// Nonce(0), +// contract_address, +// Fee { +// gas_limit: U256::from(1000000u32), +// max_fee_per_gas: U256::from(10000000000u64), +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ); + +// insert_contracts(&mut raw_storage, vec![(error_contract, false)]); + +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let key = storage_key_for_eth_balance(&tx.common_data.initiator_address); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory( +// &mut vm, +// &tx.into(), +// TxExecutionMode::VerifyExecute, +// None, +// ); + +// let mut tracer = TransactionResultTracer::new(usize::MAX, false); +// assert_eq!( +// vm.execute_with_custom_tracer(&mut tracer), +// VmExecutionStopReason::VmFinished, +// "Tracer should never request stop" +// ); + +// match tracer.revert_reason { +// Some(revert_reason) => { +// let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); +// assert_eq!( +// revert_reason, +// VmRevertReason::General { +// msg: "short".to_string(), +// data: vec![ +// 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, +// 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0 +// ], +// } +// ) +// } +// _ => panic!( +// "Tracer captured incorrect result {:#?}", +// tracer.revert_reason +// ), +// } +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// let tx = get_error_tx( +// private_key, +// Nonce(1), +// contract_address, +// Fee { +// gas_limit: U256::from(1000000u32), +// max_fee_per_gas: U256::from(10000000000u64), +// max_priority_fee_per_gas: U256::zero(), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ); +// push_transaction_to_bootloader_memory( +// &mut vm, +// &tx.into(), +// TxExecutionMode::VerifyExecute, +// None, +// ); + +// let mut tracer = TransactionResultTracer::new(10, false); +// assert_eq!( +// vm.execute_with_custom_tracer(&mut tracer), +// VmExecutionStopReason::TracerRequestedStop, +// ); +// 
assert!(tracer.is_limit_reached()); +// } + +// /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. +// #[test] +// fn test_tx_gas_limit_offset() { +// let gas_limit = U256::from(999999); + +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let raw_storage = SecondaryStateStorage::new(db); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let contract_code = read_test_contract(); +// let tx: Transaction = get_deploy_tx( +// H256::random(), +// Nonce(0), +// &contract_code, +// Default::default(), +// Default::default(), +// Fee { +// gas_limit, +// ..Default::default() +// }, +// ) +// .into(); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let gas_limit_from_memory = vm +// .state +// .memory +// .read_slot( +// BOOTLOADER_HEAP_PAGE as usize, +// TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, +// ) +// .value; +// assert_eq!(gas_limit_from_memory, gas_limit); +// } + +// #[test] +// fn test_is_write_initial_behaviour() { +// // In this test, we check result of `is_write_initial` at different stages. + +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + +// let base_fee = block_context.base_fee; +// let account_pk = H256::random(); +// let contract_code = read_test_contract(); +// let tx: Transaction = get_deploy_tx( +// account_pk, +// Nonce(0), +// &contract_code, +// vec![], +// &[], +// Fee { +// gas_limit: U256::from(20000000u32), +// max_fee_per_gas: U256::from(base_fee), +// max_priority_fee_per_gas: U256::from(0), +// gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), +// }, +// ) +// .into(); + +// let sender_address = tx.initiator_account(); +// let nonce_key = get_nonce_key(&sender_address); + +// // Check that the next write to the nonce key will be initial. +// assert!(storage_ptr.is_write_initial(&nonce_key)); + +// // Set balance to be able to pay fee for txs. 
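A note on the balance constant used for this (and repeated throughout these tests): `U256([0, 0, 1, 0])` builds the value from four little-endian u64 limbs, i.e. 2^128 — effectively an inexhaustible test balance. A quick standalone check (assuming the primitive-types crate's `U256`, which has the same limb layout as the type used here):

    use primitive_types::U256;

    fn main() {
        let balance = U256([0, 0, 1, 0]); // four little-endian u64 limbs => 2^128
        assert_eq!(balance, U256::from(2u8).pow(U256::from(128u8)));
        println!("test balance = {balance}");
    }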
+// let balance_key = storage_key_for_eth_balance(&sender_address); +// storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// vm.execute_next_tx(u32::MAX, false) +// .expect("Bootloader failed while processing the first transaction"); +// // Check that `is_write_initial` still returns true for the nonce key. +// assert!(storage_ptr.is_write_initial(&nonce_key)); +// } + +// pub fn get_l1_tx_with_custom_bytecode_hash( +// bytecode_hash: U256, +// block_gas_per_pubdata: u32, +// ) -> (Vec, u32) { +// let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); +// let predefined_overhead = +// tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash], block_gas_per_pubdata); +// let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); + +// (bytes_to_be_words(tx_bytes), predefined_overhead) +// } + +// const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; + +// pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { +// let sender = H160::random(); +// get_l1_execute_test_contract_tx_with_sender( +// sender, +// deployed_address, +// with_panic, +// U256::zero(), +// false, +// ) +// } + +// pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) -> Transaction { +// let test_contract = load_contract( +// "etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json", +// ); + +// let function = test_contract.function("longReturnData").unwrap(); + +// let calldata = function +// .encode_input(&[]) +// .expect("failed to encode parameters"); + +// Transaction { +// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { +// sender, +// gas_limit: U256::from(100000000u32), +// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), +// ..Default::default() +// }), +// execute: Execute { +// contract_address: deployed_address, +// calldata, +// value: U256::zero(), +// factory_deps: None, +// }, +// received_timestamp_ms: 0, +// } +// } + +// pub fn get_l1_execute_test_contract_tx_with_sender( +// sender: Address, +// deployed_address: Address, +// with_panic: bool, +// value: U256, +// payable: bool, +// ) -> Transaction { +// let execute = execute_test_contract(deployed_address, with_panic, value, payable); + +// Transaction { +// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { +// sender, +// gas_limit: U256::from(200_000_000u32), +// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), +// to_mint: value, +// ..Default::default() +// }), +// execute, +// received_timestamp_ms: 0, +// } +// } + +// pub fn get_l1_deploy_tx(code: &[u8], calldata: &[u8]) -> Transaction { +// let execute = get_create_execute(code, calldata); + +// Transaction { +// common_data: ExecuteTransactionCommon::L1(L1TxCommonData { +// sender: H160::random(), +// gas_limit: U256::from(2000000u32), +// gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), +// ..Default::default() +// }), +// execute, +// received_timestamp_ms: 0, +// } +// } + +// fn read_test_contract() -> Vec { +// 
read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +// } + +// fn read_long_return_data_contract() -> Vec { +// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") +// } + +// fn read_nonce_holder_tester() -> Vec { +// read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +// } + +// fn read_error_contract() -> Vec { +// read_bytecode( +// "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", +// ) +// } + +// fn execute_test_contract( +// address: Address, +// with_panic: bool, +// value: U256, +// payable: bool, +// ) -> Execute { +// let test_contract = load_contract( +// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", +// ); + +// let function = if payable { +// test_contract +// .function("incrementWithRevertPayable") +// .unwrap() +// } else { +// test_contract.function("incrementWithRevert").unwrap() +// }; + +// let calldata = function +// .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) +// .expect("failed to encode parameters"); + +// Execute { +// contract_address: address, +// calldata, +// value, +// factory_deps: None, +// } +// } + +// #[test] +// fn test_call_tracer() { +// let sender = H160::random(); +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); + +// let (block_context, block_properties) = create_test_block_params(); + +// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); +// let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); + +// let sender_address_counter = l1_deploy_tx_data.from(); +// let mut storage_accessor = StorageView::new(&raw_storage); +// let storage_ptr: &mut dyn Storage = &mut storage_accessor; + +// let key = storage_key_for_eth_balance(&sender_address_counter); +// storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); +// oracle_tools.decommittment_processor.populate( +// vec![( +// h256_to_u256(contract_code_hash), +// bytes_to_be_words(contract_code), +// )], +// Timestamp(0), +// ); + +// let contract_code = read_long_return_data_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let l1_deploy_long_return_data_tx = get_l1_deploy_tx(&contract_code, &[]); +// oracle_tools.decommittment_processor.populate( +// vec![( +// h256_to_u256(contract_code_hash), +// bytes_to_be_words(contract_code), +// )], +// Timestamp(0), +// ); + +// let tx_data: TransactionData = l1_deploy_long_return_data_tx.clone().into(); +// let sender_long_return_address = tx_data.from(); +// // The contract should be deployed successfully. 
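+// // `deployed_address_create` presumably derives the deterministic CREATE address from the +// // deployer account and its deployment nonce (zero here, as this is the account's first deployment).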
+// let deployed_address_long_return_data = +// deployed_address_create(sender_long_return_address, U256::zero()); +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context.into(), Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// push_transaction_to_bootloader_memory( +// &mut vm, +// &l1_deploy_tx, +// TxExecutionMode::VerifyExecute, +// None, +// ); + +// // The contract should be deployed successfully. +// let deployed_address = deployed_address_create(sender_address_counter, U256::zero()); +// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); +// let calls = res.call_traces; +// let mut create_call = None; +// // The first MIMIC call is the call to the value simulator. All calls go through it. +// // The second MIMIC call is the call to the Deployer contract. +// // Only the third-level call is the constructor call to the newly deployed contract, and we call it create_call. +// for call in &calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// for call in &call.calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// for call in &call.calls { +// if let CallType::Create = call.r#type { +// create_call = Some(call.clone()); +// } +// } +// } +// } +// } +// } +// let expected = Call { +// r#type: CallType::Create, +// to: deployed_address, +// from: sender_address_counter, +// parent_gas: 0, +// gas_used: 0, +// gas: 0, +// value: U256::zero(), +// input: vec![], +// output: vec![ +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 0, 0, 0, 0, +// ], +// error: None, +// revert_reason: None, +// calls: vec![], +// }; +// assert_eq!(create_call.unwrap(), expected); + +// push_transaction_to_bootloader_memory( +// &mut vm, +// &l1_deploy_long_return_data_tx, +// TxExecutionMode::VerifyExecute, +// None, +// ); + +// vm.execute_next_tx(u32::MAX, false).unwrap(); + +// let tx = get_l1_execute_test_contract_tx_with_sender( +// sender, +// deployed_address, +// false, +// U256::from(1u8), +// true, +// ); + +// let tx_data: TransactionData = tx.clone().into(); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); +// let calls = res.call_traces; + +// // We don't want to compare gas used, because it's not fully deterministic.
+// let expected = Call { +// r#type: CallType::Call(FarCallOpcode::Mimic), +// to: deployed_address, +// from: tx_data.from(), +// parent_gas: 0, +// gas_used: 0, +// gas: 0, +// value: U256::from(1), +// input: tx_data.data, +// output: vec![ +// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +// 0, 0, 1, +// ], +// error: None, +// revert_reason: None, +// calls: vec![], +// }; + +// // First loop filter out the bootloaders calls and +// // the second loop filters out the calls msg value simulator calls +// for call in calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// for call in call.calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// assert_eq!(expected, call); +// } +// } +// } +// } + +// let tx = get_l1_execute_test_contract_tx_with_sender( +// sender, +// deployed_address, +// true, +// U256::from(1u8), +// true, +// ); + +// let tx_data: TransactionData = tx.clone().into(); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); +// let calls = res.call_traces; + +// let expected = Call { +// r#type: CallType::Call(FarCallOpcode::Mimic), +// to: deployed_address, +// from: tx_data.from(), +// parent_gas: 257030, +// gas_used: 348, +// gas: 253008, +// value: U256::from(1u8), +// input: tx_data.data, +// output: vec![], +// error: None, +// revert_reason: Some("This method always reverts".to_string()), +// calls: vec![], +// }; + +// for call in calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// for call in call.calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// assert_eq!(expected, call); +// } +// } +// } +// } + +// let tx = get_l1_tx_with_large_output(sender, deployed_address_long_return_data); + +// let tx_data: TransactionData = tx.clone().into(); +// push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + +// assert_ne!(deployed_address_long_return_data, deployed_address); +// let res = vm.execute_next_tx(u32::MAX, true).unwrap(); +// let calls = res.call_traces; +// for call in calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// for call in call.calls { +// if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { +// assert_eq!(call.input, tx_data.data); +// assert_eq!( +// call.revert_reason, +// Some("Unknown revert reason".to_string()) +// ); +// } +// } +// } +// } +// } + +// #[test] +// fn test_get_used_contracts() { +// // get block context +// let (block_context, block_properties) = create_test_block_params(); +// let block_context: DerivedBlockContext = block_context.into(); + +// // insert system contracts to avoid vm errors during initialization +// let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); +// let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); +// let mut raw_storage = SecondaryStateStorage::new(db); +// insert_system_contracts(&mut raw_storage); + +// // get oracle tools +// let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); +// let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + +// // init vm +// let mut vm = init_vm_inner( +// &mut oracle_tools, +// BlockContextMode::NewBlock(block_context, Default::default()), +// &block_properties, +// BLOCK_GAS_LIMIT, +// &BASE_SYSTEM_CONTRACTS, +// TxExecutionMode::VerifyExecute, +// ); + +// 
assert!(known_bytecodes_without_aa_code(&vm).is_empty()); + +// // create and push and execute some not-empty factory deps transaction with success status +// // to check that get_used_contracts() updates +// let contract_code = read_test_contract(); +// let contract_code_hash = hash_bytecode(&contract_code); +// let tx1 = get_l1_deploy_tx(&contract_code, &[]); + +// push_transaction_to_bootloader_memory(&mut vm, &tx1, TxExecutionMode::VerifyExecute, None); + +// let res1 = vm.execute_next_tx(u32::MAX, true).unwrap(); +// assert_eq!(res1.status, TxExecutionStatus::Success); +// assert!(vm +// .get_used_contracts() +// .contains(&h256_to_u256(contract_code_hash))); + +// assert_eq!( +// vm.get_used_contracts() +// .into_iter() +// .collect::>(), +// known_bytecodes_without_aa_code(&vm) +// .keys() +// .cloned() +// .collect::>() +// ); + +// // create push and execute some non-empty factory deps transaction that fails +// // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) + +// let mut tx2 = tx1; +// tx2.execute.contract_address = L1_MESSENGER_ADDRESS; + +// let calldata = vec![1, 2, 3]; +// let big_calldata: Vec = calldata +// .iter() +// .cycle() +// .take(calldata.len() * 1024) +// .cloned() +// .collect(); + +// tx2.execute.calldata = big_calldata; +// tx2.execute.factory_deps = Some(vec![vec![1; 32]]); + +// push_transaction_to_bootloader_memory(&mut vm, &tx2, TxExecutionMode::VerifyExecute, None); + +// let res2 = vm.execute_next_tx(u32::MAX, false).unwrap(); + +// assert_eq!(res2.status, TxExecutionStatus::Failure); + +// for factory_dep in tx2.execute.factory_deps.unwrap() { +// let hash = hash_bytecode(&factory_dep); +// let hash_to_u256 = h256_to_u256(hash); +// assert!(known_bytecodes_without_aa_code(&vm) +// .keys() +// .contains(&hash_to_u256)); +// assert!(!vm.get_used_contracts().contains(&hash_to_u256)); +// } +// } + +// fn known_bytecodes_without_aa_code(vm: &VmInstance) -> HashMap> { +// let mut known_bytecodes_without_aa_code = vm +// .state +// .decommittment_processor +// .known_bytecodes +// .inner() +// .clone(); + +// known_bytecodes_without_aa_code +// .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) +// .unwrap(); + +// known_bytecodes_without_aa_code +// } diff --git a/core/multivm_deps/vm_m6/src/tests/mod.rs b/core/multivm_deps/vm_m6/src/tests/mod.rs new file mode 100644 index 000000000000..3900135abeaa --- /dev/null +++ b/core/multivm_deps/vm_m6/src/tests/mod.rs @@ -0,0 +1 @@ +mod bootloader; diff --git a/core/multivm_deps/vm_m6/src/transaction_data.rs b/core/multivm_deps/vm_m6/src/transaction_data.rs new file mode 100644 index 000000000000..c374b34c1c9d --- /dev/null +++ b/core/multivm_deps/vm_m6/src/transaction_data.rs @@ -0,0 +1,598 @@ +use zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; +use zksync_types::ethabi::{encode, Address, Token}; +use zksync_types::fee::encoding_len; +use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; +use zksync_types::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; +use zksync_utils::{address_to_h256, ceil_div_u256}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; + +use crate::vm_with_bootloader::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +}; + +pub(crate) const L1_TX_TYPE: u8 = 255; + +// This structure represents the data that is used by +// the Bootloader to describe the transaction. 
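+// The field order below mirrors the ABI tuple built in `abi_encode_with_custom_factory_deps`, +// so the struct can be encoded for the bootloader without any reshuffling.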
+#[derive(Debug, Default, Clone)] +pub struct TransactionData { + pub tx_type: u8, + pub from: Address, + pub to: Address, + pub gas_limit: U256, + pub pubdata_price_limit: U256, + pub max_fee_per_gas: U256, + pub max_priority_fee_per_gas: U256, + pub paymaster: Address, + pub nonce: U256, + pub value: U256, + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transactions, but it should not be mandatory + // in the long run. + pub reserved: [U256; 4], + pub data: Vec<u8>, + pub signature: Vec<u8>, + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. + pub factory_deps: Vec<Vec<u8>>, + pub paymaster_input: Vec<u8>, + pub reserved_dynamic: Vec<u8>, +} + +impl From<Transaction> for TransactionData { + fn from(execute_tx: Transaction) -> Self { + match &execute_tx.common_data { + ExecuteTransactionCommon::L2(common_data) => { + let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); + + let should_check_chain_id = if matches!( + common_data.transaction_type, + TransactionType::LegacyTransaction + ) { + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + + TransactionData { + tx_type: (common_data.transaction_type as u32) as u8, + from: execute_tx.initiator_account(), + to: execute_tx.execute.contract_address, + gas_limit: common_data.fee.gas_limit, + pubdata_price_limit: common_data.fee.gas_per_pubdata_limit, + max_fee_per_gas: common_data.fee.max_fee_per_gas, + max_priority_fee_per_gas: common_data.fee.max_priority_fee_per_gas, + paymaster: common_data.paymaster_params.paymaster, + nonce, + value: execute_tx.execute.value, + reserved: [ + should_check_chain_id, + U256::zero(), + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + signature: common_data.signature.clone(), + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: common_data.paymaster_params.paymaster_input.clone(), + reserved_dynamic: vec![], + } + } + ExecuteTransactionCommon::L1(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: L1_TX_TYPE, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.serial_id.0), // priority op ID + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + } + } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything +
max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.upgrade_id as u16), + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps.unwrap_or_default(), + paymaster_input: vec![], + reserved_dynamic: vec![], + } + } + } + } +} + +impl TransactionData { + pub fn from(&self) -> Address { + self.from + } + + // This method is to be used only in tests, when we want to bypass the checks imposed + // on the bytecode hash. + pub(crate) fn abi_encode_with_custom_factory_deps( + self, + factory_deps_hashes: Vec, + ) -> Vec { + encode(&[Token::Tuple(vec![ + Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), + Token::Address(self.from), + Token::Address(self.to), + Token::Uint(self.gas_limit), + Token::Uint(self.pubdata_price_limit), + Token::Uint(self.max_fee_per_gas), + Token::Uint(self.max_priority_fee_per_gas), + Token::Address(self.paymaster), + Token::Uint(self.nonce), + Token::Uint(self.value), + Token::FixedArray(self.reserved.iter().copied().map(Token::Uint).collect()), + Token::Bytes(self.data), + Token::Bytes(self.signature), + Token::Array(factory_deps_hashes.into_iter().map(Token::Uint).collect()), + Token::Bytes(self.paymaster_input), + Token::Bytes(self.reserved_dynamic), + ])]) + } + + pub(crate) fn abi_encode(self) -> Vec { + let factory_deps_hashes = self + .factory_deps + .iter() + .map(|dep| h256_to_u256(hash_bytecode(dep))) + .collect(); + self.abi_encode_with_custom_factory_deps(factory_deps_hashes) + } + + pub fn into_tokens(self) -> Vec { + let bytes = self.abi_encode(); + assert!(bytes.len() % 32 == 0); + + bytes_to_be_words(bytes) + } + + pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { + // It is enforced by the protocol that the L1 transactions always pay the exact amount of gas per pubdata + // as was supplied in the transaction. 
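+ // L2 transactions, by contrast, pay the block-wide gas-per-pubdata price passed in by the caller.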
+ if self.tx_type == L1_TX_TYPE { + self.pubdata_price_limit.as_u32() + } else { + block_gas_price_per_pubdata + } + } + + pub fn overhead_gas(&self, block_gas_price_per_pubdata: u32) -> u32 { + let total_gas_limit = self.gas_limit.as_u32(); + let gas_price_per_pubdata = + self.effective_gas_price_per_pubdata(block_gas_price_per_pubdata); + + let encoded_len = encoding_len( + self.data.len() as u64, + self.signature.len() as u64, + self.factory_deps.len() as u64, + self.paymaster_input.len() as u64, + self.reserved_dynamic.len() as u64, + ); + + let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + get_amortized_overhead( + total_gas_limit, + gas_price_per_pubdata, + encoded_len, + coeficients, + ) + } + + pub fn trusted_gas_limit(&self, _block_gas_price_per_pubdata: u32) -> u32 { + self.gas_limit.as_u32() + } +} + +pub fn derive_overhead( + gas_limit: u32, + gas_price_per_pubdata: u32, + encoded_len: usize, + coeficients: OverheadCoeficients, +) -> u32 { + // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT + // will be spent entirely on publishing bytecodes, so we derive the overhead solely based on the capped value + let gas_limit = std::cmp::min(MAX_TX_ERGS_LIMIT, gas_limit); + + // Using the large U256 type to avoid overflow + let max_block_overhead = U256::from(block_overhead_gas(gas_price_per_pubdata)); + let gas_limit = U256::from(gas_limit); + let encoded_len = U256::from(encoded_len); + + // MAX_TX_ERGS_LIMIT is formed in a way that it can fully fill the single-instance circuits + // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance + // circuits. + let overhead_for_single_instance_circuits = + ceil_div_u256(gas_limit * max_block_overhead, MAX_TX_ERGS_LIMIT.into()); + + // The overhead for occupying the bootloader memory + let overhead_for_length = ceil_div_u256( + encoded_len * max_block_overhead, + BOOTLOADER_TX_ENCODING_SPACE.into(), + ); + + // The overhead for occupying a single tx slot + let tx_slot_overhead = ceil_div_u256(max_block_overhead, MAX_TXS_IN_BLOCK.into()); + + // We use "ceil" here for formal reasons: it allows an easier approach for calculating the overhead in O(1) + // let max_pubdata_in_tx = ceil_div_u256(gas_limit, gas_price_per_pubdata); + + // The maximal potential overhead from pubdata + // let pubdata_overhead = ceil_div_u256( + // max_pubdata_in_tx * max_block_overhead, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + + vec![ + (coeficients.ergs_limit_overhead_coeficient + * overhead_for_single_instance_circuits.as_u32() as f64) + .floor() as u32, + (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + .floor() as u32, + (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + ] + .into_iter() + .max() + .unwrap() +} + +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. They are here to provide a certain "discount" for normal transactions +/// at the risk of malicious transactions that may close the block prematurely.
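+/// A coefficient below 1.0 means the user is charged only that fraction of the corresponding +/// worst-case overhead component (the maximum over the three components in `derive_overhead`).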
+/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// result in an integer number +#[derive(Debug, Clone, Copy)] +pub struct OverheadCoeficients { + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, +} + +impl OverheadCoeficients { + // This method ensures that the parameters keep the required invariants + fn new_checked( + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, + ) -> Self { + assert!( + (MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient).round() + == MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient, + "MAX_TX_ERGS_LIMIT / ergs_limit_overhead_coeficient must be an integer" + ); + + Self { + slot_overhead_coeficient, + bootloader_memory_overhead_coeficient, + ergs_limit_overhead_coeficient, + } + } + + // L1->L2 do not receive any discounts + fn new_l1() -> Self { + OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + } + + fn new_l2() -> Self { + OverheadCoeficients::new_checked( + 1.0, 1.0, + // For L2 transactions we allow a certain default discount with regard to the number of ergs. + // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations + // on gas per pubdata allow for roughly 800kk gas per L1 batch, so the rough trust "discount" on the proof's part + // to be paid by the users is 0.1. + 0.1, + ) + } + + pub fn from_tx_type(tx_type: u8) -> Self { + if tx_type == L1_TX_TYPE { + Self::new_l1() + } else { + Self::new_l2() + } + } +} + +/// This method returns the overhead for processing the block +pub fn get_amortized_overhead( + total_gas_limit: u32, + gas_per_pubdata_byte_limit: u32, + encoded_len: usize, + coeficients: OverheadCoeficients, +) -> u32 { + // Using large U256 type to prevent overflows. + let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); + let total_gas_limit = U256::from(total_gas_limit); + let encoded_len = U256::from(encoded_len); + + // Derivation of overhead consists of 4 parts: + // 1. The overhead for taking up a transaction's slot. (O1): O1 = 1 / MAX_TXS_IN_BLOCK + // 2. The overhead for taking up the bootloader's memory (O2): O2 = encoded_len / BOOTLOADER_TX_ENCODING_SPACE + // 3. The overhead for possible usage of pubdata. (O3): O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK + // 4. The overhead for possible usage of all the single-instance circuits. (O4): O4 = gas_limit / MAX_TX_ERGS_LIMIT + // + // The maximum of these is taken to derive the part of the block's overhead to be paid by the users: + // + // max_overhead = max(O1, O2, O3, O4) + // overhead_gas = ceil(max_overhead * overhead_for_block_gas). Thus, overhead_gas is a function of + // tx_gas_limit, gas_per_pubdata_byte_limit and encoded_len. + // + // While it is possible to derive the overhead with binary search in O(log n), it is too expensive to be done + // on L1, so here is a reference implementation of finding the overhead for transaction in O(1): + // + // Given total_gas_limit = tx_gas_limit + overhead_gas, we need to find overhead_gas and tx_gas_limit, such that: + // 1. overhead_gas is maximal possible (the operator is paid fairly) + // 2. 
overhead_gas(tx_gas_limit, gas_per_pubdata_byte_limit, encoded_len) >= overhead_gas (the user does not overpay) + // The third part boils to the following 4 inequalities (at least one of these must hold): + // ceil(O1 * overhead_for_block_gas) >= overhead_gas + // ceil(O2 * overhead_for_block_gas) >= overhead_gas + // ceil(O3 * overhead_for_block_gas) >= overhead_gas + // ceil(O4 * overhead_for_block_gas) >= overhead_gas + // + // Now, we need to solve each of these separately: + + // 1. The overhead for occupying a single tx slot is a constant: + let tx_slot_overhead = { + let tx_slot_overhead = + ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); + (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + }; + + // 2. The overhead for occupying the bootloader memory can be derived from encoded_len + let overhead_for_length = { + let overhead_for_length = ceil_div_u256( + encoded_len * overhead_for_block_gas, + BOOTLOADER_TX_ENCODING_SPACE.into(), + ) + .as_u32(); + + (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + as u32 + }; + + // 3. ceil(O3 * overhead_for_block_gas) >= overhead_gas + // O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK = ceil(gas_limit / gas_per_pubdata_byte_limit) / MAX_PUBDATA_PER_BLOCK + // >= (gas_limit / (gas_per_pubdata_byte_limit * MAX_PUBDATA_PER_BLOCK). Throwing off the `ceil`, while may provide marginally lower + // overhead to the operator, provides substantially easier formula to work with. + // + // For better clarity, let's denote gas_limit = GL, MAX_PUBDATA_PER_BLOCK = MP, gas_per_pubdata_byte_limit = EP, overhead_for_block_gas = OB, total_gas_limit = TL, overhead_gas = OE + // ceil(OB * (TL - OE) / (EP * MP)) >= OE + // + // OB * (TL - OE) / (MP * EP) > OE - 1 + // OB * (TL - OE) > (OE - 1) * EP * MP + // OB * TL + EP * MP > OE * EP * MP + OE * OB + // (OB * TL + EP * MP) / (EP * MP + OB) > OE + // OE = floor((OB * TL + EP * MP) / (EP * MP + OB)) with possible -1 if the division is without remainder + // let overhead_for_pubdata = { + // let numerator: U256 = overhead_for_block_gas * total_gas_limit + // + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK); + // let denominator = + // gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas; + + // // Corner case: if `total_gas_limit` = `gas_per_pubdata_byte_limit` = 0 + // // then the numerator will be 0 and subtracting 1 will cause a panic, so we just return a zero. + // if numerator.is_zero() { + // 0.into() + // } else { + // (numerator - 1) / denominator + // } + // }; + + // 4. K * ceil(O4 * overhead_for_block_gas) >= overhead_gas, where K is the discount + // O4 = gas_limit / MAX_TX_ERGS_LIMIT. 
Using the notation from the previous equation: + // ceil(OB * GL / MAX_TX_ERGS_LIMIT) >= (OE / K) + // ceil(OB * (TL - OE) / MAX_TX_ERGS_LIMIT) >= (OE/K) + // OB * (TL - OE) / MAX_TX_ERGS_LIMIT > (OE/K) - 1 + // OB * (TL - OE) > (OE/K) * MAX_TX_ERGS_LIMIT - MAX_TX_ERGS_LIMIT + // OB * TL + MAX_TX_ERGS_LIMIT > OE * ( MAX_TX_ERGS_LIMIT/K + OB) + // OE = floor(OB * TL + MAX_TX_ERGS_LIMIT / (MAX_TX_ERGS_LIMIT/K + OB)), with possible -1 if the division is without remainder + let overhead_for_gas = { + let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); + let denominator: U256 = U256::from( + (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + ) + overhead_for_block_gas; + + let overhead_for_gas = (numerator - 1) / denominator; + + overhead_for_gas.as_u32() + }; + + let overhead = vec![tx_slot_overhead, overhead_for_length, overhead_for_gas] + .into_iter() + .max() + // For the sake of consistency making sure that total_gas_limit >= max_overhead + .map(|max_overhead| std::cmp::min(max_overhead, total_gas_limit.as_u32())) + .unwrap(); + + let limit_after_deducting_overhead = total_gas_limit - overhead; + + // During double checking of the overhead, the bootloader will assume that the + // body of the transaction does not have any more than MAX_L2_TX_GAS_LIMIT ergs available to it. + if limit_after_deducting_overhead.as_u64() > MAX_L2_TX_GAS_LIMIT { + // We derive the same overhead that would exist for the MAX_L2_TX_GAS_LIMIT ergs + derive_overhead( + MAX_L2_TX_GAS_LIMIT as u32, + gas_per_pubdata_byte_limit, + encoded_len.as_usize(), + coeficients, + ) + } else { + overhead + } +} + +pub(crate) fn block_overhead_gas(gas_per_pubdata_byte: u32) -> u32 { + BLOCK_OVERHEAD_GAS + BLOCK_OVERHEAD_PUBDATA * gas_per_pubdata_byte +} + +#[cfg(test)] +mod tests { + + use zksync_types::fee::encoding_len; + + use super::*; + + // This method returns the maximum block overhead that can be charged from the user based on the binary search approach + pub fn get_maximal_allowed_overhead_bin_search( + total_gas_limit: u32, + gas_per_pubdata_byte_limit: u32, + encoded_len: usize, + coeficients: OverheadCoeficients, + ) -> u32 { + let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { + total_gas_limit - MAX_TX_ERGS_LIMIT + } else { + 0u32 + }; + // Safe cast: the gas_limit for a transaction can not be larger than 2^32 + let mut right_bound = total_gas_limit; + + // The closure returns whether a certain overhead would be accepted by the bootloader. + // It is accepted if the derived overhead (i.e. the actual overhead that the user has to pay) + // is >= than the overhead proposed by the operator. 
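+ // Note: acceptance is presumably monotone in `suggested_overhead` (a larger proposed overhead + // is never easier to accept), which is the property the bisection below relies on.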
+ let is_overhead_accepted = |suggested_overhead: u32| { + let derived_overhead = derive_overhead( + total_gas_limit - suggested_overhead, + gas_per_pubdata_byte_limit, + encoded_len, + coeficients, + ); + + derived_overhead >= suggested_overhead + }; + + // In order to find the maximal allowed overhead we are doing binary search + while left_bound + 1 < right_bound { + let mid = (left_bound + right_bound) / 2; + + if is_overhead_accepted(mid) { + left_bound = mid; + } else { + right_bound = mid; + } + } + + if is_overhead_accepted(right_bound) { + right_bound + } else { + left_bound + } + } + + #[test] + fn test_correctness_for_efficient_overhead() { + let test_params = |total_gas_limit: u32, + gas_per_pubdata: u32, + encoded_len: usize, + coeficients: OverheadCoeficients| { + let result_by_efficient_search = + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + + let result_by_binary_search = get_maximal_allowed_overhead_bin_search( + total_gas_limit, + gas_per_pubdata, + encoded_len, + coeficients, + ); + + assert_eq!(result_by_efficient_search, result_by_binary_search); + }; + + // Some arbitrary test + test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + + // Very small parameters + test_params(0, 1, 12, OverheadCoeficients::new_l2()); + + // Relatively big parameters + let max_tx_overhead = derive_overhead( + MAX_TX_ERGS_LIMIT, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + test_params( + MAX_TX_ERGS_LIMIT + max_tx_overhead, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + + test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + } + + #[test] + fn test_consistency_with_encoding_length() { + let transaction = TransactionData { + tx_type: 113, + from: Address::random(), + to: Address::random(), + gas_limit: U256::from(1u32), + pubdata_price_limit: U256::from(1u32), + max_fee_per_gas: U256::from(1u32), + max_priority_fee_per_gas: U256::from(1u32), + paymaster: Address::random(), + nonce: U256::zero(), + value: U256::zero(), + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transaction, but it should not be mandatory + // in the long run. + reserved: [U256::zero(); 4], + data: vec![0u8; 65], + signature: vec![0u8; 75], + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. 
+ factory_deps: vec![vec![0u8; 32], vec![1u8; 32]], + paymaster_input: vec![0u8; 85], + reserved_dynamic: vec![0u8; 32], + }; + + let assumed_encoded_len = encoding_len(65, 75, 2, 85, 32); + + let true_encoding_len = transaction.into_tokens().len(); + + assert_eq!(assumed_encoded_len, true_encoding_len); + } +} diff --git a/core/multivm_deps/vm_m6/src/utils.rs b/core/multivm_deps/vm_m6/src/utils.rs new file mode 100644 index 000000000000..3718761cc107 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/utils.rs @@ -0,0 +1,285 @@ +use crate::glue::GlueInto; +use crate::history_recorder::HistoryMode; +use crate::{ + memory::SimpleMemory, oracles::tracer::PubdataSpentTracer, vm_with_bootloader::BlockContext, + VmInstance, +}; +use once_cell::sync::Lazy; + +use zk_evm::block_properties::BlockProperties; +use zk_evm::{ + aux_structures::{LogQuery, MemoryPage, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; +use zksync_utils::h256_to_u256; + +pub const INITIAL_TIMESTAMP: u32 = 1024; +pub const INITIAL_MEMORY_COUNTER: u32 = 2048; +pub const INITIAL_CALLDATA_PAGE: u32 = 7; +pub const INITIAL_BASE_PAGE: u32 = 8; +pub const ENTRY_POINT_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; + +/// How many gas bootloader is allowed to spend within one block. +/// Note that this value doesn't correspond to the gas limit of any particular transaction +/// (except for the fact that, of course, gas limit for each transaction should be <= `BLOCK_GAS_LIMIT`). +pub const BLOCK_GAS_LIMIT: u32 = zk_evm::zkevm_opcode_defs::system_params::VM_INITIAL_FRAME_ERGS; +pub const ETH_CALL_GAS_LIMIT: u32 = MAX_L2_TX_GAS_LIMIT as u32; + +#[derive(Debug, Clone)] +pub enum VmExecutionResult { + Ok(Vec), + Revert(Vec), + Panic, + MostLikelyDidNotFinish(Address, u16), +} + +pub const fn code_page_candidate_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0) +} + +pub const fn stack_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 1) +} + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +pub const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 3) +} + +pub(crate) fn dump_memory_page_using_primitive_value( + memory: &SimpleMemory, + ptr: PrimitiveValue, +) -> Vec { + if !ptr.is_pointer { + return vec![]; + } + let fat_ptr = FatPointer::from_u256(ptr.value); + dump_memory_page_using_fat_pointer(memory, fat_ptr) +} + +pub(crate) fn dump_memory_page_using_fat_pointer( + memory: &SimpleMemory, + fat_ptr: FatPointer, +) -> Vec { + dump_memory_page_by_offset_and_length( + memory, + fat_ptr.memory_page, + (fat_ptr.start + fat_ptr.offset) as usize, + (fat_ptr.length - fat_ptr.offset) as usize, + ) +} + +pub(crate) fn dump_memory_page_by_offset_and_length( + memory: &SimpleMemory, + page: u32, + offset: usize, + length: usize, +) -> Vec { + assert!(offset < (1u32 << 24) as usize); + assert!(length < (1u32 << 24) as usize); + let mut dump = Vec::with_capacity(length); + if length == 0 { + return dump; + } + + let first_word = offset / 32; + let end_byte = offset + length; + let mut last_word = end_byte / 32; + if end_byte % 32 != 0 { + last_word += 1; + } + + let unalignment = offset % 32; + + let page_part = + memory.dump_page_content_as_u256_words(page, (first_word as 
u32)..(last_word as u32)); + + let mut is_first = true; + let mut remaining = length; + for word in page_part.into_iter() { + let it = word.into_be_iter(); + if is_first { + is_first = false; + let it = it.skip(unalignment); + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } else { + for next in it { + if remaining > 0 { + dump.push(next); + remaining -= 1; + } + } + } + } + + assert_eq!( + dump.len(), + length, + "tried to dump with offset {}, length {}, got a bytestring of length {}", + offset, + length, + dump.len() + ); + + dump +} + +pub trait FixedLengthIterator<'a, I: 'a, const N: usize>: Iterator +where + Self: 'a, +{ + fn next(&mut self) -> Option<::Item> { + ::next(self) + } +} + +pub trait IntoFixedLengthByteIterator { + type IntoIter: FixedLengthIterator<'static, u8, N>; + fn into_le_iter(self) -> Self::IntoIter; + fn into_be_iter(self) -> Self::IntoIter; +} + +pub struct FixedBufferValueIterator { + iter: std::array::IntoIter, +} + +impl Iterator for FixedBufferValueIterator { + type Item = T; + fn next(&mut self) -> Option { + self.iter.next() + } +} + +impl FixedLengthIterator<'static, T, N> + for FixedBufferValueIterator +{ +} + +impl IntoFixedLengthByteIterator<32> for U256 { + type IntoIter = FixedBufferValueIterator; + fn into_le_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_little_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } + + fn into_be_iter(self) -> Self::IntoIter { + let mut buffer = [0u8; 32]; + self.to_big_endian(&mut buffer); + + FixedBufferValueIterator { + iter: IntoIterator::into_iter(buffer), + } + } +} + +/// Collects storage log queries where `log.log_query.timestamp >= from_timestamp`. +/// Denote `n` to be the number of such queries, then it works in O(n). +pub fn collect_storage_log_queries_after_timestamp( + all_log_queries: &[StorageLogQuery], + from_timestamp: Timestamp, +) -> Vec { + let from_timestamp = from_timestamp.glue_into(); + all_log_queries + .iter() + .rev() + .take_while(|log_query| log_query.log_query.timestamp >= from_timestamp) + .cloned() + .collect::>() + .into_iter() + .rev() + .collect() +} + +/// Collects all log queries where `log_query.timestamp >= from_timestamp`. +/// Denote `n` to be the number of such queries, then it works in O(n). +pub fn collect_log_queries_after_timestamp( + all_log_queries: &[LogQuery], + from_timestamp: Timestamp, +) -> Vec { + all_log_queries + .iter() + .rev() + .take_while(|log_query| log_query.timestamp >= from_timestamp) + .cloned() + .collect::>() + .into_iter() + .rev() + .collect() +} + +/// Receives sorted slice of timestamps. +/// Returns count of timestamps that are greater than or equal to `from_timestamp`. +/// Works in O(log(sorted_timestamps.len())). 
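+/// For example (illustrative): with `sorted_timestamps = [2, 4, 6, 8]` and `from_timestamp = 5`, +/// `partition_point` counts the two elements `< 5`, so the function returns `4 - 2 = 2`.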
+pub fn precompile_calls_count_after_timestamp( + sorted_timestamps: &[Timestamp], + from_timestamp: Timestamp, +) -> usize { + sorted_timestamps.len() - sorted_timestamps.partition_point(|t| *t < from_timestamp) +} + +pub static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub fn create_test_block_params() -> (BlockContext, BlockProperties) { + let context = BlockContext { + block_number: 1u32, + block_timestamp: 1000, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei + operator_address: H160::zero(), + }; + + ( + context, + BlockProperties { + default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }, + ) +} + +pub fn read_bootloader_test_code(test: &str) -> Vec<u8> { + read_zbin_bytecode(format!( + "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", + test, test + )) +} + +pub(crate) fn calculate_computational_gas_used<T: PubdataSpentTracer<H>, H: HistoryMode>( + vm: &VmInstance<'_, H>, + tracer: &T, + gas_remaining_before: u32, + spent_pubdata_counter_before: u32, +) -> u32 { + let total_gas_used = gas_remaining_before + .checked_sub(vm.gas_remaining()) + .expect("underflow"); + let gas_used_on_pubdata = + tracer.gas_spent_on_pubdata(&vm.state.local_state) - spent_pubdata_counter_before; + total_gas_used + .checked_sub(gas_used_on_pubdata) + .unwrap_or_else(|| { + vlog::error!( + "Gas used on pubdata is greater than total gas used. On pubdata: {}, total: {}", + gas_used_on_pubdata, + total_gas_used + ); + 0 + }) +} diff --git a/core/multivm_deps/vm_m6/src/vm.rs b/core/multivm_deps/vm_m6/src/vm.rs new file mode 100644 index 000000000000..d878122093a4 --- /dev/null +++ b/core/multivm_deps/vm_m6/src/vm.rs @@ -0,0 +1,1014 @@ +use std::convert::TryFrom; +use std::fmt::Debug; + +use zk_evm::aux_structures::Timestamp; +use zk_evm::vm_state::{PrimitiveValue, VmLocalState, VmState}; +use zk_evm::witness_trace::DummyTracer; +use zk_evm::zkevm_opcode_defs::decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}; +use zk_evm::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; +use zksync_config::constants::MAX_TXS_IN_BLOCK; +use zksync_types::l2_to_l1_log::L2ToL1Log; +use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; +use zksync_types::vm_trace::{Call, VmExecutionTrace, VmTrace}; +use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, U256}; +use zksync_utils::bytes_to_be_words; + +use crate::bootloader_state::BootloaderState; +use crate::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; +use crate::event_sink::InMemoryEventSink; +use crate::events::merge_events; +use crate::glue::GlueInto; +use crate::history_recorder::{HistoryEnabled, HistoryMode}; +use crate::memory::SimpleMemory; +use crate::oracles::decommitter::DecommitterOracle; +use crate::oracles::precompile::PrecompilesProcessorWithHistory; +use crate::oracles::storage::StorageOracle; +use crate::oracles::tracer::{ + BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, TransactionResultTracer, ValidationError, ValidationTracer, + ValidationTracerParams, +}; +use crate::oracles::OracleWithHistory; +use crate::utils::{ + calculate_computational_gas_used, collect_log_queries_after_timestamp, + collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, + precompile_calls_count_after_timestamp, +}; +use crate::vm_with_bootloader::{
+ BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, + OPERATOR_REFUNDS_OFFSET, +}; +use crate::Word; + +pub type ZkSyncVmState<'a, H> = VmState< + 'a, + StorageOracle<'a, H>, + SimpleMemory<H>, + InMemoryEventSink<H>, + PrecompilesProcessorWithHistory<false, H>, + DecommitterOracle<'a, false, H>, + DummyTracer, +>; + +pub const MAX_MEM_SIZE_BYTES: u32 = 16777216; // 2^24 + +// Arbitrary space in memory closer to the end of the page +pub const RESULT_SUCCESS_FIRST_SLOT: u32 = + (MAX_MEM_SIZE_BYTES - (MAX_TXS_IN_BLOCK as u32) * 32) / 32; +// The slot that is used for tracking vm hooks +pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; +pub const VM_HOOK_PARAMS_COUNT: u32 = 2; +pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; + +pub(crate) fn get_vm_hook_params<H: HistoryMode>(memory: &SimpleMemory<H>) -> Vec<U256> { + memory.dump_page_content_as_u256_words( + BOOTLOADER_HEAP_PAGE, + VM_HOOK_PARAMS_START_POSITION..VM_HOOK_PARAMS_START_POSITION + VM_HOOK_PARAMS_COUNT, + ) +} + +/// MultiVM-specific addition. +/// +/// At different points in time, refunds were handled in a different way. +/// E.g., initially they were completely disabled. +/// +/// This enum allows executing blocks with the same VM but with different support for refunds. +#[derive(Debug)] +pub enum MultiVMSubversion { + /// Initial VM M6 version. + V1, + /// A bug with code compression was fixed. + V2, +} + +#[derive(Debug)] +pub struct VmInstance<'a, H: HistoryMode> { + pub gas_limit: u32, + pub state: ZkSyncVmState<'a, H>, + pub execution_mode: TxExecutionMode, + pub block_context: DerivedBlockContext, + pub(crate) bootloader_state: BootloaderState, + + pub snapshots: Vec<VmSnapshot>, + pub vm_subversion: MultiVMSubversion, +} + +/// This structure stores data that accumulates during the VM run. +#[derive(Debug, PartialEq)] +pub struct VmExecutionResult { + pub events: Vec<VmEvent>, + pub storage_log_queries: Vec<StorageLogQuery>, + pub used_contract_hashes: Vec<U256>, + pub l2_to_l1_logs: Vec<L2ToL1Log>, + pub return_data: Vec<U256>, + + /// Value denoting the amount of gas spent within the VM invocation. + /// Note that the return value represents the difference between the amount of gas + /// available to the VM before and after execution. + /// + /// This means that, depending on the context, `gas_used` may represent different things. + /// If the VM is continuously invoked and interrupted after each tx, this field may represent the + /// amount of gas spent by a single transaction. + /// + /// To understand which value `gas_used` represents, see the documentation for the method + /// that you use to get the `VmExecutionResult` object. + /// + /// Side note: this may sound confusing, but it arises from the nature of the bootloader: for it, + /// processing multiple transactions is a single action. We *may* intervene and stop the VM once a transaction + /// is executed, but it's not enforced. So the best we can do is to calculate the amount of gas before and + /// after the invocation, leaving the interpretation of this value to the user. + pub gas_used: u32, + /// This value also depends on the context, the same as `gas_used`. + pub computational_gas_used: u32, + pub contracts_used: usize, + pub revert_reason: Option<VmRevertReasonParsingResult>, + pub trace: VmTrace, + pub total_log_queries: usize, + pub cycles_used: u32, +} + +impl VmExecutionResult { + pub fn error_message(&self) -> Option<String> { + self.revert_reason + .as_ref() + .map(|result| result.revert_reason.to_string()) + } +} + +#[derive(Debug, PartialEq)] +pub struct VmBlockResult { + /// Result for the whole block execution.
+ pub full_result: VmExecutionResult, + /// Result for the block tip execution. + pub block_tip_result: VmPartialExecutionResult, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmPartialExecutionResult { + pub logs: VmExecutionLogs, + pub revert_reason: Option, + pub contracts_used: usize, + pub cycles_used: u32, + pub computational_gas_used: u32, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct VmTxExecutionResult { + pub status: TxExecutionStatus, + pub result: VmPartialExecutionResult, + pub call_traces: Vec, + // Gas refunded to the user at the end of the transaction + pub gas_refunded: u32, + // Gas proposed by the operator to be refunded, before the postOp call. + // This value is needed to correctly recover memory of the bootloader. + pub operator_suggested_refund: u32, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum VmExecutionStopReason { + VmFinished, + TracerRequestedStop, +} + +use crate::utils::VmExecutionResult as NewVmExecutionResult; + +fn vm_may_have_ended_inner(vm: &ZkSyncVmState) -> Option { + let execution_has_ended = vm.execution_has_ended(); + + let r1 = vm.local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; + let current_address = vm.local_state.callstack.get_current_stack().this_address; + + let outer_eh_location = >::PcOrImm::MAX.as_u64(); + match ( + execution_has_ended, + vm.local_state.callstack.get_current_stack().pc.as_u64(), + ) { + (true, 0) => { + let returndata = dump_memory_page_using_primitive_value(vm.memory, r1); + + Some(NewVmExecutionResult::Ok(returndata)) + } + (false, _) => None, + (true, l) if l == outer_eh_location => { + // check r1,r2,r3 + if vm.local_state.flags.overflow_or_less_than_flag { + Some(NewVmExecutionResult::Panic) + } else { + let returndata = dump_memory_page_using_primitive_value(vm.memory, r1); + Some(NewVmExecutionResult::Revert(returndata)) + } + } + (_, a) => Some(NewVmExecutionResult::MostLikelyDidNotFinish( + current_address, + a as u16, + )), + } +} + +// This method returns `VmExecutionResult` struct, but some of the fields are left empty. +// +// `gas_before` argument is used to calculate the amount of gas spent by transaction. +// It is required because the same VM instance is continuously used to apply several transactions. +fn vm_may_have_ended( + vm: &VmInstance, + gas_before: u32, +) -> Option { + let basic_execution_result = vm_may_have_ended_inner(&vm.state)?; + + let gas_used = gas_before + .checked_sub(vm.gas_remaining()) + .expect("underflow"); + + match basic_execution_result { + NewVmExecutionResult::Ok(mut data) => { + while data.len() % 32 != 0 { + data.push(0) + } + Some(VmExecutionResult { + // The correct `events` value for this field should be set separately + // later on based on the information inside the event_sink oracle. + events: vec![], + storage_log_queries: vm.get_final_log_queries(), + used_contract_hashes: vm.get_used_contracts(), + l2_to_l1_logs: vec![], + return_data: bytes_to_be_words(data), + gas_used, + // The correct `computational_gas_used` value for this field should be set separately later. 
+ computational_gas_used: 0, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: None, + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }) + } + NewVmExecutionResult::Revert(data) => { + let revert_reason = VmRevertReasonParsingResult::new( + TxRevertReason::parse_error(data.as_slice()), + data, + ); + + // Check if error indicates a bug in server/vm/bootloader. + if matches!( + revert_reason.revert_reason, + TxRevertReason::UnexpectedVMBehavior(_) + ) { + vlog::error!( + "Observed error that should never happen: {:?}. Full VM data: {:?}", + revert_reason, + vm + ); + } + + Some(VmExecutionResult { + events: vec![], + storage_log_queries: vm.get_final_log_queries(), + used_contract_hashes: vm.get_used_contracts(), + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used, + // The correct `computational_gas_used` value for this field should be set separately later. + computational_gas_used: 0, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: Some(revert_reason), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }) + } + // Panic is effectively the same as Revert, but has different nature. + NewVmExecutionResult::Panic => Some(VmExecutionResult { + events: vec![], + storage_log_queries: vec![], + used_contract_hashes: vec![], + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used, + // The correct `computational_gas_used` value for this field should be set separately later. + computational_gas_used: 0, + contracts_used: vm + .state + .decommittment_processor + .get_used_bytecode_hashes() + .len(), + revert_reason: Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::Unknown(VmRevertReason::VmError), + original_data: vec![], + }), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), + total_log_queries: vm.state.event_sink.get_log_queries() + + vm.state.precompiles_processor.get_timestamp_history().len() + + vm.get_final_log_queries().len(), + cycles_used: vm.state.local_state.monotonic_cycle_counter, + }), + NewVmExecutionResult::MostLikelyDidNotFinish(_, _) => { + // The execution has not ended yet. It should either continue + // or throw Out-of-gas error. + None + } + } +} + +/// A snapshot of the VM that holds enough information to +/// rollback the VM to some historical state. 
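+/// Only the local state and the bootloader state are captured; the oracles are presumably +/// rolled back separately through their `OracleWithHistory` implementations, using the +/// timestamp recorded in the snapshotted local state.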
+#[derive(Debug, Clone)] +pub struct VmSnapshot { + local_state: VmLocalState, + bootloader_state: BootloaderState, +} + +impl<H: HistoryMode> VmInstance<'_, H> { + fn has_ended(&self) -> bool { + match vm_may_have_ended_inner(&self.state) { + None | Some(NewVmExecutionResult::MostLikelyDidNotFinish(_, _)) => false, + Some( + NewVmExecutionResult::Ok(_) + | NewVmExecutionResult::Revert(_) + | NewVmExecutionResult::Panic, + ) => true, + } + } + + fn revert_reason(&self) -> Option<VmRevertReasonParsingResult> { + match vm_may_have_ended_inner(&self.state) { + None + | Some( + NewVmExecutionResult::MostLikelyDidNotFinish(_, _) | NewVmExecutionResult::Ok(_), + ) => None, + Some(NewVmExecutionResult::Revert(data)) => { + let revert_reason = VmRevertReasonParsingResult::new( + TxRevertReason::parse_error(data.as_slice()), + data, + ); + + // Check if the error indicates a bug in the server/vm/bootloader. + if matches!( + revert_reason.revert_reason, + TxRevertReason::UnexpectedVMBehavior(_) + ) { + vlog::error!( + "Observed error that should never happen: {:?}. Full VM data: {:?}", + revert_reason, + self + ); + } + + Some(revert_reason) + } + Some(NewVmExecutionResult::Panic) => Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::Unknown(VmRevertReason::VmError), + original_data: vec![], + }), + } + } + + /// Removes the latest snapshot without rolling back to it. + /// This function expects that there is at least one snapshot present. + pub fn pop_snapshot_no_rollback(&mut self) { + self.snapshots.pop().unwrap(); + } + + /// Returns the amount of gas remaining to the VM. + /// Note that this *does not* correspond to the gas limit of a transaction. + /// To calculate the amount of gas spent by a transaction, you should call this method before and after + /// the execution, and subtract these values. + /// + /// Note: this method should only be called when either the transaction is fully completed or the VM completed + /// its execution. The remaining gas value is read from the current stack frame, so if you attempt to + /// read it during transaction execution, you may receive an invalid value. + pub(crate) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining + } + + /// Returns the amount of gas consumed by the VM so far (based on the `gas_limit` provided + /// to initiate the virtual machine). + /// + /// Note: this method should only be called when either the transaction is fully completed or the VM completed + /// its execution. The remaining gas value is read from the current stack frame, so if you attempt to + /// read it during transaction execution, you may receive an invalid value.
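+ /// Illustrative sketch, following the pattern described above for `gas_remaining`: + /// `let before = vm.gas_remaining(); /* execute a tx */ let spent = before - vm.gas_remaining();`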
+ pub fn gas_consumed(&self) -> u32 { + self.gas_limit + .checked_sub(self.gas_remaining()) + .expect("underflow") + } + + pub(crate) fn collect_events_and_l1_logs_after_timestamp( + &self, + from_timestamp: Timestamp, + ) -> (Vec, Vec) { + let (raw_events, l1_messages) = self + .state + .event_sink + .get_events_and_l2_l1_logs_after_timestamp(from_timestamp); + let events = merge_events(raw_events) + .into_iter() + .map(|e| e.into_vm_event(L1BatchNumber(self.block_context.context.block_number))) + .collect(); + ( + events, + l1_messages + .into_iter() + .map(|log| { + L2ToL1Log::from(GlueInto::< + zksync_types::zk_evm::reference_impls::event_sink::EventMessage, + >::glue_into(log)) + }) + .collect(), + ) + } + + fn collect_execution_logs_after_timestamp(&self, from_timestamp: Timestamp) -> VmExecutionLogs { + let storage_logs = collect_storage_log_queries_after_timestamp( + self.state.storage.frames_stack.forward().current_frame(), + from_timestamp, + ); + let storage_logs_count = storage_logs.len(); + + let (events, l2_to_l1_logs) = + self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + + let log_queries = collect_log_queries_after_timestamp( + self.state.event_sink.frames_stack.forward().current_frame(), + from_timestamp, + ); + + let precompile_calls_count = precompile_calls_count_after_timestamp( + self.state.precompiles_processor.timestamp_history.inner(), + from_timestamp, + ); + VmExecutionLogs { + storage_logs, + events, + l2_to_l1_logs, + total_log_queries_count: storage_logs_count + + log_queries.len() + + precompile_calls_count, + } + } + + /// Executes VM until the end or tracer says to stop. + /// Returns a tuple of `VmExecutionStopReason` and the size of the refund proposed by the operator + fn execute_with_custom_tracer_and_refunds< + T: ExecutionEndTracer + + PendingRefundTracer + + PubdataSpentTracer + + StorageInvocationTracer, + >( + &mut self, + tracer: &mut T, + ) -> (VmExecutionStopReason, u32) { + let mut operator_refund = None; + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + + loop { + // Sanity check: we should never reach the maximum value, because then we won't be able to process the next cycle. + assert_ne!( + self.state.local_state.monotonic_cycle_counter, + u32::MAX, + "VM reached maximum possible amount of cycles. Vm state: {:?}", + self.state + ); + + let timestamp_before_cycle = self.state.local_state.timestamp; + self.state.cycle(tracer); + + if self.has_ended() { + return ( + VmExecutionStopReason::VmFinished, + operator_refund.unwrap_or_default(), + ); + } + + // This means that the bootloader has informed the system (usually via VMHooks) - that some gas + // should be refunded back (see askOperatorForRefund in bootloader.yul for details). + if let Some(bootloader_refund) = tracer.requested_refund() { + assert!( + operator_refund.is_none(), + "Operator was asked for refund two times" + ); + + let gas_spent_on_pubdata = tracer.gas_spent_on_pubdata(&self.state.local_state) + - spent_pubdata_counter_before; + let tx_body_refund = + self.tx_body_refund(timestamp_initial, bootloader_refund, gas_spent_on_pubdata); + + if tx_body_refund < bootloader_refund { + vlog::error!( + "Suggested tx body refund is less than bootloader refund. 
Tx body refund: {}, bootloader refund: {}", + tx_body_refund, + bootloader_refund + ); + } + + let refund_to_propose = tx_body_refund + + self.block_overhead_refund( + timestamp_initial, + gas_remaining_before, + gas_spent_on_pubdata, + ); + + let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + let refund_slot = OPERATOR_REFUNDS_OFFSET + current_tx_index; + + // Writing the refund into memory. + self.state.memory.memory.write_to_memory( + BOOTLOADER_HEAP_PAGE as usize, + refund_slot, + PrimitiveValue { + value: refund_to_propose.into(), + is_pointer: false, + }, + Timestamp(timestamp_before_cycle), + ); + operator_refund = Some(refund_to_propose); + tracer.set_refund_as_done(); + + let tx_gas_limit = self.get_tx_gas_limit(current_tx_index); + + if tx_gas_limit < bootloader_refund { + vlog::error!( + "Tx gas limit is less than bootloader refund. Tx gas limit: {}, bootloader refund: {}", + tx_gas_limit, + bootloader_refund + ); + } + if tx_gas_limit < refund_to_propose { + vlog::error!( + "Tx gas limit is less than operator refund. Tx gas limit: {}, operator refund: {}", + tx_gas_limit, + refund_to_propose + ); + } + + metrics::histogram!("vm.refund", bootloader_refund as f64 / tx_gas_limit as f64 * 100.0, "type" => "bootloader"); + metrics::histogram!("vm.refund", refund_to_propose as f64 / tx_gas_limit as f64 * 100.0, "type" => "operator"); + metrics::histogram!( + "vm.refund.diff", + (refund_to_propose as f64 - bootloader_refund as f64) / tx_gas_limit as f64 + * 100.0 + ); + } + + tracer.set_missed_storage_invocations( + self.state + .storage + .storage + .get_ptr() + .borrow() + .missed_storage_invocations(), + ); + + if tracer.should_stop_execution() { + return ( + VmExecutionStopReason::TracerRequestedStop, + operator_refund.unwrap_or_default(), + ); + } + } + } + + // Executes the VM until it ends or the tracer requests a stop. + pub(crate) fn execute_with_custom_tracer< + T: ExecutionEndTracer<H> + + PendingRefundTracer<H> + + PubdataSpentTracer<H> + + StorageInvocationTracer<H>, + >( + &mut self, + tracer: &mut T, + ) -> VmExecutionStopReason { + self.execute_with_custom_tracer_and_refunds(tracer).0 + } + + /// Executes the VM until the end of the next transaction. + /// Panics if there are no new transactions in the bootloader. + /// Internally uses the OneTxTracer to stop the VM when the last opcode from the transaction is reached. + // Returns `Err` when the transaction is rejected, + // `Ok(status: TxExecutionStatus::Success)` when the transaction succeeds, and + // `Ok(status: TxExecutionStatus::Failure)` when the transaction fails. + // Note that failed transactions are considered properly processed and are included in blocks. + pub fn execute_next_tx( + &mut self, + validation_computational_gas_limit: u32, + with_call_tracer: bool, + ) -> Result<VmTxExecutionResult, TxRevertReason> { + let tx_index = self.bootloader_state.next_unexecuted_tx() as u32; + + let mut tx_tracer: OneTxTracer<H> = + OneTxTracer::new(validation_computational_gas_limit, with_call_tracer); + + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + + let (stop_reason, operator_suggested_refund) = + self.execute_with_custom_tracer_and_refunds(&mut tx_tracer); + match stop_reason { + VmExecutionStopReason::VmFinished => { + // The bootloader resulted in a panic or revert; this means either the transaction is rejected + // (e.g. 
not enough fee or incorrect signature) or the bootloader is out of gas. + + // Collect generated events to show bootloader debug logs. + let _ = self.collect_events_and_l1_logs_after_timestamp(timestamp_initial); + + let error = if tx_tracer.is_bootloader_out_of_gas() { + TxRevertReason::BootloaderOutOfGas + } else { + self.revert_reason() + .expect("vm ended execution prematurely, but no revert reason is given") + .revert_reason + }; + Err(error) + } + VmExecutionStopReason::TracerRequestedStop => { + if tx_tracer.tx_has_been_processed() { + let tx_execution_status = + TxExecutionStatus::from_has_failed(tx_has_failed(&self.state, tx_index)); + let vm_execution_logs = + self.collect_execution_logs_after_timestamp(timestamp_initial); + + let computational_gas_used = calculate_computational_gas_used( + self, + &tx_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); + + Ok(VmTxExecutionResult { + gas_refunded: tx_tracer.refund_gas, + operator_suggested_refund, + status: tx_execution_status, + result: VmPartialExecutionResult { + logs: vm_execution_logs, + // If there is a revert, `Err` has already been returned above. + revert_reason: None, + // Getting the contracts used during this transaction. + // For now, the number returned here is always less than or equal to the number + // of code hashes actually used by the transaction, since it might have + // reused bytecode hashes from some of the previous ones. + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter + - cycles_initial, + computational_gas_used, + }, + call_traces: tx_tracer.call_traces(), + }) + } else if tx_tracer.validation_run_out_of_gas() { + Err(TxRevertReason::ValidationFailed(VmRevertReason::General { + msg: format!( + "Took too many computational gas, allowed limit: {}", + validation_computational_gas_limit + ), + data: vec![], + })) + } else { + // VM ended up in state + // `stop_reason == VmExecutionStopReason::TracerRequestedStop && !tx_tracer.tx_has_been_processed() && !tx_tracer.validation_run_out_of_gas()`. + // It means that the bootloader successfully finished its execution without executing the transaction. + // It is an unexpected situation. + panic!("VM successfully finished executing bootloader but transaction wasn't executed"); + } + } + } + } + + /// Returns the full VM result and the partial result produced within the current execution. 
+ pub fn execute_till_block_end(&mut self, job_type: BootloaderJobType) -> VmBlockResult { + self.execute_till_block_end_with_tracer( + job_type, + &mut TransactionResultTracer::new(self.execution_mode.invocation_limit(), false), + ) + } + + pub fn execute_till_block_end_with_call_tracer( + &mut self, + job_type: BootloaderJobType, + ) -> VmBlockResult { + let mut tracer = TransactionResultTracer::new(self.execution_mode.invocation_limit(), true); + let mut block_result = self.execute_till_block_end_with_tracer(job_type, &mut tracer); + block_result.full_result.trace = VmTrace::CallTrace(tracer.call_trace().unwrap()); + block_result + } + + fn execute_till_block_end_with_tracer( + &mut self, + job_type: BootloaderJobType, + tx_result_tracer: &mut TransactionResultTracer<H>, + ) -> VmBlockResult { + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + + let stop_reason = self.execute_with_custom_tracer(tx_result_tracer); + match stop_reason { + VmExecutionStopReason::VmFinished => { + let mut full_result = vm_may_have_ended(self, gas_remaining_before).unwrap(); + + let computational_gas_used = calculate_computational_gas_used( + self, + tx_result_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); + + if job_type == BootloaderJobType::TransactionExecution + && tx_has_failed(&self.state, 0) + && full_result.revert_reason.is_none() + { + let revert_reason = tx_result_tracer + .revert_reason + .clone() + .map(|reason| { + let vm_revert_reason = VmRevertReason::try_from(reason.as_slice()) + .unwrap_or_else(|_| VmRevertReason::Unknown { + function_selector: vec![], + data: reason.clone(), + }); + + VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(vm_revert_reason), + original_data: reason, + } + }) + .unwrap_or_else(|| VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "Transaction reverted with empty reason. Possibly out of gas" + .to_string(), + data: vec![], + }), + original_data: vec![], + }); + + full_result.revert_reason = Some(revert_reason); + } + + let block_tip_result = VmPartialExecutionResult { + logs: self.collect_execution_logs_after_timestamp(timestamp_initial), + revert_reason: full_result.revert_reason.clone().map(|r| r.revert_reason), + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + computational_gas_used, + }; + + // Collecting `block_tip_result` needs logs with timestamps, so we drain the events for `full_result` + // afterwards, because draining drops the timestamps. 
+ let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + full_result.events = merge_events(raw_events) + .into_iter() + .map(|e| { + e.into_vm_event(L1BatchNumber(self.block_context.context.block_number)) + }) + .collect(); + full_result.l2_to_l1_logs = l1_messages + .into_iter() + .map(|log| { + L2ToL1Log::from(GlueInto::< + zksync_types::zk_evm::reference_impls::event_sink::EventMessage, + >::glue_into(log)) + }) + .collect(); + full_result.computational_gas_used = block_tip_result.computational_gas_used; + VmBlockResult { + full_result, + block_tip_result, + } + } + VmExecutionStopReason::TracerRequestedStop => { + metrics::increment_counter!("runtime_context.execution.dropped"); + + if tx_result_tracer.is_limit_reached() { + VmBlockResult { + // Normally the tracer should never request a stop, but if this is a transaction call + // that performs too many storage invocations, we stop execution and return an error. + full_result: VmExecutionResult { + events: vec![], + storage_log_queries: vec![], + used_contract_hashes: vec![], + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used: 0, + computational_gas_used: 0, + contracts_used: 0, + revert_reason: Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::MissingInvocationLimitReached, + original_data: vec![], + }), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), + total_log_queries: 0, + cycles_used: 0, + }, + block_tip_result: VmPartialExecutionResult { + logs: Default::default(), + revert_reason: Some(TxRevertReason::MissingInvocationLimitReached), + contracts_used: 0, + cycles_used: 0, + computational_gas_used: 0, + }, + } + } else { + unreachable!( + "Tracer should never stop execution, except MissingInvocationLimitReached" + ); + } + } + } + } + + /// Unlike the `execute_till_block_end` methods, this one returns only the result of the block tip execution. + pub fn execute_block_tip(&mut self) -> VmPartialExecutionResult { + let timestamp_initial = Timestamp(self.state.local_state.timestamp); + let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + let mut bootloader_tracer: BootloaderTracer<H> = BootloaderTracer::default(); + + let stop_reason = self.execute_with_custom_tracer(&mut bootloader_tracer); + let revert_reason = match stop_reason { + VmExecutionStopReason::VmFinished => { + // The bootloader panicked or reverted. + let revert_reason = if bootloader_tracer.is_bootloader_out_of_gas() { + TxRevertReason::BootloaderOutOfGas + } else { + self.revert_reason() + .expect("vm ended execution prematurely, but no revert reason is given") + .revert_reason + }; + Some(revert_reason) + } + VmExecutionStopReason::TracerRequestedStop => { + // The bootloader finished successfully. 
+ None + } + }; + + let computational_gas_used = calculate_computational_gas_used( + self, + &bootloader_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); + VmPartialExecutionResult { + logs: self.collect_execution_logs_after_timestamp(timestamp_initial), + revert_reason, + contracts_used: self + .state + .decommittment_processor + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), + cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + computational_gas_used, + } + } + + pub fn execute_validation( + &mut self, + validation_params: ValidationTracerParams, + ) -> Result<(), ValidationError> { + let mut validation_tracer: ValidationTracer<H> = ValidationTracer::new( + self.state.storage.storage.inner().get_ptr(), + validation_params, + ); + + let stop_reason = self.execute_with_custom_tracer(&mut validation_tracer); + + match (stop_reason, validation_tracer.validation_error) { + (VmExecutionStopReason::VmFinished, _) => { + // The tx should only end in case of a revert, so it is safe to unwrap here. + Err(ValidationError::FailedTx(self.revert_reason().unwrap())) + } + (VmExecutionStopReason::TracerRequestedStop, Some(err)) => { + Err(ValidationError::VioalatedRule(err)) + } + (VmExecutionStopReason::TracerRequestedStop, None) => Ok(()), + } + } + + // Returns the storage log queries from the single remaining frame of the execution trace; + // panics if more than one frame is left after execution. + fn get_final_log_queries(&self) -> Vec<StorageLogQuery> { + assert_eq!( + self.state.storage.frames_stack.len(), + 1, + "VM finished execution in unexpected state" + ); + + self.state + .storage + .frames_stack + .forward() + .current_frame() + .to_vec() + } + + /// Returns the keys of contracts that are already loaded (known) by the bootloader. + pub(crate) fn get_used_contracts(&self) -> Vec<U256> { + self.state + .decommittment_processor + .decommitted_code_hashes + .inner() + .keys() + .cloned() + .collect() + } + + pub fn number_of_updated_storage_slots(&self) -> usize { + self.state + .storage + .storage + .inner() + .get_ptr() + .borrow_mut() + .number_of_updated_storage_slots() + } +} + +impl VmInstance<'_, HistoryEnabled> { + /// Saves a snapshot of the current state of the VM that can be used + /// to roll back its state later on. + pub fn save_current_vm_as_snapshot(&mut self) { + self.snapshots.push(VmSnapshot { + // The VM local state contains O(1) various parameters (registers/etc). + // The only "expensive" copying here is copying of the callstack. + // It will take O(callstack_depth) to copy it. + // So it is generally recommended to take snapshots of the bootloader frame, + // where the depth is 1. 
+ local_state: self.state.local_state.clone(), + bootloader_state: self.bootloader_state.clone(), + }); + } + + fn rollback_to_snapshot(&mut self, snapshot: VmSnapshot) { + let VmSnapshot { + local_state, + bootloader_state, + } = snapshot; + + let timestamp = Timestamp(local_state.timestamp); + + vlog::trace!("Rolling back decommitter"); + self.state + .decommittment_processor + .rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back event_sink"); + self.state.event_sink.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back storage"); + self.state.storage.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back memory"); + self.state.memory.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back precompiles_processor"); + self.state + .precompiles_processor + .rollback_to_timestamp(timestamp); + self.state.local_state = local_state; + self.bootloader_state = bootloader_state; + } + + /// Rolls back the state of the VM to the state of the latest snapshot. + pub fn rollback_to_latest_snapshot(&mut self) { + let snapshot = self.snapshots.last().cloned().unwrap(); + self.rollback_to_snapshot(snapshot); + } + + /// Rolls back the state of the VM to the state of the latest snapshot. + /// Removes that snapshot from the list. + pub fn rollback_to_latest_snapshot_popping(&mut self) { + let snapshot = self.snapshots.pop().unwrap(); + self.rollback_to_snapshot(snapshot); + } +} + +// Reads the bootloader memory and checks whether the execution step of the transaction +// has failed. +pub(crate) fn tx_has_failed<H: HistoryMode>(state: &ZkSyncVmState<'_, H>, tx_id: u32) -> bool { + let mem_slot = RESULT_SUCCESS_FIRST_SLOT + tx_id; + let mem_value = state + .memory + .read_slot(BOOTLOADER_HEAP_PAGE as usize, mem_slot as usize) + .value; + + mem_value == U256::zero() +} diff --git a/core/multivm_deps/vm_m6/src/vm_with_bootloader.rs b/core/multivm_deps/vm_m6/src/vm_with_bootloader.rs new file mode 100644 index 000000000000..011352c701cc --- /dev/null +++ b/core/multivm_deps/vm_m6/src/vm_with_bootloader.rs @@ -0,0 +1,913 @@ +use std::{collections::HashMap, time::Instant}; + +use zk_evm::{ + abstractions::{MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES}, + aux_structures::{MemoryPage, Timestamp}, + block_properties::BlockProperties, + vm_state::{CallStackEntry, PrimitiveValue, VmState}, + zkevm_opcode_defs::{ + system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, FatPointer, BOOTLOADER_BASE_PAGE, + BOOTLOADER_CALLDATA_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, + }, +}; +use zksync_config::constants::MAX_TXS_IN_BLOCK; +use zksync_contracts::BaseSystemContracts; + +use zksync_types::{ + zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, + L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, +}; +use zksync_utils::{ + address_to_u256, + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + bytes_to_be_words, h256_to_u256, + misc::ceil_div, +}; + +use crate::{ + bootloader_state::BootloaderState, + history_recorder::HistoryMode, + transaction_data::{TransactionData, L1_TX_TYPE}, + utils::{ + code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, + }, + vm::{MultiVMSubversion, ZkSyncVmState}, + OracleTools, VmInstance, +}; + +pub const BLOCK_OVERHEAD_GAS: u32 = 1200000; +pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; +pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; + +pub const MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT: u32 = 300_000_000; + 
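Before moving into the bootloader layout constants, here is a hedged sketch of how the snapshot API above composes into speculative execution (the `vm` value and its pending transaction are assumed; rollbacks are only available with `HistoryEnabled`):

```rust
// Sketch: try a transaction, keep it on success, roll back on failure.
// Assumes `vm: VmInstance<'_, HistoryEnabled>` with a transaction pushed
// to the bootloader memory; the gas limit argument is illustrative.
vm.save_current_vm_as_snapshot();
match vm.execute_next_tx(u32::MAX, false) {
    Ok(_result) => {
        // Keep the effects; the snapshot is no longer needed.
        vm.pop_snapshot_no_rollback();
    }
    Err(_rejection_reason) => {
        // Restore both the VM local state and the bootloader state,
        // discarding the snapshot in the process.
        vm.rollback_to_latest_snapshot_popping();
    }
}
```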
+/// `BlockContext` is a structure that contains parameters for +/// a block that are used as input for the bootloader and not the VM per se. +/// +/// These values are generally unique for each block (the exception is the operator's address). +#[derive(Clone, Debug, Copy)] +pub struct BlockContext { + pub block_number: u32, + pub block_timestamp: u64, + pub operator_address: Address, + pub l1_gas_price: u64, + pub fair_l2_gas_price: u64, +} + +impl BlockContext { + pub fn block_gas_price_per_pubdata(&self) -> u64 { + derive_base_fee_and_gas_per_pubdata(self.l1_gas_price, self.fair_l2_gas_price).1 + } +} + +/// Besides the raw values from the `BlockContext`, contains the values that are to be derived +/// from the other values. +#[derive(Debug, Copy, Clone)] +pub struct DerivedBlockContext { + pub context: BlockContext, + pub base_fee: u64, +} + +pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { + // This value will typically be far below the u64 range, + // unless the gas price on L1 goes beyond tens of millions of gwei. + l1_gas_price * (L1_GAS_PER_PUBDATA_BYTE as u64) +} + +pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { + let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); + + ceil_div(eth_price_per_pubdata_byte, base_fee) +} + +pub fn derive_base_fee_and_gas_per_pubdata(l1_gas_price: u64, fair_gas_price: u64) -> (u64, u64) { + let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); + + // The baseFee is set in such a way that it is always possible for a transaction to + // publish enough public data while compensating us for it. + let base_fee = std::cmp::max( + fair_gas_price, + ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + ); + + ( + base_fee, + base_fee_to_gas_per_pubdata(l1_gas_price, base_fee), + ) +} + +impl From<BlockContext> for DerivedBlockContext { + fn from(context: BlockContext) -> Self { + let base_fee = + derive_base_fee_and_gas_per_pubdata(context.l1_gas_price, context.fair_l2_gas_price).0; + + DerivedBlockContext { context, base_fee } + } +}
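A worked instance of this derivation may help; the inputs below are made up and the two constants are stand-ins for `L1_GAS_PER_PUBDATA_BYTE` and `MAX_GAS_PER_PUBDATA_BYTE` rather than their authoritative values:

```rust
// Hypothetical numbers, inlining ceil_div(a, b) = (a + b - 1) / b.
let l1_gas_price: u64 = 30_000_000_000; // 30 gwei
let fair_l2_gas_price: u64 = 250_000_000; // 0.25 gwei
let l1_gas_per_pubdata_byte: u64 = 17; // assumed constant
let max_gas_per_pubdata: u64 = 20_000; // assumed constant

// One pubdata byte costs this much wei on L1.
let eth_price_per_pubdata_byte = l1_gas_price * l1_gas_per_pubdata_byte; // 510e9

// ceil(510e9 / 20_000) = 25_500_000 < fair price, so the fair price wins here.
let base_fee = fair_l2_gas_price
    .max((eth_price_per_pubdata_byte + max_gas_per_pubdata - 1) / max_gas_per_pubdata);
assert_eq!(base_fee, 250_000_000);

// Each pubdata byte is then charged ceil(510e9 / 250e6) = 2_040 L2 gas.
let gas_per_pubdata = (eth_price_per_pubdata_byte + base_fee - 1) / base_fee;
assert_eq!(gas_per_pubdata, 2_040);
```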
+// The first 32 slots are reserved for debugging purposes. +pub const DEBUG_SLOTS_OFFSET: usize = 8; +pub const DEBUG_FIRST_SLOTS: usize = 32; +// The next 33 slots are reserved for dealing with the paymaster context (1 slot for storing length + 32 slots for storing the actual context). +pub const PAYMASTER_CONTEXT_SLOTS: usize = 32 + 1; +// The next PAYMASTER_CONTEXT_SLOTS + 7 free slots are needed before each tx, so that the +// postOp operation could be encoded correctly. +pub const MAX_POSTOP_SLOTS: usize = PAYMASTER_CONTEXT_SLOTS + 7; + +// Slots used to store the current L2 transaction's hash and the hash recommended +// to be used for signing the transaction's content. +const CURRENT_L2_TX_HASHES_SLOTS: usize = 2; + +// Slots used to store the calldata for the KnownCodesStorage to mark new factory +// dependencies as known ones. Besides the slots for the new factory dependencies themselves, +// another 4 slots are needed for: the selector, the marker of whether the user should pay for the pubdata, +// the offset for the encoding of the array, and the length of the array. +pub const NEW_FACTORY_DEPS_RESERVED_SLOTS: usize = MAX_NEW_FACTORY_DEPS + 4; + +// The operator can provide for each transaction the proposed minimal refund. +pub const OPERATOR_REFUNDS_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub const OPERATOR_REFUNDS_OFFSET: usize = DEBUG_SLOTS_OFFSET + + DEBUG_FIRST_SLOTS + + PAYMASTER_CONTEXT_SLOTS + + CURRENT_L2_TX_HASHES_SLOTS + + NEW_FACTORY_DEPS_RESERVED_SLOTS; + +pub const TX_OVERHEAD_OFFSET: usize = OPERATOR_REFUNDS_OFFSET + OPERATOR_REFUNDS_SLOTS; +pub const TX_OVERHEAD_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub const TX_TRUSTED_GAS_LIMIT_OFFSET: usize = TX_OVERHEAD_OFFSET + TX_OVERHEAD_SLOTS; +pub const TX_TRUSTED_GAS_LIMIT_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub const COMPRESSED_BYTECODES_OFFSET: usize = + TX_TRUSTED_GAS_LIMIT_OFFSET + TX_TRUSTED_GAS_LIMIT_SLOTS; +pub const COMPRESSED_BYTECODES_SLOTS: usize = 32768; + +pub const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = + COMPRESSED_BYTECODES_OFFSET + COMPRESSED_BYTECODES_SLOTS; + +// The size of the bootloader memory dedicated to the encodings of transactions. +pub const BOOTLOADER_TX_ENCODING_SPACE: u32 = + (MAX_HEAP_PAGE_SIZE_IN_WORDS - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BLOCK) as u32; + +// Size of the bootloader tx description in words. +pub const BOOTLOADER_TX_DESCRIPTION_SIZE: usize = 2; + +// The actual descriptions of transactions should start after the minor descriptions and MAX_POSTOP_SLOTS +// free slots to allow postOp encoding. +pub const TX_DESCRIPTION_OFFSET: usize = BOOTLOADER_TX_DESCRIPTION_OFFSET + + BOOTLOADER_TX_DESCRIPTION_SIZE * MAX_TXS_IN_BLOCK + + MAX_POSTOP_SLOTS; + +pub const TX_GAS_LIMIT_OFFSET: usize = 4; + +pub(crate) const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; +const BOOTLOADER_CODE_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITIAL_BASE_PAGE)).0;
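These offsets chain into a fixed bootloader heap layout. A small sketch of how the per-transaction slots are located (mirroring the `refund_slot` computation used earlier in this diff; the transaction index is made up):

```rust
// Sketch: addressing per-transaction bootloader heap slots.
let tx_index: usize = 3; // hypothetical position of the tx in the block
let refund_slot = OPERATOR_REFUNDS_OFFSET + tx_index;
let overhead_slot = TX_OVERHEAD_OFFSET + tx_index;
let trusted_gas_limit_slot = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index;
// Each slot is one 32-byte word on BOOTLOADER_HEAP_PAGE, so the byte
// address of a slot is simply `slot * 32`.
let refund_byte_offset = refund_slot * 32;
```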
+/// Enum denoting the *in-server* execution mode for the bootloader transactions. +/// +/// If `EthCall` mode is chosen, the bootloader will use the `mimicCall` opcode +/// to simulate the call instead of using the standard `execute` method of the account. +/// This is needed to be able to behave equivalently to Ethereum without much overhead for custom account builders. +/// With `VerifyExecute` mode, the transaction will be executed normally. +/// With `EstimateFee`, a bootloader is used that behaves the same way as in a full `VerifyExecute` block, +/// except that errors in the account validation are ignored. +#[derive(Debug, Clone, Copy)] +pub enum TxExecutionMode { + VerifyExecute, + EstimateFee { + missed_storage_invocation_limit: usize, + }, + EthCall { + missed_storage_invocation_limit: usize, + }, +} + +impl TxExecutionMode { + pub fn invocation_limit(&self) -> usize { + match self { + Self::VerifyExecute => usize::MAX, + TxExecutionMode::EstimateFee { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit, + TxExecutionMode::EthCall { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum BootloaderJobType { + TransactionExecution, + BlockPostprocessing, +} + +impl Default for TxExecutionMode { + fn default() -> Self { + Self::VerifyExecute + } +} + +pub fn init_vm<'a, H: HistoryMode>( + vm_subversion: MultiVMSubversion, + oracle_tools: &'a mut OracleTools<'a, false, H>, + block_context: BlockContextMode, + block_properties: &'a BlockProperties, + execution_mode: TxExecutionMode, + base_system_contract: &BaseSystemContracts, +) -> Box<VmInstance<'a, H>> { + init_vm_with_gas_limit( + vm_subversion, + oracle_tools, + block_context, + block_properties, + execution_mode, + base_system_contract, + BLOCK_GAS_LIMIT, + ) +} + +pub fn init_vm_with_gas_limit<'a, H: HistoryMode>( + vm_subversion: MultiVMSubversion, + oracle_tools: &'a mut OracleTools<'a, false, H>, + block_context: BlockContextMode, + block_properties: &'a BlockProperties, + execution_mode: TxExecutionMode, + base_system_contract: &BaseSystemContracts, + gas_limit: u32, +) -> Box<VmInstance<'a, H>> { + init_vm_inner( + vm_subversion, + oracle_tools, + block_context, + block_properties, + gas_limit, + base_system_contract, + execution_mode, + ) +} + +#[derive(Debug, Clone, Copy)] +// The block.number/block.timestamp data are stored in the CONTEXT_SYSTEM_CONTRACT. +// The bootloader can support execution in two modes: +// - "NewBlock" when a new block is created. It is enforced that the block.number is incremented by 1 +// and the timestamp is non-decreasing. Also, the L2->L1 message used to verify the correctness of the previous root hash is sent. +// This is the mode that should be used in the state keeper. +// - "OverrideCurrent" when we need to provide a custom block.number and block.timestamp. ONLY to be used in testing/ethCalls. +pub enum BlockContextMode { + NewBlock(DerivedBlockContext, U256), + OverrideCurrent(DerivedBlockContext), +} + +impl BlockContextMode { + const OPERATOR_ADDRESS_SLOT: usize = 0; + const PREV_BLOCK_HASH_SLOT: usize = 1; + const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; + const NEW_BLOCK_NUMBER_SLOT: usize = 3; + const L1_GAS_PRICE_SLOT: usize = 4; + const FAIR_L2_GAS_PRICE_SLOT: usize = 5; + const EXPECTED_BASE_FEE_SLOT: usize = 6; + const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7; + + // Returns the previous block hash and timestamp fields that should be used by the bootloader. + // If the timestamp is 0, then the bootloader will not attempt to start a new block + // and will continue using the existing block properties. 
+ fn bootloader_block_params(&self) -> Vec<(usize, U256)> { + let DerivedBlockContext { context, base_fee } = self.inner_block_context(); + + let mut base_params: HashMap<usize, U256> = vec![ + ( + Self::OPERATOR_ADDRESS_SLOT, + address_to_u256(&context.operator_address), + ), + (Self::PREV_BLOCK_HASH_SLOT, Default::default()), + ( + Self::NEW_BLOCK_TIMESTAMP_SLOT, + U256::from(context.block_timestamp), + ), + ( + Self::NEW_BLOCK_NUMBER_SLOT, + U256::from(context.block_number), + ), + (Self::L1_GAS_PRICE_SLOT, U256::from(context.l1_gas_price)), + ( + Self::FAIR_L2_GAS_PRICE_SLOT, + U256::from(context.fair_l2_gas_price), + ), + (Self::EXPECTED_BASE_FEE_SLOT, U256::from(base_fee)), + (Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(0u32)), + ] + .into_iter() + .collect(); + + match *self { + BlockContextMode::OverrideCurrent(_) => base_params.into_iter().collect(), + BlockContextMode::NewBlock(_, prev_block_hash) => { + base_params.insert(Self::PREV_BLOCK_HASH_SLOT, prev_block_hash); + base_params.insert(Self::SHOULD_SET_NEW_BLOCK_SLOT, U256::from(1u32)); + base_params.into_iter().collect() + } + } + } + + pub fn inner_block_context(&self) -> DerivedBlockContext { + match *self { + BlockContextMode::OverrideCurrent(props) => props, + BlockContextMode::NewBlock(props, _) => props, + } + } + + pub fn timestamp(&self) -> u64 { + self.inner_block_context().context.block_timestamp + } +} + +// This method accepts a custom bootloader code. +// It should be used only in tests. +pub fn init_vm_inner<'a, H: HistoryMode>( + vm_subversion: MultiVMSubversion, + oracle_tools: &'a mut OracleTools<'a, false, H>, + block_context: BlockContextMode, + block_properties: &'a BlockProperties, + gas_limit: u32, + base_system_contract: &BaseSystemContracts, + execution_mode: TxExecutionMode, +) -> Box<VmInstance<'a, H>> { + let start = Instant::now(); + + oracle_tools.decommittment_processor.populate( + vec![( + h256_to_u256(base_system_contract.default_aa.hash), + base_system_contract.default_aa.code.clone(), + )], + Timestamp(0), + ); + + oracle_tools.memory.populate( + vec![( + BOOTLOADER_CODE_PAGE, + base_system_contract.bootloader.code.clone(), + )], + Timestamp(0), + ); + + oracle_tools.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + bootloader_initial_memory(&block_context), + Timestamp(0), + ); + + let state = get_default_local_state(oracle_tools, block_properties, gas_limit); + + let vm = Box::new(VmInstance { + gas_limit, + state, + execution_mode, + block_context: block_context.inner_block_context(), + bootloader_state: BootloaderState::new(), + snapshots: Vec::new(), + vm_subversion, + }); + + metrics::histogram!("server.vm.init", start.elapsed()); + vm +} + +fn bootloader_initial_memory(block_properties: &BlockContextMode) -> Vec<(usize, U256)> { + block_properties.bootloader_block_params() +} + +pub fn get_bootloader_memory( + vm_subversion: MultiVMSubversion, + txs: Vec<TransactionData>, + predefined_refunds: Vec<u32>, + predefined_compressed_bytecodes: Vec<Vec<CompressedBytecodeInfo>>, + execution_mode: TxExecutionMode, + block_context: BlockContextMode, +) -> Vec<(usize, U256)> { + match vm_subversion { + MultiVMSubversion::V1 => get_bootloader_memory_v1( + txs, + predefined_refunds, + predefined_compressed_bytecodes, + execution_mode, + block_context, + ), + MultiVMSubversion::V2 => get_bootloader_memory_v2( + txs, + predefined_refunds, + predefined_compressed_bytecodes, + execution_mode, + block_context, + ), + } +}
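The two versions that follow differ only in how the running offset for compressed bytecodes is advanced, which is easy to miss when reading the diff. In v1 the offset advances by the byte length of the ABI encoding, while bootloader memory is addressed in 32-byte words; v2 divides by 32. A hedged illustration with a made-up encoding length:

```rust
// Suppose one compressed bytecode ABI-encodes to 96 bytes, i.e. 3 words.
let encoding_len_bytes = 96usize;

// v1: the offset is advanced in *bytes*, overshooting the next write
// position in word-addressed bootloader memory by a factor of 32.
let v1_advance = encoding_len_bytes; // 96 slots skipped

// v2: the offset is advanced in 32-byte *words*, matching the layout.
let v2_advance = encoding_len_bytes / 32; // 3 slots skipped
assert_eq!(v2_advance * 32, encoding_len_bytes);
```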
+// Initial version of the function. +// Contains a bug in bytecode compression. +fn get_bootloader_memory_v1( + txs: Vec<TransactionData>, + predefined_refunds: Vec<u32>, + predefined_compressed_bytecodes: Vec<Vec<CompressedBytecodeInfo>>, + execution_mode: TxExecutionMode, + block_context: BlockContextMode, +) -> Vec<(usize, U256)> { + let inner_context = block_context.inner_block_context().context; + + let block_gas_price_per_pubdata = inner_context.block_gas_price_per_pubdata(); + + let mut memory = bootloader_initial_memory(&block_context); + + let mut previous_compressed: usize = 0; + let mut already_included_txs_size = 0; + for (tx_index_in_block, tx) in txs.into_iter().enumerate() { + let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone(); + + let mut total_compressed_len = 0; + for i in compressed_bytecodes.iter() { + total_compressed_len += i.encode_call().len() + } + + let memory_for_current_tx = get_bootloader_memory_for_tx( + tx.clone(), + tx_index_in_block, + execution_mode, + already_included_txs_size, + predefined_refunds[tx_index_in_block], + block_gas_price_per_pubdata as u32, + previous_compressed, + compressed_bytecodes, + ); + + previous_compressed += total_compressed_len; + + memory.extend(memory_for_current_tx); + let encoded_struct = tx.into_tokens(); + let encoding_length = encoded_struct.len(); + already_included_txs_size += encoding_length; + } + memory +} + +// Version with the bug fixed +fn get_bootloader_memory_v2( + txs: Vec<TransactionData>, + predefined_refunds: Vec<u32>, + predefined_compressed_bytecodes: Vec<Vec<CompressedBytecodeInfo>>, + execution_mode: TxExecutionMode, + block_context: BlockContextMode, +) -> Vec<(usize, U256)> { + let inner_context = block_context.inner_block_context().context; + + let block_gas_price_per_pubdata = inner_context.block_gas_price_per_pubdata(); + + let mut memory = bootloader_initial_memory(&block_context); + + let mut previous_compressed: usize = 0; + let mut already_included_txs_size = 0; + for (tx_index_in_block, tx) in txs.into_iter().enumerate() { + let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone(); + + let mut total_compressed_len_words = 0; + for i in compressed_bytecodes.iter() { + total_compressed_len_words += i.encode_call().len() / 32; + } + + let memory_for_current_tx = get_bootloader_memory_for_tx( + tx.clone(), + tx_index_in_block, + execution_mode, + already_included_txs_size, + predefined_refunds[tx_index_in_block], + block_gas_price_per_pubdata as u32, + previous_compressed, + compressed_bytecodes, + ); + + previous_compressed += total_compressed_len_words; + + memory.extend(memory_for_current_tx); + let encoded_struct = tx.into_tokens(); + let encoding_length = encoded_struct.len(); + already_included_txs_size += encoding_length; + } + memory +} + +pub fn push_transaction_to_bootloader_memory<H: HistoryMode>( + vm: &mut VmInstance<'_, H>, + tx: &Transaction, + execution_mode: TxExecutionMode, + explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>, +) { + let tx: TransactionData = tx.clone().into(); + let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); + let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); + push_raw_transaction_to_bootloader_memory( + vm, + tx, + execution_mode, + overhead, + explicit_compressed_bytecodes, + ); +} + +pub fn push_raw_transaction_to_bootloader_memory<H: HistoryMode>( + vm: &mut VmInstance<'_, H>, + tx: TransactionData, + execution_mode: TxExecutionMode, + predefined_overhead: u32, + explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>, +) { + match vm.vm_subversion { + MultiVMSubversion::V1 => push_raw_transaction_to_bootloader_memory_v1( + vm, + tx, + execution_mode, + predefined_overhead, + 
explicit_compressed_bytecodes, + ), + MultiVMSubversion::V2 => push_raw_transaction_to_bootloader_memory_v2( + vm, + tx, + execution_mode, + predefined_overhead, + explicit_compressed_bytecodes, + ), + } +} + +/// Contains a bug in the bytecode compression. +fn push_raw_transaction_to_bootloader_memory_v1<H: HistoryMode>( + vm: &mut VmInstance<'_, H>, + tx: TransactionData, + execution_mode: TxExecutionMode, + predefined_overhead: u32, + explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>, +) { + let tx_index_in_block = vm.bootloader_state.free_tx_index(); + let already_included_txs_size = vm.bootloader_state.free_tx_offset(); + + let timestamp = Timestamp(vm.state.local_state.timestamp); + let codes_for_decommiter = tx + .factory_deps + .iter() + .map(|dep| bytecode_to_factory_dep(dep.clone())) + .collect(); + + let compressed_bytecodes = explicit_compressed_bytecodes.unwrap_or_else(|| { + if tx.tx_type == L1_TX_TYPE { + // L1 transactions do not need compression + return vec![]; + } + + tx.factory_deps + .iter() + .filter_map(|bytecode| { + if vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(bytecode)) + { + return None; + } + + compress_bytecode(bytecode) + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: bytecode.clone(), + compressed, + }) + }) + .collect() + }); + let compressed_len = compressed_bytecodes + .iter() + .map(|bytecode| bytecode.compressed.len()) + .sum(); + + vm.state + .decommittment_processor + .populate(codes_for_decommiter, timestamp); + + let block_gas_price_per_pubdata = vm.block_context.context.block_gas_price_per_pubdata(); + let trusted_ergs_limit = tx.trusted_gas_limit(block_gas_price_per_pubdata as u32); + let encoded_tx = tx.into_tokens(); + let encoded_tx_size = encoded_tx.len(); + + let previous_bytecodes = vm.bootloader_state.get_compressed_bytecodes(); + + let bootloader_memory = get_bootloader_memory_for_encoded_tx( + encoded_tx, + tx_index_in_block, + execution_mode, + already_included_txs_size, + 0, + predefined_overhead, + trusted_ergs_limit, + previous_bytecodes, + compressed_bytecodes, + ); + + vm.state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + bootloader_memory, + Timestamp(vm.state.local_state.timestamp), + ); + vm.bootloader_state.add_tx_data(encoded_tx_size); + vm.bootloader_state.add_compressed_bytecode(compressed_len); +} + +// Bytecode compression bug fixed +fn push_raw_transaction_to_bootloader_memory_v2<H: HistoryMode>( + vm: &mut VmInstance<'_, H>, + tx: TransactionData, + execution_mode: TxExecutionMode, + predefined_overhead: u32, + explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>, +) { + let tx_index_in_block = vm.bootloader_state.free_tx_index(); + let already_included_txs_size = vm.bootloader_state.free_tx_offset(); + + let timestamp = Timestamp(vm.state.local_state.timestamp); + let codes_for_decommiter = tx + .factory_deps + .iter() + .map(|dep| bytecode_to_factory_dep(dep.clone())) + .collect(); + + let compressed_bytecodes = explicit_compressed_bytecodes.unwrap_or_else(|| { + if tx.tx_type == L1_TX_TYPE { + // L1 transactions do not need compression + return vec![]; + } + + tx.factory_deps + .iter() + .filter_map(|bytecode| { + if vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(bytecode)) + { + return None; + } + + compress_bytecode(bytecode) + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: bytecode.clone(), + compressed, + }) + }) + .collect() + }); + let compressed_bytecodes_encoding_len_words = compressed_bytecodes + .iter() + .map(|bytecode| { + let 
encoding_length_bytes = bytecode.encode_call().len(); + assert!( + encoding_length_bytes % 32 == 0, + "ABI encoding of bytecode is not 32-byte aligned" + ); + + encoding_length_bytes / 32 + }) + .sum(); + + vm.state + .decommittment_processor + .populate(codes_for_decommiter, timestamp); + + let block_gas_price_per_pubdata = vm.block_context.context.block_gas_price_per_pubdata(); + let trusted_ergs_limit = tx.trusted_gas_limit(block_gas_price_per_pubdata as u32); + let encoded_tx = tx.into_tokens(); + let encoded_tx_size = encoded_tx.len(); + + let previous_bytecodes = vm.bootloader_state.get_compressed_bytecodes(); + + let bootloader_memory = get_bootloader_memory_for_encoded_tx( + encoded_tx, + tx_index_in_block, + execution_mode, + already_included_txs_size, + 0, + predefined_overhead, + trusted_ergs_limit, + previous_bytecodes, + compressed_bytecodes, + ); + + vm.state.memory.populate_page( + BOOTLOADER_HEAP_PAGE as usize, + bootloader_memory, + Timestamp(vm.state.local_state.timestamp), + ); + vm.bootloader_state.add_tx_data(encoded_tx_size); + vm.bootloader_state + .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); +} + +#[allow(clippy::too_many_arguments)] +fn get_bootloader_memory_for_tx( + tx: TransactionData, + tx_index_in_block: usize, + execution_mode: TxExecutionMode, + already_included_txs_size: usize, + predefined_refund: u32, + block_gas_per_pubdata: u32, + previous_compressed_bytecode_size: usize, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, +) -> Vec<(usize, U256)> { + let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); + let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); + get_bootloader_memory_for_encoded_tx( + tx.into_tokens(), + tx_index_in_block, + execution_mode, + already_included_txs_size, + predefined_refund, + overhead_gas, + trusted_gas_limit, + previous_compressed_bytecode_size, + compressed_bytecodes, + ) +} + +#[allow(clippy::too_many_arguments)] +pub(crate) fn get_bootloader_memory_for_encoded_tx( + encoded_tx: Vec<U256>, + tx_index_in_block: usize, + execution_mode: TxExecutionMode, + already_included_txs_size: usize, + predefined_refund: u32, + predefined_overhead: u32, + trusted_gas_limit: u32, + previous_compressed_bytecode_size: usize, + compressed_bytecodes: Vec<CompressedBytecodeInfo>, +) -> Vec<(usize, U256)> { + let mut memory: Vec<(usize, U256)> = Vec::default(); + let bootloader_description_offset = + BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * tx_index_in_block; + + let tx_description_offset = TX_DESCRIPTION_OFFSET + already_included_txs_size; + + // Marking that this transaction should be executed. 
+ memory.push(( + bootloader_description_offset, + assemble_tx_meta(execution_mode, true), + )); + memory.push(( + bootloader_description_offset + 1, + U256::from_big_endian(&(32 * tx_description_offset).to_be_bytes()), + )); + + let refund_offset = OPERATOR_REFUNDS_OFFSET + tx_index_in_block; + memory.push((refund_offset, predefined_refund.into())); + + let overhead_offset = TX_OVERHEAD_OFFSET + tx_index_in_block; + memory.push((overhead_offset, predefined_overhead.into())); + + let trusted_gas_limit_offset = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index_in_block; + memory.push((trusted_gas_limit_offset, trusted_gas_limit.into())); + + // Now we need to actually put the transaction description: + let encoding_length = encoded_tx.len(); + memory.extend((tx_description_offset..tx_description_offset + encoding_length).zip(encoded_tx)); + + // Note: the +1 accounts for the pointer slot. + let compressed_bytecodes_offset = + COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; + + let memory_addition: Vec<_> = compressed_bytecodes + .into_iter() + .flat_map(|x| x.encode_call()) + .collect(); + + let memory_addition = bytes_to_be_words(memory_addition); + + memory.extend( + (compressed_bytecodes_offset..compressed_bytecodes_offset + memory_addition.len()) + .zip(memory_addition), + ); + + memory +} + +fn get_default_local_state<'a, H: HistoryMode>( + tools: &'a mut OracleTools<'a, false, H>, + block_properties: &'a BlockProperties, + gas_limit: u32, +) -> ZkSyncVmState<'a, H> { + let mut vm = VmState::empty_state( + &mut tools.storage, + &mut tools.memory, + &mut tools.event_sink, + &mut tools.precompiles_processor, + &mut tools.decommittment_processor, + &mut tools.witness_tracer, + block_properties, + ); + // Override the ergs limit for the initial frame. + vm.local_state.callstack.current.ergs_remaining = gas_limit; + + let initial_context = CallStackEntry { + this_address: BOOTLOADER_ADDRESS, + msg_sender: Address::zero(), + code_address: BOOTLOADER_ADDRESS, + base_memory_page: MemoryPage(BOOTLOADER_BASE_PAGE), + code_page: MemoryPage(BOOTLOADER_CODE_PAGE), + sp: 0, + pc: 0, + // Note: since the results are written at the end of the memory, + // the entire heap needs to be available from the beginning. + heap_bound: MAX_MEMORY_BYTES as u32, + aux_heap_bound: MAX_MEMORY_BYTES as u32, + exception_handler_location: INITIAL_FRAME_FORMAL_EH_LOCATION, + ergs_remaining: gas_limit, + this_shard_id: 0, + caller_shard_id: 0, + code_shard_id: 0, + is_static: false, + is_local_frame: false, + context_u128_value: 0, + }; + + // We consider the contract that is being run as a bootloader. + vm.push_bootloader_context(INITIAL_MONOTONIC_CYCLE_COUNTER - 1, initial_context); + vm.local_state.timestamp = STARTING_TIMESTAMP; + vm.local_state.memory_page_counter = STARTING_BASE_PAGE; + vm.local_state.monotonic_cycle_counter = INITIAL_MONOTONIC_CYCLE_COUNTER; + vm.local_state.current_ergs_per_pubdata_byte = 0; + vm.local_state.registers[0] = formal_calldata_abi(); + + // Deleting all the historical records brought by the initial + // initialization of the VM to make them permanent. 
+ vm.decommittment_processor.delete_history(); + vm.event_sink.delete_history(); + vm.storage.delete_history(); + vm.memory.delete_history(); + vm.precompiles_processor.delete_history(); + + vm +} + +fn formal_calldata_abi() -> PrimitiveValue { + let fat_pointer = FatPointer { + offset: 0, + memory_page: BOOTLOADER_CALLDATA_PAGE, + start: 0, + length: 0, + }; + + PrimitiveValue { + value: fat_pointer.to_u256(), + is_pointer: true, + } +} + +pub(crate) fn bytecode_to_factory_dep(bytecode: Vec<u8>) -> (U256, Vec<U256>) { + let bytecode_hash = hash_bytecode(&bytecode); + let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); + + let bytecode_words = bytes_to_be_words(bytecode); + + (bytecode_hash, bytecode_words) +} + +/// Forms a word that contains meta information for the transaction execution. +/// +/// # Current layout +/// +/// - 0 byte (MSB): server-side tx execution mode. +/// In the server, we may want to execute different parts of the transaction in different contexts. +/// For example, when checking validity, we don't want to actually execute the transaction and have side effects. +/// +/// Possible values: +/// - 0x00: validate & execute (normal mode) +/// - 0x01: validate but DO NOT execute +/// - 0x02: execute but DO NOT validate +/// +/// - 31 byte (LSB): whether to execute the transaction or not (at all). +fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 { + let mut output = [0u8; 32]; + + // Set the 0 byte (execution mode). + output[0] = match execution_mode { + TxExecutionMode::VerifyExecute => 0x00, + TxExecutionMode::EstimateFee { .. } => 0x00, + TxExecutionMode::EthCall { .. } => 0x02, + }; + + // Set the 31 byte (marker for tx execution). + output[31] = u8::from(execute_tx); + + U256::from_big_endian(&output) +} diff --git a/core/tests/cross_external_nodes_checker/Cargo.toml b/core/tests/cross_external_nodes_checker/Cargo.toml index 111be3349ee1..74763b9d4af7 100644 --- a/core/tests/cross_external_nodes_checker/Cargo.toml +++ b/core/tests/cross_external_nodes_checker/Cargo.toml @@ -14,7 +14,6 @@ publish = false # We don't want to publish our binaries. 
zksync_types = { path = "../../lib/types", version = "1.0" } zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0" } zksync_utils = { path = "../../lib/utils", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } serde_json = "1.0" diff --git a/core/tests/cross_external_nodes_checker/README.md b/core/tests/cross_external_nodes_checker/README.md index 238568ac94e8..b6df859d9062 100644 --- a/core/tests/cross_external_nodes_checker/README.md +++ b/core/tests/cross_external_nodes_checker/README.md @@ -19,7 +19,7 @@ Run the server ``` zk init -zk server --components api,tree_lightweight,eth,data_fetcher,state_keeper +zk server --components api,tree,eth,data_fetcher,state_keeper ``` Run the EN diff --git a/core/tests/cross_external_nodes_checker/src/checker.rs b/core/tests/cross_external_nodes_checker/src/checker.rs index d12d9d780c4f..215b8c802b56 100644 --- a/core/tests/cross_external_nodes_checker/src/checker.rs +++ b/core/tests/cross_external_nodes_checker/src/checker.rs @@ -1,13 +1,17 @@ -use crate::config::{CheckerConfig, RpcMode}; use std::{ cmp::Ordering::{Equal, Greater, Less}, collections::HashMap, fmt::Debug, time::Duration, }; + +use serde_json::Value; +use tokio::{sync::watch::Receiver, time::sleep}; + use zksync_types::{ - api::BlockNumber, explorer_api::BlockDetails, web3::types::U64, L1BatchNumber, MiniblockNumber, - H256, + api::{BlockDetails, BlockNumber, L1BatchDetails}, + web3::types::U64, + L1BatchNumber, MiniblockNumber, H256, }; use zksync_utils::wait_for_tasks::wait_for_tasks; use zksync_web3_decl::{ @@ -15,16 +19,14 @@ use zksync_web3_decl::{ jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, types::FilterBuilder, + RpcResult, }; +use crate::config::{CheckerConfig, RpcMode}; use crate::{ divergence::{Divergence, DivergenceDetails}, helpers::compare_json, }; -use serde_json::Value; -use tokio::{sync::watch::Receiver, time::sleep}; -use zksync_types::explorer_api::L1BatchDetails; -use zksync_web3_decl::RpcResult; #[derive(Debug, Clone)] pub struct Checker { diff --git a/core/tests/loadnext/Cargo.toml b/core/tests/loadnext/Cargo.toml index 94da57df0f0f..aa759318dfea 100644 --- a/core/tests/loadnext/Cargo.toml +++ b/core/tests/loadnext/Cargo.toml @@ -29,7 +29,6 @@ tokio = { version = "1", features = ["full"] } futures = "0.3" anyhow = "1.0" rand = { version = "0.8", features = ["small_rng"] } -rand_distr = "0.4.3" envy = "0.4" hex = "0.4" static_assertions = "1.1" diff --git a/core/tests/loadnext/src/account/explorer_api_executor.rs b/core/tests/loadnext/src/account/explorer_api_executor.rs deleted file mode 100644 index a1886bc1c501..000000000000 --- a/core/tests/loadnext/src/account/explorer_api_executor.rs +++ /dev/null @@ -1,328 +0,0 @@ -use futures::{stream, TryStreamExt}; -use rand::{seq::SliceRandom, Rng}; -use rand_distr::{Distribution, Normal}; -use reqwest::{Response, StatusCode}; -use serde::{de::DeserializeOwned, Deserialize}; - -use std::{cmp, str, time::Instant}; - -use zksync::error::ClientError; -use zksync_types::explorer_api::{ - BlocksQuery, PaginationDirection, PaginationQuery, TransactionsQuery, -}; -use zksync_types::{Address, MiniblockNumber, H256}; - -use super::{Aborted, AccountLifespan}; -use crate::{ - command::{ExplorerApiRequest, ExplorerApiRequestType}, - config::RequestLimiters, - constants::API_REQUEST_TIMEOUT, - report::{ActionType, ReportBuilder, 
ReportLabel}, -}; - -#[derive(Debug, Clone)] -pub struct ExplorerApiClient { - client: reqwest::Client, - base_url: String, - last_sealed_block_number: Option<MiniblockNumber>, -} - -impl ExplorerApiClient { - pub fn new(base_url: String) -> Self { - Self { - client: reqwest::Client::default(), - base_url, - last_sealed_block_number: None, - } - } -} - -#[derive(Debug, Deserialize)] -pub struct NetworkStats { - pub last_sealed: MiniblockNumber, - pub last_verified: MiniblockNumber, - pub total_transactions: usize, -} - -// Client for explorer api, we don't use return values anywhere, so we return just json -impl ExplorerApiClient { - async fn response_to_result<T: DeserializeOwned>( - response: Response, - ) -> anyhow::Result<Option<T>> { - match response.status() { - StatusCode::OK => Ok(Some(response.json().await?)), - StatusCode::NOT_FOUND => Ok(None), - code => { - let body = response.bytes().await; - let body_str = if let Ok(body) = &body { - str::from_utf8(body).ok().filter(|body| body.len() < 1_024) - } else { - None - }; - let (body_sep, body_str) = match body_str { - Some(s) => (", body: ", s), - None => ("", ""), - }; - Err(anyhow::anyhow!( - "Unexpected status code: {code}{body_sep}{body_str}" - )) - } - } - } - - pub async fn network_stats(&mut self) -> anyhow::Result<Option<NetworkStats>> { - let url = format!("{}/network_stats", &self.base_url); - let response = self.client.get(url).send().await?; - let result: anyhow::Result<Option<NetworkStats>> = Self::response_to_result(response).await; - if let Ok(Some(stats)) = result.as_ref() { - self.last_sealed_block_number = Some(stats.last_sealed); - } - result - } - - pub async fn blocks(&self, query: BlocksQuery) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/blocks", &self.base_url); - let response = self.client.get(url).query(&query).send().await?; - Self::response_to_result(response).await - } - - pub async fn block(&self, number: u32) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/block/{}", &self.base_url, number); - let response = self.client.get(url).send().await?; - Self::response_to_result(response).await - } - - pub async fn transaction(&self, hash: &H256) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/transaction/{:?}", &self.base_url, hash); - let response = self.client.get(url).send().await?; - Self::response_to_result(response).await - } - - pub async fn transactions( - &self, - query: TransactionsQuery, - ) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/transactions", &self.base_url); - let response = self.client.get(url).query(&query).send().await?; - Self::response_to_result(response).await - } - - pub async fn account(&self, address: &Address) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/account/{address:?}", &self.base_url); - let response = self.client.get(url).send().await?; - Self::response_to_result(response).await - } - - pub async fn contract(&self, address: &Address) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/contract/{address:?}", &self.base_url); - let response = self.client.get(url).send().await?; - Self::response_to_result(response).await - } - - pub async fn token(&self, address: &Address) -> anyhow::Result<Option<serde_json::Value>> { - let url = format!("{}/token/{address:?}", &self.base_url); - let response = self.client.get(url).send().await?; - Self::response_to_result(response).await - } -} - -impl AccountLifespan { - async fn execute_explorer_api_request( - &mut self, - request: ExplorerApiRequest, - ) -> Result<(), anyhow::Error> { - let request_result = tokio::time::timeout( - API_REQUEST_TIMEOUT, - self.execute_explorer_api_request_inner(request), - ) - .await; - - match request_result { - Ok(result) => 
result.map_err(Into::into), - Err(_) => Err(ClientError::OperationTimeout)?, - } - } - - fn random_existing_block(&self) -> Option<MiniblockNumber> { - self.explorer_client.last_sealed_block_number.map(|number| { - let num = rand::thread_rng().gen_range(0..number.0); - MiniblockNumber(num) - }) - } - - async fn execute_explorer_api_request_inner( - &mut self, - request: ExplorerApiRequest, - ) -> Result<(), anyhow::Error> { - let ExplorerApiRequest { request_type } = request; - - match request_type { - ExplorerApiRequestType::NetworkStats => { - self.explorer_client.network_stats().await.map(drop) - } - ExplorerApiRequestType::Blocks => { - let from_block = self.random_existing_block(); - // Offset should be less than `last_block - from_block`, otherwise no blocks - // will be returned for the request. - let mut pagination = Self::random_pagination(); - pagination.offset = if let Some(from_block) = from_block { - let last_block = self.explorer_client.last_sealed_block_number.unwrap(); - cmp::min(pagination.offset, (last_block.0 - from_block.0) as usize) - } else { - 0 - }; - - self.explorer_client - .blocks(BlocksQuery { - from: from_block, - pagination, - }) - .await - .map(drop) - } - ExplorerApiRequestType::Block => { - let block = self.random_existing_block().map(|b| *b).unwrap_or(1); - self.explorer_client.block(block).await.map(drop) - } - ExplorerApiRequestType::Account => self - .explorer_client - .account(&self.wallet.wallet.address()) - .await - .map(drop), - ExplorerApiRequestType::Transaction => { - let tx = self - .successfully_sent_txs - .read() - .await - .choose(&mut self.wallet.rng) - .copied() - .expect("We skip such requests if success_tx is empty"); - self.explorer_client.transaction(&tx).await.map(drop) - } - ExplorerApiRequestType::Contract => { - let contract = self - .wallet - .deployed_contract_address - .get() - .expect("We skip such requests if contract is none"); - self.explorer_client.contract(contract).await.map(drop) - } - ExplorerApiRequestType::Token => self - .explorer_client - .token(&self.main_l2_token) - .await - .map(drop), - ExplorerApiRequestType::Transactions => { - let from_block = self.random_existing_block(); - self.explorer_client - .transactions(TransactionsQuery { - from_block_number: from_block, - from_tx_index: None, - block_number: None, - l1_batch_number: None, - address: None, - account_address: None, - contract_address: None, - pagination: Self::random_pagination(), - }) - .await - .map(drop) - } - ExplorerApiRequestType::AccountTransactions => { - let from_block = self.random_existing_block(); - self.explorer_client - .transactions(TransactionsQuery { - from_block_number: from_block, - from_tx_index: None, - block_number: None, - l1_batch_number: None, - address: None, - account_address: Some(self.wallet.wallet.address()), - contract_address: None, - pagination: Self::random_pagination(), - }) - .await - .map(drop) - } - } - } - - fn random_pagination() -> PaginationQuery { - // These parameters should correspond to pagination validation logic on the server - // so that we don't get all API requests failing. 
- const LIMIT: usize = 50; - const OFFSET_STD_DEV: f32 = 60.0; - const MAX_OFFSET: usize = 200; - - PaginationQuery { - limit: LIMIT, - offset: Self::normally_distributed_offset(OFFSET_STD_DEV, MAX_OFFSET), - direction: PaginationDirection::Newer, - } - } - - fn normally_distributed_offset(std_dev: f32, limit: usize) -> usize { - let normal = Normal::new(0.0, std_dev).unwrap(); - let sampled = normal.sample(&mut rand::thread_rng()); - let offset = sampled.abs() as usize; - cmp::min(offset, limit) - } - - async fn run_single_request(mut self, limiters: &RequestLimiters) -> Result<(), Aborted> { - let permit = limiters.explorer_api_requests.acquire().await.unwrap(); - - let request = ExplorerApiRequest::random(&mut self.wallet.rng).await; - let start = Instant::now(); - let mut empty_success_txs = true; - if request.request_type == ExplorerApiRequestType::Transaction { - empty_success_txs = self.successfully_sent_txs.read().await.is_empty(); - } - - let label = if let (ExplorerApiRequestType::Contract, None) = ( - request.request_type, - self.wallet.deployed_contract_address.get(), - ) { - ReportLabel::skipped("Contract not deployed yet") - } else if let (ExplorerApiRequestType::Transaction, true) = - (request.request_type, empty_success_txs) - { - ReportLabel::skipped("No one txs has been submitted yet") - } else { - let result = self.execute_explorer_api_request(request).await; - match result { - Ok(_) => ReportLabel::ActionDone, - Err(err) => { - vlog::error!("API request failed: {request:?}, reason: {err}"); - ReportLabel::failed(err.to_string()) - } - } - }; - drop(permit); - - let api_action_type = ActionType::from(request.request_type); - let report = ReportBuilder::default() - .action(api_action_type) - .label(label) - .time(start.elapsed()) - .reporter(self.wallet.wallet.address()) - .finish(); - self.send_report(report).await - } - - pub(super) async fn run_explorer_api_requests_task( - mut self, - limiters: &RequestLimiters, - ) -> Result<(), Aborted> { - // Setup current last block - self.explorer_client.network_stats().await.ok(); - - // We use `try_for_each_concurrent` to propagate test abortion, but we cannot - // rely solely on its concurrency limiter because we need to limit concurrency - // for all accounts in total, rather than for each account separately. 
- let local_limit = (self.config.sync_api_requests_limit / 5).max(1); - let request = stream::repeat_with(move || Ok(self.clone())); - request - .try_for_each_concurrent(local_limit, |this| this.run_single_request(limiters)) - .await - } -} diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index 6520a6d8cd1c..e868937f65c5 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -7,16 +7,13 @@ use std::{ use tokio::sync::RwLock; use zksync::{error::ClientError, operations::SyncTransactionHandle, HttpClient}; -use zksync_types::{ - api::{TransactionReceipt, U64}, - Address, Nonce, H256, U256, -}; +use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64}; use zksync_utils::test_utils::LoadnextContractExecutionParams; use zksync_web3_decl::jsonrpsee::core::Error as CoreError; use crate::utils::format_gwei; use crate::{ - account::{explorer_api_executor::ExplorerApiClient, tx_command_executor::SubmitResult}, + account::tx_command_executor::SubmitResult, account_pool::{AddressPool, TestWallet}, command::{ExpectedOutcome, IncorrectnessModifier, TxCommand, TxType}, config::{LoadtestConfig, RequestLimiters}, @@ -25,7 +22,6 @@ use crate::{ }; mod api_request_executor; -mod explorer_api_executor; mod pubsub_executor; mod tx_command_executor; @@ -58,8 +54,6 @@ struct InflightTx { pub struct AccountLifespan { /// Wallet used to perform the test. pub wallet: TestWallet, - /// Client for explorer api - pub explorer_client: ExplorerApiClient, config: LoadtestConfig, contract_execution_params: LoadnextContractExecutionParams, /// Pool of account addresses, used to generate commands. @@ -90,11 +84,8 @@ impl AccountLifespan { main_l2_token: Address, paymaster_address: Address, ) -> Self { - let explorer_client = ExplorerApiClient::new(config.l2_explorer_api_address.clone()); - Self { wallet: test_account, - explorer_client, config: config.clone(), contract_execution_params, addresses, @@ -102,7 +93,6 @@ impl AccountLifespan { main_l1_token: config.main_token, main_l2_token, paymaster_address, - report_sink, inflight_txs: Default::default(), current_nonce: None, @@ -113,7 +103,6 @@ impl AccountLifespan { let duration = self.config.duration(); let tx_execution_task = self.clone().run_tx_execution(); let api_requests_task = self.clone().run_api_requests_task(limiters); - let api_explorer_requests_task = self.clone().run_explorer_api_requests_task(limiters); tokio::select! 
{ result = tx_execution_task => { @@ -122,9 +111,6 @@ impl AccountLifespan { result = api_requests_task => { vlog::trace!("API requests task finished with {result:?}"); }, - result = api_explorer_requests_task => { - vlog::trace!("Explorer API requests task finished with {result:?}"); - }, result = self.run_pubsub_task(limiters) => { vlog::trace!("PubSub task finished with {result:?}"); }, diff --git a/core/tests/loadnext/src/command/explorer_api.rs b/core/tests/loadnext/src/command/explorer_api.rs deleted file mode 100644 index ed1c6fdf53aa..000000000000 --- a/core/tests/loadnext/src/command/explorer_api.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::all::AllWeighted; -use crate::config::ExplorerApiRequestWeights; -use crate::rng::{LoadtestRng, WeightedRandom}; -use once_cell::sync::OnceCell; - -static WEIGHTS: OnceCell<[(ExplorerApiRequestType, f32); 9]> = OnceCell::new(); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum ExplorerApiRequestType { - NetworkStats, - Blocks, - Block, - Transaction, - Transactions, - AccountTransactions, - Account, - Contract, - Token, -} - -impl ExplorerApiRequestType { - pub fn initialize_weights(weights: &ExplorerApiRequestWeights) { - WEIGHTS - .set([ - (ExplorerApiRequestType::NetworkStats, weights.network_stats), - (ExplorerApiRequestType::Blocks, weights.blocks), - (ExplorerApiRequestType::Block, weights.block), - (ExplorerApiRequestType::Transaction, weights.transaction), - (ExplorerApiRequestType::Transactions, weights.transactions), - ( - ExplorerApiRequestType::AccountTransactions, - weights.account_transactions, - ), - (ExplorerApiRequestType::Account, weights.account), - (ExplorerApiRequestType::Contract, weights.contract), - (ExplorerApiRequestType::Token, weights.token), - ]) - .unwrap(); - } -} -impl AllWeighted for ExplorerApiRequestType { - fn all_weighted() -> &'static [(Self, f32)] { - WEIGHTS.get().expect("Weights are not initialized") - } -} - -#[derive(Debug, Copy, Clone)] -pub struct ExplorerApiRequest { - /// Type of the request to be performed. - pub request_type: ExplorerApiRequestType, -} - -impl ExplorerApiRequest { - pub async fn random(rng: &mut LoadtestRng) -> Self { - let request_type = ExplorerApiRequestType::random(rng); - Self { request_type } - } -} diff --git a/core/tests/loadnext/src/command/mod.rs b/core/tests/loadnext/src/command/mod.rs index 1b46acd00e35..cf0790dd647e 100644 --- a/core/tests/loadnext/src/command/mod.rs +++ b/core/tests/loadnext/src/command/mod.rs @@ -1,11 +1,9 @@ pub use self::{ api::{ApiRequest, ApiRequestType}, - explorer_api::{ExplorerApiRequest, ExplorerApiRequestType}, pubsub::SubscriptionType, tx_command::{ExpectedOutcome, IncorrectnessModifier, TxCommand, TxType}, }; mod api; -mod explorer_api; mod pubsub; mod tx_command; diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index f2ce9a465719..22990d7bc2e3 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -113,10 +113,6 @@ pub struct LoadtestConfig { #[serde(default = "default_l2_ws_rpc_address")] pub l2_ws_rpc_address: String, - /// Explorer api address of L2 node. - #[serde(default = "default_l2_explorer_api_address")] - pub l2_explorer_api_address: String, - /// The maximum number of transactions per account that can be sent without waiting for confirmation. /// Should not exceed the corresponding value in the L2 node configuration. 
#[serde(default = "default_max_inflight_txs")] @@ -157,12 +153,6 @@ fn default_l1_rpc_address() -> String { result } -fn default_l2_explorer_api_address() -> String { - let result = "http://127.0.0.1:3070".to_string(); - vlog::info!("Using default L2_EXPLORER_API_ADDRESS: {result}"); - result -} - fn default_master_wallet_pk() -> String { // Use this key only for localhost because it is compromised! // Using this key for rinkeby will result in losing rinkeby ETH. @@ -293,7 +283,6 @@ impl LoadtestConfig { pub struct ExecutionConfig { pub transaction_weights: TransactionWeights, pub contract_execution_params: LoadnextContractExecutionParams, - pub explorer_api_config_weights: ExplorerApiRequestWeights, } impl ExecutionConfig { @@ -302,53 +291,13 @@ impl ExecutionConfig { TransactionWeights::from_env().unwrap_or_else(default_transaction_weights); let contract_execution_params = LoadnextContractExecutionParams::from_env() .unwrap_or_else(default_contract_execution_params); - let explorer_api_config_weights = ExplorerApiRequestWeights::from_env() - .unwrap_or_else(default_explorer_api_request_weights); Self { transaction_weights, contract_execution_params, - explorer_api_config_weights, - } - } -} - -#[derive(Debug, Clone, Deserialize)] -pub struct ExplorerApiRequestWeights { - pub network_stats: f32, - pub blocks: f32, - pub block: f32, - pub account_transactions: f32, - pub transaction: f32, - pub transactions: f32, - pub account: f32, - pub contract: f32, - pub token: f32, -} - -impl Default for ExplorerApiRequestWeights { - fn default() -> Self { - Self { - network_stats: 1.0, - blocks: 1.0, - block: 1.0, - transactions: 1.0, - account: 1.0, - token: 1.0, - contract: 1.0, - transaction: 1.0, - account_transactions: 1.0, } } } -impl ExplorerApiRequestWeights { - pub fn from_env() -> Option { - envy::prefixed("EXPLORER_API_REQUESTS_WEIGHTS_") - .from_env() - .ok() - } -} - #[derive(Debug, Clone, Deserialize)] pub struct TransactionWeights { pub deposit: f32, @@ -386,16 +335,9 @@ fn default_contract_execution_params() -> LoadnextContractExecutionParams { result } -fn default_explorer_api_request_weights() -> ExplorerApiRequestWeights { - let result = ExplorerApiRequestWeights::default(); - vlog::info!("Using default ExplorerApiRequestWeights: {result:?}"); - result -} - #[derive(Debug)] pub struct RequestLimiters { pub api_requests: Semaphore, - pub explorer_api_requests: Semaphore, pub subscriptions: Semaphore, } @@ -403,7 +345,6 @@ impl RequestLimiters { pub fn new(config: &LoadtestConfig) -> Self { Self { api_requests: Semaphore::new(config.sync_api_requests_limit), - explorer_api_requests: Semaphore::new(config.sync_api_requests_limit), subscriptions: Semaphore::new(config.sync_pubsub_subscriptions_limit), } } diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index d93751ca6f04..22208853fd55 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -12,9 +12,10 @@ use zksync::{EthNamespaceClient, EthereumProvider, ZksNamespaceClient}; use zksync_config::constants::MAX_L1_TRANSACTION_GAS_LIMIT; use zksync_eth_client::{BoundEthInterface, EthInterface}; use zksync_eth_signer::PrivateKeySigner; -use zksync_types::api::{BlockNumber, U64}; -use zksync_types::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{tokens::ETHEREUM_ADDRESS, Address, Nonce, U256}; +use zksync_types::{ + api::BlockNumber, tokens::ETHEREUM_ADDRESS, Address, Nonce, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, +}; use 
crate::report::ReportBuilder; use crate::utils::format_eth; diff --git a/core/tests/loadnext/src/main.rs b/core/tests/loadnext/src/main.rs index e814513b40d8..a920c5b19b29 100644 --- a/core/tests/loadnext/src/main.rs +++ b/core/tests/loadnext/src/main.rs @@ -5,7 +5,7 @@ //! values to check the local zkSync deployment. use loadnext::{ - command::{ExplorerApiRequestType, TxType}, + command::TxType, config::{ExecutionConfig, LoadtestConfig}, executor::Executor, report_collector::LoadtestResult, @@ -25,16 +25,11 @@ async fn main() -> anyhow::Result<()> { let prometheus_config: Option = envy::prefixed("PROMETHEUS_").from_env().ok(); TxType::initialize_weights(&execution_config.transaction_weights); - ExplorerApiRequestType::initialize_weights(&execution_config.explorer_api_config_weights); vlog::info!( "Run with tx weights: {:?}", execution_config.transaction_weights ); - vlog::info!( - "Run explorer api weights: {:?}", - execution_config.explorer_api_config_weights - ); let mut executor = Executor::new(config, execution_config).await?; if let Some(prometheus_config) = prometheus_config { diff --git a/core/tests/loadnext/src/report.rs b/core/tests/loadnext/src/report.rs index 1af6db3b299b..0ea86a49de04 100644 --- a/core/tests/loadnext/src/report.rs +++ b/core/tests/loadnext/src/report.rs @@ -5,9 +5,7 @@ use zksync_types::Address; use crate::account::ExecutionType; use crate::{ all::All, - command::{ - ApiRequest, ApiRequestType, ExplorerApiRequestType, SubscriptionType, TxCommand, TxType, - }, + command::{ApiRequest, ApiRequestType, SubscriptionType, TxCommand, TxType}, }; /// Report for any operation done by loadtest. @@ -189,16 +187,9 @@ pub enum ActionType { InitComplete, Tx(TxActionType), Api(ApiActionType), - ExplorerApi(ExplorerApiRequestType), Subscription(SubscriptionType), } -impl From for ActionType { - fn from(action: ExplorerApiRequestType) -> Self { - Self::ExplorerApi(action) - } -} - impl From for ActionType { fn from(action: TxActionType) -> Self { Self::Tx(action) diff --git a/core/tests/loadnext/src/report_collector/operation_results_collector.rs b/core/tests/loadnext/src/report_collector/operation_results_collector.rs index 17c52f5b0e80..e794233a1de7 100644 --- a/core/tests/loadnext/src/report_collector/operation_results_collector.rs +++ b/core/tests/loadnext/src/report_collector/operation_results_collector.rs @@ -10,7 +10,6 @@ pub struct OperationResultsCollector { pub(super) tx_results: ResultCollector, api_requests_results: ResultCollector, subscriptions_results: ResultCollector, - explorer_api_requests_results: ResultCollector, loadtest_duration: Duration, } @@ -69,7 +68,6 @@ impl OperationResultsCollector { ActionType::Tx(_) => self.tx_results.add_status(status), ActionType::Api(_) => self.api_requests_results.add_status(status), ActionType::Subscription(_) => self.subscriptions_results.add_status(status), - ActionType::ExplorerApi(_) => self.explorer_api_requests_results.add_status(status), ActionType::InitComplete => {} } } @@ -93,6 +91,5 @@ impl OperationResultsCollector { vlog::info!("Transaction execution stats: {}", self.tx_results); vlog::info!("API requests stats: {}", self.api_requests_results); vlog::info!("Subscriptions stats: {}", self.subscriptions_results); - vlog::info!("Explorer api stats: {}", self.explorer_api_requests_results); } } diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 2700e6440192..a936f1d0de5f 100644 --- 
a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -69,6 +69,8 @@ describe('Block reverting test', function () { // Set 1000 seconds deadline for `ExecuteBlocks` operation. process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1000'; + // Set lightweight mode for the Merkle tree. + process.env.DATABASE_MERKLE_TREE_MODE = 'lightweight'; // Run server in background. const components = 'api,tree,eth,data_fetcher,state_keeper'; @@ -167,7 +169,7 @@ describe('Block reverting test', function () { process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; // Run server. - utils.background(`zk server --components api,tree_lightweight,eth,data_fetcher,state_keeper`); + utils.background('zk server --components api,tree,eth,data_fetcher,state_keeper'); await utils.sleep(10); const balanceBefore = await alice.getBalance(); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 2864c3a5fddc..a4087b24eb32 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -76,6 +76,24 @@ impl ZkSyncAccount { _increment_nonce: bool, ) -> L2Tx { todo!("New withdrawal support is not yet implemented") + + // let mut stored_nonce = self.nonce.lock().unwrap(); + // let withdraw = GenericL2Tx::::new_signed( + // token, + // amount, + // to, + // nonce.unwrap_or(*stored_nonce), + // fee, + // L2ChainId(270), + // &self.private_key, + // ) + // .expect("should create a signed withdrawal transaction"); + + // if increment_nonce { + // **stored_nonce += 1; + // } + + // withdraw.into() } pub fn sign_deploy_contract( diff --git a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol index 6a09e7f09134..1ad6b64ce44c 100644 --- a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol +++ b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol @@ -75,6 +75,8 @@ library RLPEncoder { /// @notice Uses little endian ordering (The least significant byte has index `0`). /// NOTE: returns `0` for `0` function _highestByteSet(uint256 _number) private pure returns (uint256 hbs) { + // + // if (_number >= 2**128) { _number >>= 128; hbs += 16;
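The `_highestByteSet` hunk above only shows the first step of the routine; the full function is a binary search over byte positions: test whether the top half of the value is occupied, shift it away if so, and accumulate the corresponding byte count. The same algorithm in TypeScript, as a hedged sketch using `bigint` (the Solidity original operates on `uint256`):

```ts
// Binary search for the index of the highest byte set, matching the documented
// behavior above: little-endian byte indexing, and 0 is returned for 0.
function highestByteSet(n: bigint): number {
    let hbs = 0;
    // Window halving: 128/64/32/16/8 bits correspond to 16/8/4/2/1 bytes.
    for (const [bits, bytes] of [[128, 16], [64, 8], [32, 4], [16, 2], [8, 1]] as const) {
        if (n >= 1n << BigInt(bits)) {
            n >>= BigInt(bits);
            hbs += bytes;
        }
    }
    return hbs;
}
```

Each step halves the remaining window, so the index is found in five comparisons regardless of the input.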
diff --git a/core/tests/ts-integration/contracts/vyper/CreateForwarder.vy b/core/tests/ts-integration/contracts/vyper/CreateForwarder.vy deleted file mode 100644 index dc9114475b5b..000000000000 --- a/core/tests/ts-integration/contracts/vyper/CreateForwarder.vy +++ /dev/null @@ -1,12 +0,0 @@ -# @version ^0.3 -# vim: ft=python - -interface DeployMe: - def setup(name: String[101]): nonpayable - -@external -def deploy(_masterCopy: address, _name: String[101]) -> address: - addr: address = create_forwarder_to(_masterCopy) - # DeployMe.__init__ was not called, else this would fail - DeployMe(addr).setup(_name) - return addr diff --git a/core/tests/ts-integration/contracts/vyper/DeployMe.vy b/core/tests/ts-integration/contracts/vyper/DeployMe.vy deleted file mode 100644 index 3377f2f8d4fd..000000000000 --- a/core/tests/ts-integration/contracts/vyper/DeployMe.vy +++ /dev/null @@ -1,18 +0,0 @@ -# @version ^0.3 -# vim: ft=python - -owner: public(address) -name: public(String[101]) - -# __init__ is not called when deployed from create_forwarder_to -@external -def __init__(): - self.owner = msg.sender - self.name = "Foo" - -# call once after create_forwarder_to -@external -def setup(_name: String[101]): - assert self.owner == ZERO_ADDRESS, "owner != zero address" - self.owner = msg.sender - self.name = _name diff --git a/core/tests/ts-integration/contracts/vyper/Greeter.vy b/core/tests/ts-integration/contracts/vyper/Greeter.vy new file mode 100644 index 000000000000..30db1c3ae1b3 --- /dev/null +++ b/core/tests/ts-integration/contracts/vyper/Greeter.vy @@ -0,0 +1,23 @@ +# @version ^0.3.3 +# vim: ft=python + +owner: public(address) +greeting: public(String[100]) + +# __init__ is not invoked when deployed from create_forwarder_to +@external +def __init__(greeting: String[64]): + self.owner = msg.sender + self.greeting = greeting + +# Invoke once after create_forwarder_to +@external +def setup(_greeting: String[100]): + assert self.owner == ZERO_ADDRESS, "owner != zero address" + self.owner = msg.sender + self.greeting = _greeting + +@external +@view +def greet() -> String[100]: + return self.greeting diff --git a/core/tests/ts-integration/contracts/vyper/Greeter2.vy b/core/tests/ts-integration/contracts/vyper/Greeter2.vy new file mode 100644 index 000000000000..7a154cf39a61 --- /dev/null +++ b/core/tests/ts-integration/contracts/vyper/Greeter2.vy @@ -0,0 +1,15 @@ +# @version ^0.3.3 +# vim: ft=python + +import Greeter as Greeter + +greeter_contract: Greeter + +@external +def __init__(greeter_address: address): + self.greeter_contract = Greeter(greeter_address) + +@external +@view +def test() -> String[100]: + return self.greeter_contract.greet() diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 03bb67881f25..b039cb4978b0 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -7,7 +7,8 @@ "test": "zk f jest --forceExit --testTimeout 60000", "long-running-test": "zk f jest", "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts", - "api-test": "zk f jest -- api/web3.test.ts" + "api-test": "zk f jest -- api/web3.test.ts", + "contract-verification-test": "zk f jest -- api/contract-verification.test.ts" }, "devDependencies": { "@matterlabs/hardhat-zksync-deploy": "^0.6.1", diff --git a/core/tests/ts-integration/src/env.ts
b/core/tests/ts-integration/src/env.ts index 30d929666894..1f376e815d79 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -2,7 +2,6 @@ import * as path from 'path'; import * as fs from 'fs'; import * as ethers from 'ethers'; import * as zksync from 'zksync-web3'; -import { getTokens } from 'reading-tool'; import { TestEnvironment } from './types'; import { Reporter } from './reporter'; @@ -68,9 +67,9 @@ export async function loadTestEnvironment(): Promise<TestEnvironment> { process.env.ZKSYNC_WEB3_WS_API_URL || process.env.API_WEB3_JSON_RPC_WS_URL, 'WS L2 node URL' ); - const explorerUrl = process.env.ZKSYNC_ENV!.startsWith('ext-node') - ? '' - : ensureVariable(process.env.API_EXPLORER_URL, 'Explorer API'); + const contractVerificationUrl = process.env.ZKSYNC_ENV!.startsWith('ext-node') + ? process.env.API_CONTRACT_VERIFICATION_URL! + : ensureVariable(process.env.API_CONTRACT_VERIFICATION_URL, 'Contract verification API'); const tokens = getTokens(process.env.CHAIN_ETH_NETWORK || 'localhost'); // wBTC is chosen because it has decimals different from ETH (8 instead of 18). @@ -101,7 +100,7 @@ export async function loadTestEnvironment(): Promise<TestEnvironment> { l2NodeUrl, l1NodeUrl, wsL2NodeUrl, - explorerUrl, + contractVerificationUrl, erc20Token: { name: token.name, symbol: token.symbol, @@ -128,3 +127,19 @@ function ensureVariable(value: string | undefined, variableName: string): string } return value; } + +type L1Token = { + name: string; + symbol: string; + decimals: number; + address: string; +}; + +function getTokens(network: string): L1Token[] { + const configPath = `${process.env.ZKSYNC_HOME}/etc/tokens/${network}.json`; + return JSON.parse( + fs.readFileSync(configPath, { + encoding: 'utf-8' + }) + ); +}
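Since the inlined `getTokens` above replaces the external `reading-tool` dependency, it is worth pinning down the on-disk format it expects: `$ZKSYNC_HOME/etc/tokens/<network>.json` should hold an array whose entries match the `L1Token` type. A hypothetical entry for illustration (the address is a placeholder, not a real deployment):

```ts
// Sketch of the shape of etc/tokens/localhost.json as consumed by getTokens();
// values here are illustrative only. wBTC is the token the tests favor because
// its 8 decimals differ from ETH's 18.
const exampleTokens = [
    {
        name: 'Wrapped BTC',
        symbol: 'wBTC',
        decimals: 8,
        address: '0x0000000000000000000000000000000000000000'
    }
];
```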
diff --git a/core/tests/ts-integration/src/system.ts b/core/tests/ts-integration/src/system.ts index aaed9620bc76..631a752c9342 100644 --- a/core/tests/ts-integration/src/system.ts +++ b/core/tests/ts-integration/src/system.ts @@ -15,12 +15,12 @@ export interface ForceDeployment { bytecodeHash: BytesLike; // The address on which to deploy the bytecodehash to newAddress: string; + // Whether to call the constructor + callConstructor: boolean; // The value with which to initialize a contract value: BigNumber; // The constructor calldata input: BytesLike; - // Whether to call the constructor - callConstructor: boolean; } // A minimized copy of the `diamondCut` function used in L1 contracts diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts index 82c816001be7..20a7175cdd70 100644 --- a/core/tests/ts-integration/src/types.ts +++ b/core/tests/ts-integration/src/types.ts @@ -38,9 +38,9 @@ export interface TestEnvironment { */ wsL2NodeUrl: string; /** - * URL of zkSync node's Explorer API. + * URL of zkSync node's contract verification API. */ - explorerUrl: string; + contractVerificationUrl: string; /** * Description of the "main" ERC20 token used in the tests. */ diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts new file mode 100644 index 000000000000..55dee90a21bf --- /dev/null +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -0,0 +1,274 @@ +import { TestMaster } from '../../src/index'; +import * as zksync from 'zksync-web3'; +import * as ethers from 'ethers'; +import fetch from 'node-fetch'; +import fs from 'fs'; +import { deployContract, getContractSource, getTestContract } from '../../src/helpers'; +import { sleep } from 'zksync-web3/build/src/utils'; + +const contracts = { + counter: getTestContract('Counter'), + customAccount: getTestContract('CustomAccount'), + create: { + ...getTestContract('Import'), + factoryDep: getTestContract('Foo').bytecode + }, + greeter2: { + ...getTestContract('Greeter2'), + factoryDep: getTestContract('Greeter').bytecode + } +}; + +// Regular expression to match ISO dates. +const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; + +const ZKSOLC_VERSION = 'v1.3.13'; +const SOLC_VERSION = '0.8.20'; + +const ZKVYPER_VERSION = 'v1.3.9'; +const VYPER_VERSION = '0.3.3'; + +type HttpMethod = 'POST' | 'GET'; + +describe('Tests for the contract verification API', () => { + let testMaster: TestMaster; + let alice: zksync.Wallet; + + if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { + test('Contract verification test is not requested to run', () => { + return; + }); + } else { + beforeAll(() => { + testMaster = TestMaster.getInstance(__filename); + alice = testMaster.mainAccount(); + + if (process.env.ZKSYNC_ENV!.startsWith('ext-node')) { + console.warn("You are trying to run contract verification tests on external node. It's not supported."); + } + }); + + test('should test contract verification', async () => { + const counterContract = await deployContract(alice, contracts.counter, []); + const constructorArguments = counterContract.interface.encodeDeploy([]); + + const requestBody = { + contractAddress: counterContract.address, + contractName: 'contracts/counter/counter.sol:Counter', + sourceCode: getContractSource('counter/counter.sol'), + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, + optimizationUsed: true, + constructorArguments, + isSystem: true + }; + let requestId = await query('POST', '/contract_verification', undefined, requestBody); + + await expectVerifyRequestToSucceed(requestId, requestBody); + }); + + test('should test multi-files contract verification', async () => { + const contractFactory = new zksync.ContractFactory(contracts.create.abi, contracts.create.bytecode, alice); + const contractHandle = await contractFactory.deploy({ + customData: { + factoryDeps: [contracts.create.factoryDep] + } + }); + const importContract = await contractHandle.deployed(); + const standardJsonInput = { + language: 'Solidity', + sources: { + 'contracts/create/create.sol': { + content: getContractSource('create/create.sol') + }, + 'contracts/create/Foo.sol': { + content: getContractSource('create/Foo.sol') + } + }, + settings: { + optimizer: { enabled: true }, + isSystem: true + } + }; + + const constructorArguments = importContract.interface.encodeDeploy([]); + + const requestBody = { + contractAddress: importContract.address, + contractName: 'contracts/create/create.sol:Import', + sourceCode: standardJsonInput, + codeFormat: 'solidity-standard-json-input', + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, +
optimizationUsed: true, + constructorArguments + }; + let requestId = await query('POST', '/contract_verification', undefined, requestBody); + + await expectVerifyRequestToSucceed(requestId, requestBody); + }); + + test('should test yul contract verification', async () => { + const contractPath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/Empty.yul`; + const sourceCode = fs.readFileSync(contractPath, 'utf8'); + + const bytecodePath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/Empty.yul.zbin`; + const bytecode = fs.readFileSync(bytecodePath); + + const contractFactory = new zksync.ContractFactory([], bytecode, alice); + const deployTx = await contractFactory.deploy(); + const contractAddress = (await deployTx.deployed()).address; + + const requestBody = { + contractAddress, + contractName: 'Empty', + sourceCode, + codeFormat: 'yul-single-file', + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, + optimizationUsed: true, + constructorArguments: '0x', + isSystem: true + }; + let requestId = await query('POST', '/contract_verification', undefined, requestBody); + + await expectVerifyRequestToSucceed(requestId, requestBody); + }); + + test('should test vyper contract verification', async () => { + const contractFactory = new zksync.ContractFactory( + contracts.greeter2.abi, + contracts.greeter2.bytecode, + alice + ); + const randomAddress = ethers.utils.hexlify(ethers.utils.randomBytes(20)); + const contractHandle = await contractFactory.deploy(randomAddress, { + customData: { + factoryDeps: [contracts.greeter2.factoryDep] + } + }); + const contract = await contractHandle.deployed(); + const constructorArguments = contract.interface.encodeDeploy([randomAddress]); + + const requestBody = { + contractAddress: contract.address, + contractName: 'Greeter2', + sourceCode: { + Greeter: getContractSource('vyper/Greeter.vy'), + Greeter2: getContractSource('vyper/Greeter2.vy') + }, + codeFormat: 'vyper-multi-file', + compilerZkvyperVersion: ZKVYPER_VERSION, + compilerVyperVersion: VYPER_VERSION, + optimizationUsed: true, + constructorArguments + }; + let requestId = await query('POST', '/contract_verification', undefined, requestBody); + + await expectVerifyRequestToSucceed(requestId, requestBody); + }); + + test('Should return zksolc versions', async () => { + const versions = await query('GET', `/contract_verification/zksolc_versions`); + expect(versions.includes(ZKSOLC_VERSION)).toBe(true); + }); + + test('Should return solc versions', async () => { + const versions = await query('GET', `/contract_verification/solc_versions`); + expect(versions.includes(SOLC_VERSION)).toBe(true); + }); + + test('Should return zkvyper versions', async () => { + const versions = await query('GET', `/contract_verification/zkvyper_versions`); + expect(versions.includes(ZKVYPER_VERSION)).toBe(true); + }); + + test('Should return vyper versions', async () => { + const versions: string[] = await query('GET', `/contract_verification/vyper_versions`); + expect(versions.includes(VYPER_VERSION)).toBe(true); + }); + + afterAll(async () => { + await testMaster.deinitialize(); + }); + } + + /** + * Performs an API call to the Contract verification API. + * + * @param method HTTP method to use for the request. + * @param endpoint API endpoint to call. + * @param queryParams Parameters for a query string. + * @param requestBody Request body. If provided, it is JSON-encoded and sent as the body of the request. + * @returns API response parsed as JSON. + */ + async function query( + method: HttpMethod, + endpoint: string, + queryParams?: { [key: string]: string }, + requestBody?: any + ): Promise<any> { + const url = new URL(endpoint, testMaster.environment().contractVerificationUrl); + // Iterate through query params and add them to URL. + if (queryParams) { + Object.entries(queryParams).forEach(([key, value]) => url.searchParams.set(key, value)); + } + + let init = { + method, + headers: { + 'Content-Type': 'application/json' + }, + // Only attach a body when one was provided; plain GET requests are sent without one. + body: requestBody ? JSON.stringify(requestBody) : undefined + }; + + let response = await fetch(url, init); + try { + return await response.json(); + } catch (e) { + throw { + error: 'Could not decode JSON in response', + status: `${response.status} ${response.statusText}` + }; + } + } + + async function expectVerifyRequestToSucceed(requestId: number, requestBody: any) { + let retries = 0; + while (true) { + if (retries > 50) { + throw new Error('Too many retries'); + } + + let statusObject = await query('GET', `/contract_verification/${requestId}`); + + if (statusObject.status == 'successful') { + break; + } else if (statusObject.status == 'failed') { + throw new Error(statusObject.error); + } else { + retries += 1; + await sleep(1000); + } + } + + const contractInfo = await query('GET', `/contract_verification/info/${requestBody.contractAddress}`); + + expect(contractInfo).toMatchObject({ + request: { + id: requestId, + contractAddress: requestBody.contractAddress.toLowerCase(), + codeFormat: expect.any(String), + sourceCode: expect.anything(), + contractName: requestBody.contractName, + optimizationUsed: requestBody.optimizationUsed, + constructorArguments: requestBody.constructorArguments, + isSystem: expect.any(Boolean) + }, + artifacts: expect.any(Object), + verifiedAt: expect.stringMatching(DATE_REGEX) + }); + } +});
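Outside of the test harness, the flow exercised by these tests is a plain submit-then-poll loop against two endpoints. A standalone sketch under the same assumptions as the tests above (endpoint paths and status values as used there, `node-fetch` for HTTP):

```ts
import fetch from 'node-fetch';

// Minimal submit-then-poll verification client. `apiUrl` is the contract
// verification API base URL; `requestBody` follows the shape built in the tests.
async function verifyContract(apiUrl: string, requestBody: any): Promise<void> {
    // Submit the verification request; the API responds with a numeric request id.
    const submit = await fetch(`${apiUrl}/contract_verification`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(requestBody)
    });
    const requestId = await submit.json();

    // Poll the status endpoint until the request settles.
    for (let retries = 0; retries < 50; retries++) {
        const status: any = await (await fetch(`${apiUrl}/contract_verification/${requestId}`)).json();
        if (status.status === 'successful') return;
        if (status.status === 'failed') throw new Error(status.error);
        await new Promise((resolve) => setTimeout(resolve, 1000));
    }
    throw new Error('Verification did not complete in time');
}
```

The retry budget and one-second poll interval mirror `expectVerifyRequestToSucceed` above and can be tuned freely.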
diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index e72f9411334c..e8e860c4ec19 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -23,10 +23,6 @@ describe('Debug methods', () => { tokenDetails = testMaster.environment().erc20Token; aliceErc20 = new zksync.Contract(tokenDetails.l2Address, zksync.utils.IERC20, alice); - - if (process.env.ZKSYNC_ENV!.startsWith('ext-node')) { - console.warn("You are trying to run debug namespace tests on external node.
It's not supported."); - } }); test('Debug sending erc20 token in a block', async () => { diff --git a/core/tests/ts-integration/tests/api/explorer.test.ts b/core/tests/ts-integration/tests/api/explorer.test.ts deleted file mode 100644 index fb30b93e8e5a..000000000000 --- a/core/tests/ts-integration/tests/api/explorer.test.ts +++ /dev/null @@ -1,878 +0,0 @@ -import { TestMaster } from '../../src/index'; -import * as zksync from 'zksync-web3'; -import * as ethers from 'ethers'; -import fetch from 'node-fetch'; -import fs from 'fs'; -import { - anyTransaction, - deployContract, - getContractSource, - getTestContract, - waitForNewL1Batch -} from '../../src/helpers'; -import { sleep } from 'zksync-web3/build/src/utils'; -import { IERC20MetadataFactory } from 'zksync-web3/build/typechain'; -import { extractFee } from '../../src/modifiers/balance-checker'; -import { Token } from '../../src/types'; - -const contracts = { - counter: getTestContract('Counter'), - customAccount: getTestContract('CustomAccount'), - create: { - ...getTestContract('Import'), - factoryDep: getTestContract('Foo').bytecode - }, - createForwarder: { - ...getTestContract('CreateForwarder'), - factoryDep: getTestContract('DeployMe').bytecode - } -}; - -// Regular expression to match 32-byte hashes. -const HASH_REGEX = /^0x[\da-fA-F]{64}$/; -// Regular expression to match 20-byte addresses in lowercase. -const ADDRESS_REGEX = /^0x[\da-f]{40}$/; -// Regular expression to match variable-length hex number. -const HEX_VALUE_REGEX = /^0x[\da-fA-F]*$/; -// Regular expression to match ISO dates. -const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; - -const ZKSOLC_VERSION = 'v1.3.13'; -const SOLC_VERSION = '0.8.20'; - -const ZKVYPER_VERSION = 'v1.3.9'; -const VYPER_VERSION = '0.3.3'; - -describe('Tests for the Explorer API', () => { - let testMaster: TestMaster; - let alice: zksync.Wallet; - let erc20: Token; - - beforeAll(() => { - testMaster = TestMaster.getInstance(__filename); - alice = testMaster.mainAccount(); - erc20 = testMaster.environment().erc20Token; - - if (process.env.ZKSYNC_ENV!.startsWith('ext-node')) { - console.warn("You are trying to run explorer tests on external node. It's not supported."); - } - }); - - test('Should test /network_stats endpoint', async () => { - const initialStats = await query('/network_stats'); - expect(initialStats).toEqual({ - last_sealed: expect.any(Number), - last_verified: expect.any(Number), - total_transactions: expect.any(Number) - }); - }); - - test('Should test /blocks endpoint', async () => { - // To ensure that the newest block is not verified yet, we're sending a transaction. - await anyTransaction(alice); - - const blocksResponse = await query('/blocks', { direction: 'older', limit: '1' }); - expect(blocksResponse).toHaveLength(1); - const apiBlock = blocksResponse[0]; - expect(apiBlock).toEqual({ - number: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - hash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), - status: expect.stringMatching(/sealed|verified/), - timestamp: expect.any(Number) - }); - - // Sanity checks for the values we can't control. - expect(apiBlock.l1TxCount).toBeGreaterThanOrEqual(0); - expect(apiBlock.l2TxCount).toBeGreaterThanOrEqual(0); - expectTimestampToBeSane(apiBlock.timestamp); - - // Retrieve block details through web3 API and cross-check the root hash. 
- const blockHash = await alice.provider.getBlock(apiBlock.number).then((block) => block.hash); - expect(apiBlock.hash).toEqual(blockHash); - - // Now try to find the same block using the "newer" query. - const newBlocksResponse = await query('/blocks', { - from: (apiBlock.number - 1).toString(), - direction: 'newer', - limit: '1' - }); - expect(newBlocksResponse).toHaveLength(1); - const apiBlockCopy = newBlocksResponse[0]; - // Response should be the same. - expect(apiBlockCopy).toEqual(apiBlock); - - // Finally, in the long mode also check, that once block becomes finalized, status also changes - // in the explorer API. - if (!testMaster.isFastMode()) { - await waitFor(async () => { - const verifiedApiBlock = ( - await query('/blocks', { from: (apiBlock.number - 1).toString(), direction: 'newer', limit: '1' }) - )[0]; - return verifiedApiBlock.status == 'verified'; - }, 'Block was not verified'); - } - }); - - test('Should test /l1_batches endpoint', async () => { - if (testMaster.isFastMode()) { - // This test requires a new L1 batch to be created, which may be very time consuming on stage. - return; - } - - // To ensure that the newest batch is not verified yet, we're sealing a new batch. - await waitForNewL1Batch(alice); - - const l1BatchesResponse = await query('/l1_batches', { direction: 'older', limit: '1' }); - expect(l1BatchesResponse).toHaveLength(1); - const apiL1Batch = l1BatchesResponse[0]; - expect(apiL1Batch).toMatchObject({ - number: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - status: expect.stringMatching(/sealed|verified/), - timestamp: expect.any(Number) - }); - - // Sanity checks for the values we can't control. - expect(apiL1Batch.l1TxCount).toBeGreaterThanOrEqual(0); - expect(apiL1Batch.l2TxCount).toBeGreaterThanOrEqual(0); - expectTimestampToBeSane(apiL1Batch.timestamp); - - // Now try to find the same batch using the "newer" query. - const newL1BatchesResponse = await query('/l1_batches', { - from: (apiL1Batch.number - 1).toString(), - direction: 'newer', - limit: '1' - }); - expect(newL1BatchesResponse).toHaveLength(1); - const apiL1BatchCopy = newL1BatchesResponse[0]; - // Response should be the same. - expect(apiL1BatchCopy).toEqual(apiL1Batch); - - // Finally, in the long mode also check, that once l1 batch becomes finalized, status also changes - // in the explorer API. - if (!testMaster.isFastMode()) { - await waitFor(async () => { - const verifiedApiL1Batch = ( - await query('/l1_batches', { - from: (apiL1Batch.number - 1).toString(), - direction: 'newer', - limit: '1' - }) - )[0]; - return verifiedApiL1Batch.status == 'verified'; - }, 'L1 batch was not verified'); - } - }); - - test('Should test /block endpoint', async () => { - // Send the transaction to query block data about. 
- const tx = await anyTransaction(alice); - - const apiBlock = await query(`/block/${tx.blockNumber}`); - expect(apiBlock).toMatchObject({ - number: expect.any(Number), - l1BatchNumber: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - rootHash: expect.stringMatching(HASH_REGEX), - status: expect.stringMatching(/sealed|verified/), - timestamp: expect.any(Number), - baseSystemContractsHashes: { - bootloader: expect.stringMatching(HASH_REGEX), - default_aa: expect.stringMatching(HASH_REGEX) - }, - l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number), - operatorAddress: expect.stringMatching(/^0x[\da-f]{40}$/) - }); - expect(apiBlock.number).toEqual(tx.blockNumber); - expect(apiBlock.rootHash).toEqual(tx.blockHash); - expect(apiBlock.l1TxCount).toBeGreaterThanOrEqual(0); - expect(apiBlock.l2TxCount).toBeGreaterThanOrEqual(1); // We know that at least 1 tx is included there. - expectTimestampToBeSane(apiBlock.timestamp); - - // Perform L1-related checks in the long mode only. - if (!testMaster.isFastMode()) { - // Check that L1 transaction count can also be non-zero. - const l1Tx = await alice.deposit({ token: zksync.utils.ETH_ADDRESS, amount: 1 }).then((tx) => tx.wait()); - const apiBlockWithL1Tx = await query(`/block/${l1Tx.blockNumber}`); - expect(apiBlockWithL1Tx.l1TxCount).toBeGreaterThanOrEqual(1); - - // Wait until the block is verified and check that the required fields are set. - let verifiedBlock = null; - await waitFor(async () => { - verifiedBlock = await query(`/block/${tx.blockNumber}`); - return verifiedBlock.status == 'verified'; - }, 'Block was not verified'); - expect(verifiedBlock).toEqual({ - number: expect.any(Number), - l1BatchNumber: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - rootHash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), - status: 'verified', - timestamp: expect.any(Number), - commitTxHash: expect.stringMatching(HASH_REGEX), - committedAt: expect.stringMatching(DATE_REGEX), - proveTxHash: expect.stringMatching(HASH_REGEX), - provenAt: expect.stringMatching(DATE_REGEX), - executeTxHash: expect.stringMatching(HASH_REGEX), - executedAt: expect.stringMatching(DATE_REGEX), - baseSystemContractsHashes: { - bootloader: expect.stringMatching(HASH_REGEX), - default_aa: expect.stringMatching(HASH_REGEX) - }, - l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number), - operatorAddress: expect.stringMatching(/^0x[\da-f]{40}$/) - }); - } - }); - - test('Should test /l1_batch endpoint', async () => { - if (testMaster.isFastMode()) { - // This test requires a new L1 batch to be created, which may be very time consuming on stage. - return; - } - - // Send the transaction to query l1 batch data about. - const tx = await waitForNewL1Batch(alice); - - const apiL1Batch = await query(`/l1_batch/${tx.l1BatchNumber}`); - expect(apiL1Batch).toMatchObject({ - number: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - status: expect.stringMatching(/sealed|verified/), - timestamp: expect.any(Number), - baseSystemContractsHashes: { - bootloader: expect.stringMatching(HASH_REGEX), - default_aa: expect.stringMatching(HASH_REGEX) - }, - l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number) - }); - expect(apiL1Batch.number).toEqual(tx.l1BatchNumber); - expect(apiL1Batch.l1TxCount).toBeGreaterThanOrEqual(0); - expect(apiL1Batch.l2TxCount).toBeGreaterThanOrEqual(1); // We know that at least 1 tx is included there. 
- expectTimestampToBeSane(apiL1Batch.timestamp); - - // Check that L1 transaction count can also be non-zero. - const l1Tx = await alice.deposit({ token: zksync.utils.ETH_ADDRESS, amount: 1 }).then((tx) => tx.wait()); - // Wait for l1 batch to be sealed. - await waitForNewL1Batch(alice); - const l1TxReceipt = await alice.provider.getTransactionReceipt(l1Tx.transactionHash); - - const l1BatchWithL1Tx = await query(`/l1_batch/${l1TxReceipt.l1BatchNumber}`); - expect(l1BatchWithL1Tx.l1TxCount).toBeGreaterThanOrEqual(1); - - // Wait until the block is verified and check that the required fields are set. - let verifiedL1Batch = null; - await waitFor(async () => { - verifiedL1Batch = await query(`/l1_batch/${tx.l1BatchNumber}`); - return verifiedL1Batch.status == 'verified'; - }, 'Block was not verified'); - expect(verifiedL1Batch).toEqual({ - number: expect.any(Number), - l1TxCount: expect.any(Number), - l2TxCount: expect.any(Number), - rootHash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), - status: 'verified', - timestamp: expect.any(Number), - commitTxHash: expect.stringMatching(HASH_REGEX), - committedAt: expect.stringMatching(DATE_REGEX), - proveTxHash: expect.stringMatching(HASH_REGEX), - provenAt: expect.stringMatching(DATE_REGEX), - executeTxHash: expect.stringMatching(HASH_REGEX), - executedAt: expect.stringMatching(DATE_REGEX), - baseSystemContractsHashes: { - bootloader: expect.stringMatching(HASH_REGEX), - default_aa: expect.stringMatching(HASH_REGEX) - }, - l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number) - }); - }); - - test('Should test /account endpoint for an EOA', async () => { - // Check response for the empty account. - const newEoa = testMaster.newEmptyAccount(); - const newEoaResponse = await query(`/account/${newEoa.address}`); - expect(newEoaResponse).toEqual({ - address: newEoa.address.toLowerCase(), - balances: {}, - sealedNonce: 0, - verifiedNonce: 0, - accountType: 'eOA' - }); - - // Check response for the non-empty account. - const aliceResponse = await query(`/account/${alice.address}`); - const aliceExpectedBalances: any = {}; - aliceExpectedBalances[zksync.utils.ETH_ADDRESS] = await apiBalanceObject( - zksync.utils.ETH_ADDRESS, - await alice.getBalance() - ); - aliceExpectedBalances[erc20.l2Address.toLowerCase()] = await apiBalanceObject( - erc20.l2Address, - await alice.getBalance(erc20.l2Address), - erc20.l1Address - ); - expect(aliceResponse.balances).toEqual(aliceExpectedBalances); - }); - - test('Should test /account endpoint for a contract', async () => { - // Check response for the empty account. 
- const contract = await deployContract(alice, contracts.counter, []); - const contractResponse = await query(`/account/${contract.address}`); - expect(contractResponse).toEqual({ - address: contract.address.toLowerCase(), - balances: {}, - sealedNonce: 0, - verifiedNonce: 0, - accountType: 'contract' - }); - }); - - test('Should test /transaction endpoint', async () => { - const amount = 1; - const bob = testMaster.newEmptyAccount(); - const txNonce = await alice.getTransactionCount(); - const txHandle = await alice.transfer({ to: bob.address, amount, token: erc20.l2Address }); - const tx = await txHandle.wait(); - - const apiTx = await query(`/transaction/${tx.transactionHash}`); - expect(apiTx).toMatchObject({ - transactionHash: tx.transactionHash, - nonce: txNonce, - blockNumber: tx.blockNumber, - blockHash: tx.blockHash, - indexInBlock: expect.any(Number), - status: expect.stringMatching(/included|verified/), - fee: ethers.utils.hexValue(extractFee(tx as any).feeAfterRefund), - isL1Originated: false, - initiatorAddress: alice.address.toLowerCase(), - receivedAt: expect.stringMatching(DATE_REGEX), - miniblockTimestamp: expect.any(Number), - balanceChanges: expect.any(Array), - erc20Transfers: expect.any(Array), - data: { - calldata: txHandle.data, - contractAddress: erc20.l2Address.toLowerCase(), - factoryDeps: null, - value: ethers.utils.hexValue(txHandle.value) - }, - logs: expect.any(Array), - transfer: { - from: alice.address.toLowerCase(), - to: bob.address.toLowerCase(), - amount: ethers.utils.hexValue(amount), - tokenInfo: await erc20TokenInfo(erc20.l2Address, erc20.l1Address) - } - }); - - if (!testMaster.isFastMode()) { - // Wait for the block to become verified and check that the corresponding fields are set. - await waitFor(async () => { - const verifiedBlock = await query(`/block/${tx.blockNumber}`); - return verifiedBlock.status == 'verified'; - }, 'Block was not verified'); - - const finalizedApiTx = await query(`/transaction/${tx.transactionHash}`); - expect(finalizedApiTx).toMatchObject({ - ethCommitTxHash: expect.stringMatching(HASH_REGEX), - ethProveTxHash: expect.stringMatching(HASH_REGEX), - ethExecuteTxHash: expect.stringMatching(HASH_REGEX), - l1BatchNumber: expect.any(Number) - }); - } - }); - - test('Should test /transaction endpoint for L1->L2', async () => { - if (testMaster.isFastMode()) { - // This test requires an L1->L2 transaction to be included, which may be time consuming on stage. 
- return; - } - - const amount = 1; - const txHandle = await alice.deposit({ to: alice.address, amount, token: erc20.l1Address, approveERC20: true }); - const tx = await txHandle.wait(); - - const apiTx = await query(`/transaction/${tx.transactionHash}`); - expect(ethers.BigNumber.from(apiTx.fee).toNumber()).toBeGreaterThan(0); - expect(apiTx).toMatchObject({ - transactionHash: tx.transactionHash, - blockNumber: tx.blockNumber, - blockHash: tx.blockHash, - indexInBlock: expect.any(Number), - status: expect.stringMatching(/included|verified/), - fee: ethers.utils.hexValue(tx.gasUsed.mul(tx.effectiveGasPrice)), - isL1Originated: true, - initiatorAddress: expect.stringMatching(HEX_VALUE_REGEX), - receivedAt: expect.stringMatching(DATE_REGEX), - miniblockTimestamp: expect.any(Number), - balanceChanges: expect.any(Array), - erc20Transfers: expect.any(Array), - data: { - calldata: expect.stringMatching(HEX_VALUE_REGEX), - contractAddress: expect.stringMatching(ADDRESS_REGEX), - factoryDeps: expect.any(Array), - value: expect.stringMatching(HEX_VALUE_REGEX) - }, - logs: expect.any(Array) - }); - }); - - test('Should test /transactions endpoint', async () => { - const amount = 1; - const bob = testMaster.newEmptyAccount(); - const txNonce = await alice.getNonce(); - const tx = await alice.transfer({ to: bob.address, amount }).then((tx) => tx.wait()); - - const response: any = await query('/transactions', { - blockNumber: tx.blockNumber.toString(), - limit: '100', - direction: 'older' - }); - expect(response).toEqual({ - total: expect.any(Number), - list: expect.anything() - }); - expect(response.total).toBeGreaterThanOrEqual(1); - - const apiTx = response.list.find((apiTx: any) => apiTx.transactionHash == tx.transactionHash); - expect(apiTx).toBeDefined(); - - // Ensure the response format based on the performed ETH transfer. - // After this check we assume that the response format is the same in other responses - // to avoid being too verbose. - expect(apiTx).toMatchObject({ - transactionHash: tx.transactionHash, - nonce: txNonce, - blockNumber: tx.blockNumber, - blockHash: tx.blockHash, - indexInBlock: expect.any(Number), - status: expect.stringMatching(/included|verified/), - fee: ethers.utils.hexValue(extractFee(tx as any).feeAfterRefund), - isL1Originated: false, - initiatorAddress: alice.address.toLowerCase(), - receivedAt: expect.stringMatching(DATE_REGEX), - miniblockTimestamp: expect.any(Number), - balanceChanges: expect.any(Array), - erc20Transfers: expect.any(Array), - data: { - calldata: '0x', - contractAddress: bob.address.toLowerCase(), - factoryDeps: null, - value: ethers.utils.hexValue(amount) - }, - transfer: { - from: alice.address.toLowerCase(), - to: bob.address.toLowerCase(), - amount: ethers.utils.hexValue(amount), - tokenInfo: { - address: zksync.utils.ETH_ADDRESS, - l1Address: zksync.utils.ETH_ADDRESS, - l2Address: zksync.utils.ETH_ADDRESS, - symbol: 'ETH', - name: 'Ether', - decimals: 18, - usdPrice: expect.any(String) - } - }, - type: tx.type - }); - - // Perform L1 batch-related checks in the long mode only. 
- if (!testMaster.isFastMode()) { - const tx = await waitForNewL1Batch(alice); - const response: any = await query('/transactions', { - l1BatchNumber: tx.l1BatchNumber.toString(), - limit: '100', - direction: 'older' - }); - expect(response).toEqual({ - total: expect.any(Number), - list: expect.anything() - }); - expect(response.total).toBeGreaterThanOrEqual(1); - - const apiTx = response.list.find((apiTx: any) => apiTx.transactionHash == tx.transactionHash); - expect(apiTx).toBeDefined(); - } - - // Check other query parameters combinations - const backwards = await query('/transactions', { - limit: '1', - direction: 'older' - }); - expect(backwards.list.length).toEqual(1); - - const forward = await query('/transactions', { - limit: '1', - offset: '1', - direction: 'newer' - }); - expect(forward.list.length).toEqual(1); - - const tom = testMaster.newEmptyAccount(); - await alice.transfer({ to: tom.address, amount }).then((tx) => tx.wait()); - - // Alice sent at least 2 txs: to Bob and to Tom. - let accountTxs = await query('/transactions', { - limit: '2', - direction: 'older', - accountAddress: alice.address - }); - expect(accountTxs.list.length).toEqual(2); - // Tom received only 1 tx from Alice. - accountTxs = await query('/transactions', { - limit: '10', - direction: 'older', - accountAddress: tom.address - }); - expect(accountTxs.list.length).toEqual(1); - - // Invariant: ERC20 tokens are distributed during init, so it must have transactions. - const contract = await query('/transactions', { - limit: '1', - direction: 'older', - contractAddress: erc20.l2Address - }); - expect(contract.list.length).toEqual(1); - }); - - test('Should test /contract endpoint', async () => { - const counterContract = await deployContract(alice, contracts.counter, []); - const createdInBlockNumber = ( - await alice.provider.getTransactionReceipt(counterContract.deployTransaction.hash) - ).blockNumber; - const apiContractInfo = await query(`/contract/${counterContract.address}`); - expect(apiContractInfo).toEqual({ - address: counterContract.address.toLowerCase(), - creatorAddress: alice.address.toLowerCase(), - creatorTxHash: counterContract.deployTransaction.hash, - createdInBlockNumber, - totalTransactions: 0, - bytecode: ethers.utils.hexlify(contracts.counter.bytecode), - verificationInfo: null, - balances: {} - }); - - // ERC20 contract is guaranteed to have more than 0 transactions. - const apiErc20Info = await query(`/contract/${erc20.l2Address}`); - expect(apiErc20Info.totalTransactions).toBeGreaterThan(0); - }); - - test('Should test /events endpoint', async () => { - const apiEvents = await query('/events', { - direction: 'older', - limit: '100', - fromBlockNumber: (await alice.provider.getBlockNumber()).toString() - }); - // Check generic API response structure. 
- expect(apiEvents).toEqual({ - list: expect.anything(), - total: expect.any(Number) - }); - expect(apiEvents.total).toBeGreaterThan(0); - expect(apiEvents.list.length).toBeGreaterThan(0); - expect(apiEvents.list[0]).toMatchObject({ - address: expect.stringMatching(ADDRESS_REGEX), - blockHash: expect.stringMatching(HASH_REGEX), - blockNumber: expect.stringMatching(HEX_VALUE_REGEX), - data: expect.stringMatching(HEX_VALUE_REGEX), - logIndex: expect.stringMatching(HEX_VALUE_REGEX), - removed: expect.any(Boolean), - topics: expect.any(Array), - transactionHash: expect.stringMatching(HASH_REGEX), - transactionIndex: expect.stringMatching(HEX_VALUE_REGEX), - transactionLogIndex: expect.stringMatching(HEX_VALUE_REGEX) - }); - - // Test per-contract filtering. - const apiErc20Events = await query('/events', { - direction: 'older', - limit: '100', - contractAddress: erc20.l2Address - }); - for (const apiEvent of apiErc20Events.list) { - expect(apiEvent.address).toEqual(erc20.l2Address.toLowerCase()); - } - }); - - test('Should test /token endpoint', async () => { - const apiToken = await query(`/token/${erc20.l2Address}`); - expect(apiToken).toEqual(await erc20TokenInfo(erc20.l2Address, erc20.l1Address)); - }); - - test('should test contract verification', async () => { - if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { - // Contract verification test is not requested to run. - return; - } - - const counterContract = await deployContract(alice, contracts.counter, []); - const constructorArguments = counterContract.interface.encodeDeploy([]); - - const requestBody = { - contractAddress: counterContract.address, - contractName: 'contracts/counter/counter.sol:Counter', - sourceCode: getContractSource('counter/counter.sol'), - compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, - optimizationUsed: true, - constructorArguments, - isSystem: true - }; - let requestId = await query('/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, counterContract.address); - }); - - test('should test multi-files contract verification', async () => { - if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { - // Contract verification test is not requested to run. 
- return; - } - - const contractFactory = new zksync.ContractFactory(contracts.create.abi, contracts.create.bytecode, alice); - const contractHandle = await contractFactory.deploy({ - customData: { - factoryDeps: [contracts.create.factoryDep] - } - }); - const importContract = await contractHandle.deployed(); - const standardJsonInput = { - language: 'Solidity', - sources: { - 'contracts/create/create.sol': { content: getContractSource('create/create.sol') }, - 'contracts/create/Foo.sol': { content: getContractSource('create/Foo.sol') } - }, - settings: { - optimizer: { enabled: true }, - isSystem: true - } - }; - - const constructorArguments = importContract.interface.encodeDeploy([]); - - const requestBody = { - contractAddress: importContract.address, - contractName: 'contracts/create/create.sol:Import', - sourceCode: standardJsonInput, - codeFormat: 'solidity-standard-json-input', - compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, - optimizationUsed: true, - constructorArguments - }; - let requestId = await query('/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, importContract.address); - }); - - test('should test yul contract verification', async () => { - if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { - // Contract verification test is not requested to run. - return; - } - const contractPath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/Empty.yul`; - const sourceCode = fs.readFileSync(contractPath, 'utf8'); - - const bytecodePath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/Empty.yul.zbin`; - const bytecode = fs.readFileSync(bytecodePath); - - const contractFactory = new zksync.ContractFactory([], bytecode, alice); - const deployTx = await contractFactory.deploy(); - const contractAddress = (await deployTx.deployed()).address; - - const requestBody = { - contractAddress, - contractName: 'Empty', - sourceCode, - codeFormat: 'yul-single-file', - compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, - optimizationUsed: true, - constructorArguments: '0x', - isSystem: true - }; - let requestId = await query('/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, contractAddress); - }); - - test('should test vyper contract verification', async () => { - if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { - // Contract verification test is not requested to run. 
- return; - } - - const contractFactory = new zksync.ContractFactory( - contracts.createForwarder.abi, - contracts.createForwarder.bytecode, - alice - ); - const contractHandle = await contractFactory.deploy({ - customData: { - factoryDeps: [contracts.createForwarder.factoryDep] - } - }); - const contractAddress = (await contractHandle.deployed()).address; - - const requestBody = { - contractAddress, - contractName: 'CreateForwarder', - sourceCode: { - CreateForwarder: getContractSource('vyper/CreateForwarder.vy'), - DeployMe: getContractSource('vyper/DeployMe.vy') - }, - codeFormat: 'vyper-multi-file', - compilerZkvyperVersion: ZKVYPER_VERSION, - compilerVyperVersion: VYPER_VERSION, - optimizationUsed: true, - constructorArguments: '0x' - }; - let requestId = await query('/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, contractAddress); - }); - - afterAll(async () => { - await testMaster.deinitialize(); - }); - - /** - * Performs an API call to the Explorer API. - * - * @param endpoint API endpoint to call. - * @param queryParams Parameters for a query string. - * @param requestBody Request body. If provided, a POST request would be met and body would be encoded to JSON. - * @returns API response parsed as a JSON. - */ - async function query(endpoint: string, queryParams?: { [key: string]: string }, requestBody?: any): Promise<any> { - const url = new URL(endpoint, testMaster.environment().explorerUrl); - // Iterate through query params and add them to URL. - if (queryParams) { - Object.entries(queryParams).forEach(([key, value]) => url.searchParams.set(key, value)); - } - - let init = undefined; - if (requestBody) { - init = { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify(requestBody) - }; - } - - let response = await fetch(url, init); - try { - return await response.json(); - } catch (e) { - throw { - error: 'Could not decode JSON in response', - status: `${response.status} ${response.statusText}` - }; - } - } - - /** - * Constructs an Explorer API balance object representation - */ - async function apiBalanceObject(address: string, balance: ethers.BigNumber, l1Address?: string) { - address = address.toLowerCase(); - // `hexValue` can contain an uneven number of nibbles (unlike `.toHexString()`), which is required for API. - const hexBalance = ethers.utils.hexValue(balance); - if (address == zksync.utils.ETH_ADDRESS) { - return { - balance: hexBalance, - tokenInfo: { - address, - decimals: 18, - l1Address: address, - l2Address: address, - name: 'Ether', - symbol: 'ETH', - usdPrice: expect.any(String) - } - }; - } - - return { - balance: hexBalance, - tokenInfo: await erc20TokenInfo(address, l1Address) - }; - } - - /** - * Constructs an object that represent the token information sent by the Explorer API. - */ - async function erc20TokenInfo(address: string, l1Address?: string) { - const erc20 = IERC20MetadataFactory.connect(address, alice); - return { - address: address.toLowerCase(), - decimals: await erc20.decimals(), - l1Address: l1Address ? l1Address.toLowerCase() : expect.stringMatching(ADDRESS_REGEX), - l2Address: address.toLowerCase(), - name: await erc20.name(), - symbol: await erc20.symbol(), - usdPrice: expect.any(String) - }; - } - - /** - * Runs a provided asynchronous predicate until it returns `true`. - * If it doesn't happen for a while, fails the test from which it has been called.
- */ - async function waitFor(cond: () => Promise, errorMessage: string) { - const MAX_RETRIES = 15_000; - let iter = 0; - while (iter++ < MAX_RETRIES) { - if (await cond()) { - return; - } - await sleep(alice.provider.pollingInterval); - } - - expect(null).fail(errorMessage); - } - - async function expectVerifyRequestToSucceed(requestId: number, contractAddress: string) { - let retries = 0; - while (true) { - if (retries > 20) { - throw new Error('Too many retries'); - } - - let statusObject = await query(`/contract_verification/${requestId}`); - if (statusObject.status == 'successful') { - break; - } else if (statusObject.status == 'failed') { - throw new Error(statusObject.error); - } else { - retries += 1; - await sleep(1000); - } - } - - let contractObject = await query(`/contract/${contractAddress}`); - expect(contractObject.verificationInfo).toBeTruthy(); - } -}); - -/** - * Checks that timestamp has some relatively sane value (not too much in the past, and not in the future) - */ -function expectTimestampToBeSane(timestamp: number) { - const minDate = new Date('01 Jan 2022 00:00:00 UTC').getSeconds(); - const maxDate = Date.now(); - expect(timestamp).toBeGreaterThan(minDate); - expect(timestamp).toBeLessThanOrEqual(maxDate); -} diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index cab3dda6c73d..48888e106541 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -6,7 +6,7 @@ import * as zksync from 'zksync-web3'; import { types } from 'zksync-web3'; import { ethers, Event } from 'ethers'; import { serialize } from '@ethersproject/transactions'; -import { deployContract, getTestContract, waitForNewL1Batch } from '../../src/helpers'; +import { deployContract, getTestContract, waitForNewL1Batch, anyTransaction } from '../../src/helpers'; import { shouldOnlyTakeFee } from '../../src/modifiers/balance-checker'; import fetch, { RequestInit } from 'node-fetch'; import { EIP712_TX_TYPE, PRIORITY_OPERATION_L2_TX_TYPE } from 'zksync-web3/build/src/utils'; @@ -734,6 +734,50 @@ describe('web3 API compatibility tests', () => { ).rejects.toThrow(`invalid filter: if blockHash is supplied fromBlock and toBlock must not be`); }); + test('Should check eth_feeHistory', async () => { + const receipt = await anyTransaction(alice); + const response = await alice.provider.send('eth_feeHistory', [ + '0x2', + ethers.utils.hexlify(receipt.blockNumber), + [] + ]); + + expect(ethers.BigNumber.from(response.oldestBlock).toNumber()).toEqual(receipt.blockNumber - 1); + + expect(response.baseFeePerGas).toHaveLength(3); + for (let i = 0; i < 2; i += 1) { + const expectedBaseFee = (await alice.provider.getBlock(receipt.blockNumber - 1 + i)).baseFeePerGas; + expect(ethers.BigNumber.from(response.baseFeePerGas[i])).toEqual(expectedBaseFee); + } + }); + + test('Should check zks_getProtocolVersion endpoint', async () => { + const latestProtocolVersion = await alice.provider.send('zks_getProtocolVersion', []); + let expectedSysContractsHashes = { + bootloader: expect.stringMatching(HEX_VALUE_REGEX), + default_aa: expect.stringMatching(HEX_VALUE_REGEX) + }; + let expectedProtocolVersion = { + version_id: expect.any(Number), + base_system_contracts: expectedSysContractsHashes, + verification_keys_hashes: { + params: { + recursion_circuits_set_vks_hash: expect.stringMatching(HEX_VALUE_REGEX), + recursion_leaf_level_vk_hash: expect.stringMatching(HEX_VALUE_REGEX), + recursion_node_level_vk_hash: 
expect.stringMatching(HEX_VALUE_REGEX) + }, + recursion_scheduler_level_vk_hash: expect.stringMatching(HEX_VALUE_REGEX) + }, + timestamp: expect.any(Number) + }; + expect(latestProtocolVersion).toMatchObject(expectedProtocolVersion); + + const exactProtocolVersion = await alice.provider.send('zks_getProtocolVersion', [ + latestProtocolVersion.version_id + ]); + expect(exactProtocolVersion).toMatchObject(expectedProtocolVersion); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index 2b8ef178bdc2..d9d4484bd615 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -96,6 +96,8 @@ describe('Smart contract behavior checks', () => { const infiniteLoop = await deployContract(alice, contracts.infinite, []); // Test eth_call first + // await expect(infiniteLoop.callStatic.infiniteLoop()).toBeRejected('cannot estimate transaction: out of gas'); + // ...and then an actual transaction await expect(infiniteLoop.infiniteLoop({ gasLimit: 1_000_000 })).toBeReverted([]); }); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index cbe627e39ed1..f2525cc4d1ad 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -190,7 +190,8 @@ async function setInternalL1GasPrice(provider: zksync.Provider, newPrice?: strin } catch (_) {} // Run server in background. - let command = 'zk server --components api,tree_lightweight,eth,data_fetcher,state_keeper'; + let command = 'zk server --components api,tree,eth,data_fetcher,state_keeper'; + command = `DATABASE_MERKLE_TREE_MODE=lightweight ${command}`; if (newPrice) { command = `ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE=${newPrice} ${command}`; } diff --git a/core/tests/ts-integration/tests/l1.test.ts b/core/tests/ts-integration/tests/l1.test.ts index 44bd543519fe..d719350e4c24 100644 --- a/core/tests/ts-integration/tests/l1.test.ts +++ b/core/tests/ts-integration/tests/l1.test.ts @@ -374,6 +374,13 @@ function getOverheadForTransaction( blockOverheadForTransaction = overheadForLength; } + // The overhead for possible published public data + // let maxPubdataInTx = ceilDiv(bodyGasLimit, gasPricePerPubdata); + // let overheadForPublicData = ceilDiv(maxPubdataInTx.mul(maxBlockOverhead), MAX_PUBDATA_PER_BLOCK); + // if (overheadForPublicData.gt(blockOverheadForTransaction)) { + // blockOverheadForTransaction = overheadForPublicData; + // } + // The overhead for gas that could be used to use single-instance circuits let overheadForSingleInstanceCircuits = ceilDiv(bodyGasLimit.mul(maxBlockOverhead), L2_TX_MAX_GAS_LIMIT); if (overheadForSingleInstanceCircuits.gt(blockOverheadForTransaction)) { diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index bd59f4f821ec..6f30d9c0c6f2 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -1,28 +1,50 @@ import * as utils from 'zk/build/utils'; import { Tester } from './tester'; import * as zkweb3 from 'zksync-web3'; -import { BigNumber, Contract, ethers, Wallet } from 'ethers'; +import { BigNumber, BigNumberish, ethers } from 'ethers'; import { expect } from 'chai'; import { hashBytecode } from 'zksync-web3/build/src/utils'; import fs from 'fs'; -import path from 'path'; -import { IZkSyncFactory } from 
'zksync-web3/build/typechain'; import { TransactionResponse } from 'zksync-web3/build/src/types'; +import { BytesLike } from '@ethersproject/bytes'; +const L1_CONTRACTS_FOLDER = `${process.env.ZKSYNC_HOME}/contracts/ethereum/artifacts/cache/solpp-generated-contracts`; +const L1_DEFAULT_UPGRADE_ABI = new ethers.utils.Interface( + require(`${L1_CONTRACTS_FOLDER}/upgrades/DefaultUpgrade.sol/DefaultUpgrade.json`).abi +); +const DIAMOND_CUT_FACET_ABI = new ethers.utils.Interface( + require(`${L1_CONTRACTS_FOLDER}/zksync/facets/DiamondCut.sol/DiamondCutFacet.json`).abi +); +const L2_FORCE_DEPLOY_UPGRADER_ABI = new ethers.utils.Interface( + require(`${process.env.ZKSYNC_HOME}/contracts/zksync/artifacts-zk/cache-zk/solpp-generated-contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi +); +const COMPLEX_UPGRADER_ABI = new ethers.utils.Interface( + require(`${process.env.ZKSYNC_HOME}/etc/system-contracts/artifacts-zk/cache-zk/solpp-generated-contracts/ComplexUpgrader.sol/ComplexUpgrader.json`).abi +); +const COUNTER_BYTECODE = + require(`${process.env.ZKSYNC_HOME}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`).deployedBytecode; const depositAmount = ethers.utils.parseEther('0.001'); describe('Upgrade test', function () { let tester: Tester; let alice: zkweb3.Wallet; - let mainContract: Contract; + let govWallet: ethers.Wallet; + let mainContract: ethers.Contract; let bootloaderHash: string; + let proposeUpgradeCalldata: string; + let executeUpgradeCalldata: string; + let forceDeployAddress: string; + let forceDeployBytecode: string; - before('create test wallet', async () => { + before('Create test wallet', async () => { tester = await Tester.init(process.env.CHAIN_ETH_NETWORK || 'localhost'); alice = tester.emptyWallet(); + + const govMnemonic = require('../../../../etc/test_config/constant/eth.json').mnemonic; + govWallet = ethers.Wallet.fromMnemonic(govMnemonic, "m/44'/60'/0'/0/1").connect(alice._providerL1()); }); - step('run server and execute some transactions', async () => { + step('Run server and execute some transactions', async () => { // Make sure server isn't running. try { await utils.exec('pkill zksync_server'); @@ -30,9 +52,12 @@ describe('Upgrade test', function () { await utils.sleep(120); } catch (_) {} - // Set 1000 seconds deadline for `CommitBlock` operation. - process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE = '1000'; - process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; + // Set small timeouts. + process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE = '1'; + process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE = '1'; + process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; + // Must be > 1s, because bootloader requires l1 batch timestamps to be incremental. + process.env.CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS = '2000'; // Run server in background. 
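        // Note that `utils.background` detaches the spawned process and returns immediately,
        // so the steps below are responsible for waiting until the API is actually up.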
utils.background( `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper > server_logs.txt` @@ -101,48 +126,114 @@ describe('Upgrade test', function () { } }); await txHandle.wait(); + await waitForNewL1Batch(alice); + }); - // Set the new bootloader hash and do not send the l1 batches with new bootloader - process.env.CHAIN_STATE_KEEPER_BOOTLOADER_HASH = bootloaderHash; - process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE = '1'; + step('Propose upgrade', async () => { + forceDeployAddress = '0xf04ce00000000000000000000000000000000000'; + forceDeployBytecode = COUNTER_BYTECODE; - await utils.exec('pkill zksync_server'); - await utils.sleep(10); - utils.background( - `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper >> server_logs.txt` - ); + const forceDeployment: ForceDeployment = { + bytecodeHash: hashBytecode(forceDeployBytecode), + newAddress: forceDeployAddress, + callConstructor: false, + value: BigNumber.from(0), + input: '0x' + }; + + const delegateTo = process.env.CONTRACTS_L2_DEFAULT_UPGRADE_ADDR!; + const delegateCalldata = L2_FORCE_DEPLOY_UPGRADER_ABI.encodeFunctionData('forceDeploy', [[forceDeployment]]); + const data = COMPLEX_UPGRADER_ABI.encodeFunctionData('upgrade', [delegateTo, delegateCalldata]); + + const newProtocolVersion = (await alice._providerL2().send('zks_getProtocolVersion', [null])).version_id + 1; + const calldata = await prepareUpgradeCalldata(govWallet, alice._providerL2(), { + l2ProtocolUpgradeTx: { + txType: 254, + from: '0x0000000000000000000000000000000000008007', // FORCE_DEPLOYER address + to: '0x000000000000000000000000000000000000800f', // ComplexUpgrader address + gasLimit: process.env.CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT!, + gasPerPubdataByteLimit: zkweb3.utils.REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, + maxFeePerGas: 0, + maxPriorityFeePerGas: 0, + paymaster: 0, + value: 0, + reserved: [0, 0, 0, 0], + data, + signature: '0x', + factoryDeps: [hashBytecode(forceDeployBytecode)], + paymasterInput: '0x', + reservedDynamic: '0x' + }, + factoryDeps: [forceDeployBytecode], + bootloaderHash, + upgradeTimestamp: 0, + newProtocolVersion + }); + proposeUpgradeCalldata = calldata.proposeTransparentUpgrade; + executeUpgradeCalldata = calldata.executeUpgrade; + + await ( + await govWallet.sendTransaction({ + to: mainContract.address, + data: proposeUpgradeCalldata + }) + ).wait(); + + // Wait for server to process L1 event. 
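+        // A fixed sleep is the simplest thing that works here; a (hypothetical) sturdier
+        // variant would poll the contract instead, e.g.:
+        //   while ((await mainContract.getUpgradeProposalState()) == 0) { await utils.sleep(1); }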
await utils.sleep(10); - // Wait for finalizing the last tx with old bootloader - await txHandle.waitFinalize(); - // Create one more tx with the new bootloader - await checkedRandomTransfer(alice, BigNumber.from(1)); }); - step('upgrade bootloader on contract', async () => { - const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); - const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - const deployWallet = Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect( - tester.ethProvider - ); + step('Check bootloader is updated on L2', async () => { + const receipt = await waitForNewL1Batch(alice); + const batchDetails = await alice.provider.getL1BatchDetails(receipt.l1BatchNumber); + expect(batchDetails.baseSystemContractsHashes.bootloader).to.eq(bootloaderHash); + }); - const address = await tester.web3Provider.getMainContractAddress(); - const contract = IZkSyncFactory.connect(address, deployWallet); - let tx = await contract.setL2BootloaderBytecodeHash(bootloaderHash); - await tx.wait(10); - // Restart server. And start sending the blocks with the new bootloader - await utils.exec('pkill zksync_server'); - await utils.sleep(10); - utils.background( - `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper >> server_logs.txt` - ); - await utils.sleep(10); + step('Execute upgrade', async () => { + // Wait for batches with old bootloader to be executed on L1. + let l1BatchNumber = await alice.provider.getL1BatchNumber(); + while ( + (await alice.provider.getL1BatchDetails(l1BatchNumber)).baseSystemContractsHashes.bootloader == + bootloaderHash + ) { + l1BatchNumber -= 1; + } + + let lastBatchExecuted = await mainContract.getTotalBlocksExecuted(); + let tryCount = 0; + while (lastBatchExecuted < l1BatchNumber && tryCount < 10) { + lastBatchExecuted = await mainContract.getTotalBlocksExecuted(); + tryCount += 1; + await utils.sleep(3); + } + if (lastBatchExecuted < l1BatchNumber) { + throw new Error('Server did not execute old blocks'); + } + + // Send execute tx. + await ( + await govWallet.sendTransaction({ + to: mainContract.address, + data: executeUpgradeCalldata + }) + ).wait(); + + let bootloaderHashL1 = await mainContract.getL2BootloaderBytecodeHash(); + expect(bootloaderHashL1).eq(bootloaderHash); }); - step('execute transactions after simple restart', async () => { + step('Wait for block finalization', async () => { // Execute an L2 transaction const txHandle = await checkedRandomTransfer(alice, BigNumber.from(1)); await txHandle.waitFinalize(); + }); + + step('Check force deploy', async () => { + const deployedCode = await alice.provider.getCode(forceDeployAddress); + expect(deployedCode.toLowerCase()).eq(forceDeployBytecode.toLowerCase()); + }); + step('Execute transactions after simple restart', async () => { // Stop server. 
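        // Restarting checks that the upgrade outcome (new bootloader hash, force-deployed
        // bytecode) survives a cold start instead of living only in the running process.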
await utils.exec('pkill zksync_server'); await utils.sleep(10); @@ -155,9 +246,6 @@ describe('Upgrade test', function () { // Trying to send a transaction from the same address again await checkedRandomTransfer(alice, BigNumber.from(1)); - - let bootloaderHashL1 = await mainContract.getL2BootloaderBytecodeHash(); - expect(bootloaderHashL1).eq(bootloaderHash); }); after('Try killing server', async () => { @@ -186,3 +274,123 @@ async function checkedRandomTransfer(sender: zkweb3.Wallet, amount: BigNumber): .be.true; return transferHandle; } + +interface ForceDeployment { + // The bytecode hash to put on an address + bytecodeHash: BytesLike; + // The address on which to deploy the bytecodehash to + newAddress: string; + // Whether to call the constructor + callConstructor: boolean; + // The value with which to initialize a contract + value: BigNumber; + // The constructor calldata + input: BytesLike; +} + +async function waitForNewL1Batch(wallet: zkweb3.Wallet): Promise { + // Send a dummy transaction and wait until the new L1 batch is created. + const oldReceipt = await wallet.transfer({ to: wallet.address, amount: 0 }).then((tx) => tx.wait()); + // Invariant: even with 1 transaction, l1 batch must be eventually sealed, so this loop must exit. + while (!(await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash)).l1BatchNumber) { + await zkweb3.utils.sleep(wallet.provider.pollingInterval); + } + return await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash); +} + +async function prepareUpgradeCalldata( + govWallet: ethers.Wallet, + l2Provider: zkweb3.Provider, + params: { + l2ProtocolUpgradeTx: { + txType: BigNumberish; + from: BigNumberish; + to: BigNumberish; + gasLimit: BigNumberish; + gasPerPubdataByteLimit: BigNumberish; + maxFeePerGas: BigNumberish; + maxPriorityFeePerGas: BigNumberish; + paymaster: BigNumberish; + nonce?: BigNumberish; + value: BigNumberish; + reserved: [BigNumberish, BigNumberish, BigNumberish, BigNumberish]; + data: BytesLike; + signature: BytesLike; + factoryDeps: BigNumberish[]; + paymasterInput: BytesLike; + reservedDynamic: BytesLike; + }; + factoryDeps: BytesLike[]; + bootloaderHash?: BytesLike; + defaultAAHash?: BytesLike; + verifier?: string; + verifierParams?: { + recursionNodeLevelVkHash: BytesLike; + recursionLeafLevelVkHash: BytesLike; + recursionCircuitsSetVksHash: BytesLike; + }; + l1ContractsUpgradeCalldata?: BytesLike; + postUpgradeCalldata?: BytesLike; + upgradeTimestamp: BigNumberish; + newProtocolVersion?: BigNumberish; + newAllowList?: string; + } +) { + const upgradeAddress = process.env.CONTRACTS_DEFAULT_UPGRADE_ADDR; + + if (!upgradeAddress) { + throw new Error('CONTRACTS_DEFAULT_UPGRADE_ADDR not set'); + } + + const zkSyncContract = await l2Provider.getMainContractAddress(); + const zkSync = new ethers.Contract(zkSyncContract, zkweb3.utils.ZKSYNC_MAIN_ABI, govWallet); + + // In case there is some pending upgrade there, we cancel it + const upgradeProposalState = await zkSync.getUpgradeProposalState(); + if (upgradeProposalState != 0) { + const currentProposalHash = await zkSync.getProposedUpgradeHash(); + await (await zkSync.connect(govWallet).cancelUpgradeProposal(currentProposalHash)).wait(); + } + + const newProtocolVersion = params.newProtocolVersion ?? 
(await zkSync.getProtocolVersion()).add(1); + params.l2ProtocolUpgradeTx.nonce ??= newProtocolVersion; + const upgradeInitData = L1_DEFAULT_UPGRADE_ABI.encodeFunctionData('upgrade', [ + [ + params.l2ProtocolUpgradeTx, + params.factoryDeps, + params.bootloaderHash ?? ethers.constants.HashZero, + params.defaultAAHash ?? ethers.constants.HashZero, + params.verifier ?? ethers.constants.AddressZero, + params.verifierParams ?? [ethers.constants.HashZero, ethers.constants.HashZero, ethers.constants.HashZero], + params.l1ContractsUpgradeCalldata ?? '0x', + params.postUpgradeCalldata ?? '0x', + params.upgradeTimestamp, + newProtocolVersion, + params.newAllowList ?? ethers.constants.AddressZero + ] + ]); + + // Prepare the diamond cut data + const upgradeParam = { + facetCuts: [], + initAddress: upgradeAddress, + initCalldata: upgradeInitData + }; + const currentProposalId = (await zkSync.getCurrentProposalId()).add(1); + // Get transaction data of the `proposeTransparentUpgrade` + const proposeTransparentUpgrade = DIAMOND_CUT_FACET_ABI.encodeFunctionData('proposeTransparentUpgrade', [ + upgradeParam, + currentProposalId + ]); + + // Get transaction data of the `executeUpgrade` + const executeUpgrade = DIAMOND_CUT_FACET_ABI.encodeFunctionData('executeUpgrade', [ + upgradeParam, + ethers.constants.HashZero + ]); + + return { + proposeTransparentUpgrade, + executeUpgrade + }; +} diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 1a980a34bb9d..6b6b6caba71c 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -32,3 +32,7 @@ path = "src/iai_results_to_prometheus.rs" [[bin]] name = "compare_iai_results" path = "src/compare_iai_results.rs" + +[[bin]] +name = "find-slowest" +path = "src/find_slowest.rs" diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 95b2df0585ba..f49358e0af15 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -29,4 +29,5 @@ make_functions_and_main!( finish_eventful_frames, write_and_decode, event_spam, + slot_hash_collision, ); diff --git a/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision b/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision new file mode 100644 index 000000000000..4204406c947d Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision differ diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs new file mode 100644 index 000000000000..be04dda26d46 --- /dev/null +++ b/core/tests/vm-benchmark/src/find_slowest.rs @@ -0,0 +1,42 @@ +use std::{ + io::Write, + time::{Duration, Instant}, +}; +use vm_benchmark_harness::*; + +fn main() { + let mut results = vec![]; + + let arg = std::env::args() + .nth(1) + .expect("Expected directory of contracts to rank as first argument."); + let files = std::fs::read_dir(arg).expect("Failed to list dir"); + + let mut last_progress_update = Instant::now(); + + for (i, file) in files.enumerate() { + let path = file.unwrap().path(); + + let test_contract = std::fs::read(&path).expect("failed to read file"); + + if let Some(code) = cut_to_allowed_bytecode_size(&test_contract) { + let tx = get_deploy_tx(code); + + let start_time = Instant::now(); + BenchmarkingVm::new().run_transaction(&tx).unwrap(); + results.push((start_time.elapsed(), path)); + } + + if last_progress_update.elapsed() > Duration::from_millis(100) { + print!("\r{}", i); + 
std::io::stdout().flush().unwrap();
+            last_progress_update = Instant::now();
+        }
+    }
+    println!();
+
+    results.sort();
+    for (time, path) in results.iter().rev().take(30) {
+        println!("{} took {:?}", path.display(), time);
+    }
+}
diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml
new file mode 100644
index 000000000000..fa2cfb890f7f
--- /dev/null
+++ b/docker-compose-cpu-runner.yml
@@ -0,0 +1,38 @@
+version: '3.2'
+services:
+  geth:
+    image: "matterlabs/geth:latest"
+    environment:
+      - PLUGIN_CONFIG
+
+  zk:
+    image: "matterlabs/zk-environment:latest2.0"
+    depends_on:
+      - geth
+      - postgres
+    security_opt:
+      - seccomp:unconfined
+    command: tail -f /dev/null
+    volumes:
+      - .:/usr/src/zksync
+      - /usr/src/cache:/usr/src/cache
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /usr/src/setup-data:/mnt/prover_setup_keys
+      - /usr/src/setup-data:/usr/src/setup-data
+    environment:
+      - CACHE_DIR=/usr/src/cache
+      - SCCACHE_CACHE_SIZE=50g
+      - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+      - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+      - SCCACHE_ERROR_LOG=/tmp/sccache_log.txt
+      - SCCACHE_GCS_RW_MODE=READ_WRITE
+      - CI=1
+      - GITHUB_WORKSPACE=$GITHUB_WORKSPACE
+    env_file:
+      - ./.env
+  postgres:
+    image: "postgres:14"
+    ports:
+      - "5432:5432"
+    environment:
+      - POSTGRES_HOST_AUTH_METHOD=trust
diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml
new file mode 100644
index 000000000000..9b5feea15b0e
--- /dev/null
+++ b/docker-compose-gpu-runner-cuda-12-0.yml
@@ -0,0 +1,52 @@
+version: '3.2'
+services:
+  geth:
+    image: "matterlabs/geth:latest"
+    environment:
+      - PLUGIN_CONFIG
+
+  zk:
+    image: matterlabs/zk-environment:cuda-12-0-latest
+    depends_on:
+      - geth
+      - postgres
+    security_opt:
+      - seccomp:unconfined
+    command: tail -f /dev/null
+    volumes:
+      - .:/usr/src/zksync
+      - /usr/src/cache:/usr/src/cache
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /usr/src/keys:/mnt/prover_setup_keys
+    environment:
+      - CACHE_DIR=/usr/src/cache
+      - SCCACHE_CACHE_SIZE=50g
+      - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+      - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+      - SCCACHE_ERROR_LOG=/tmp/sccache_log.txt
+      - SCCACHE_GCS_RW_MODE=READ_WRITE
+      - CI=1
+      - GITHUB_WORKSPACE=$GITHUB_WORKSPACE
+      # We set CUDAARCHS for L4 GPUs
+      - CUDAARCHS=89
+    # We need to forward all NVIDIA devices: due to a bug with cgroups and nvidia-container-runtime (https://github.com/NVIDIA/libnvidia-container/issues/176#issuecomment-1159454366), cgroups are disabled and thus the GPU isn't properly forwarded to dind
+    devices:
+      - /dev/nvidia0:/dev/nvidia0
+      - /dev/nvidiactl:/dev/nvidiactl
+      - /dev/nvidia-caps:/dev/nvidia-caps
+      - /dev/nvidia-modeset:/dev/nvidia-modeset
+      - /dev/nvidia-uvm:/dev/nvidia-uvm
+      - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
+    env_file:
+      - ./.env
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - capabilities: [gpu]
+  postgres:
+    image: "postgres:14"
+    ports:
+      - "5432:5432"
+    environment:
+      - POSTGRES_HOST_AUTH_METHOD=trust
diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml
index 68c48f01204a..7b2d6a340364 100644
--- a/docker-compose-gpu-runner.yml
+++ b/docker-compose-gpu-runner.yml
@@ -22,7 +22,7 @@ services:
      - CACHE_DIR=/usr/src/cache
      - SCCACHE_CACHE_SIZE=50g
      - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
-
SCCACHE_GCS_OAUTH_URL=http://169.254.169.254/computeMetadata/v1/instance/service-accounts/gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com/token + - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com - SCCACHE_ERROR_LOG=/tmp/sccache_log.txt - SCCACHE_GCS_RW_MODE=READ_WRITE - CI=1 @@ -35,7 +35,7 @@ services: devices: - capabilities: [gpu] postgres: - image: "postgres:12" + image: "postgres:14" ports: - "5432:5432" environment: diff --git a/docker-compose-runner.yml b/docker-compose-runner.yml index c9c6b815ebd7..7c110501e5c3 100644 --- a/docker-compose-runner.yml +++ b/docker-compose-runner.yml @@ -21,7 +21,7 @@ services: - CACHE_DIR=/usr/src/cache - SCCACHE_CACHE_SIZE=50g - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage - - SCCACHE_GCS_OAUTH_URL=http://169.254.169.254/computeMetadata/v1/instance/service-accounts/gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com/token + - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com - SCCACHE_ERROR_LOG=/tmp/sccache_log.txt - SCCACHE_GCS_RW_MODE=READ_WRITE - CI=1 @@ -29,7 +29,7 @@ services: env_file: - ./.env postgres: - image: "postgres:12" + image: "postgres:14" ports: - "5432:5432" environment: diff --git a/docker-compose.yml b/docker-compose.yml index 44dd9bba328c..dc1f375645a0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,7 +10,7 @@ services: source: ./volumes/geth target: /var/lib/geth/data postgres: - image: "postgres:12" + image: "postgres:14" ports: - "5432:5432" volumes: diff --git a/docker/circuit-synthesizer/Dockerfile b/docker/circuit-synthesizer/Dockerfile index 6ae033b3baa9..a2e8f6b4ba7e 100644 --- a/docker/circuit-synthesizer/Dockerfile +++ b/docker/circuit-synthesizer/Dockerfile @@ -2,7 +2,7 @@ # Not expected to work locally # syntax=docker/dockerfile:experimental -FROM debian:buster-slim as builder +FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -23,7 +23,8 @@ COPY . . RUN CARGO_HOME=./cargo cargo build --release -FROM debian:buster-slim +FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl openssl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_circuit_synthesizer /usr/bin/ ENTRYPOINT ["zksync_circuit_synthesizer"] diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 2626ae76306f..8d8dac21f965 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:experimental -FROM rust:1.67 as builder +FROM rust:1.67-bookworm as builder RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/zksync COPY . . @@ -7,7 +7,7 @@ COPY . . 
# Not expected to work locally
RUN CARGO_HOME=./cargo cargo build --release

-FROM debian:11
+FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 && rm -rf /var/lib/apt/lists/*

# install zksolc
diff --git a/docker/cross-external-nodes-checker/Dockerfile b/docker/cross-external-nodes-checker/Dockerfile
index a021c8b093ba..d8e02fe32b8c 100644
--- a/docker/cross-external-nodes-checker/Dockerfile
+++ b/docker/cross-external-nodes-checker/Dockerfile
@@ -3,7 +3,7 @@
# BUILDING STAGE
# syntax=docker/dockerfile:experimental
-FROM rust:1.67-buster as builder
+FROM rust:1.67-bookworm as builder
RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/zksync
COPY . .
@@ -11,7 +11,7 @@ COPY . .
RUN CARGO_HOME=./cargo cargo build --release

# RUNNING STAGE
-FROM debian:buster-slim
+FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl ca-certificates && rm -rf /var/lib/apt/lists/*

# Bring the below from the building stage to the final image.
diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile
index eab152f74b0d..7a0d01370c0a 100644
--- a/docker/external-node/Dockerfile
+++ b/docker/external-node/Dockerfile
@@ -2,7 +2,7 @@
# Not expected to work locally
# syntax=docker/dockerfile:experimental
-FROM rust:1.67-buster as builder
+FROM rust:1.67-bookworm as builder
RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/zksync
COPY . .
@@ -10,7 +10,7 @@ COPY . .
RUN CARGO_HOME=./cargo cargo build --release
RUN CARGO_HOME=./cargo cargo install sqlx-cli --version 0.5.13

-FROM debian:buster-slim
+FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*

COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin
diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile
index a5bbe1eb24ca..9daf02bab257 100644
--- a/docker/prover-fri/Dockerfile
+++ b/docker/prover-fri/Dockerfile
@@ -2,7 +2,7 @@
# Not expected to work locally
# syntax=docker/dockerfile:experimental
-FROM debian:buster-slim as builder
+FROM debian:bookworm-slim as builder

ARG DEBIAN_FRONTEND=noninteractive

@@ -23,7 +23,7 @@ COPY . .
RUN CARGO_HOME=./cargo cargo build --release

-FROM debian:buster-slim
+FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*

COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/
diff --git a/docker/prover-gar/Dockerfile b/docker/prover-gar/Dockerfile
index 643d9caee214..974679cbebfa 100644
--- a/docker/prover-gar/Dockerfile
+++ b/docker/prover-gar/Dockerfile
@@ -4,7 +4,8 @@ FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2:2.0-$PROVER_
 FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04 as app

-COPY *.bin/ /
+# HACK copying to root is the only way to make Docker layer caching work for these files for some reason
+COPY *.bin /
 COPY setup_2\^26.key /setup_2\^26.key

 RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/*
diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile
new file mode 100644
index 000000000000..aef78ff26e90
--- /dev/null
+++ b/docker/prover-gpu-fri-gar/Dockerfile
@@ -0,0 +1,15 @@
+# syntax=docker/dockerfile:1
+ARG PROVER_IMAGE
+FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-gpu-fri:2.0-$PROVER_IMAGE as prover
+
+FROM nvidia/cuda:12.0.0-runtime-ubuntu22.04 as app
+
+# HACK copying to root is the only way to make Docker layer caching work for these files for some reason
+COPY *.bin /
+
+RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/*
+
+
+COPY --from=prover /usr/bin/zksync_prover_fri /usr/bin/
+
+ENTRYPOINT ["zksync_prover_fri"]
diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile
new file mode 100644
index 000000000000..a1d214c0f91b
--- /dev/null
+++ b/docker/prover-gpu-fri/Dockerfile
@@ -0,0 +1,37 @@
+# For using private GitHub dependencies, CI downloads all crates outside of the container
+# Not expected to work locally
+
+# syntax=docker/dockerfile:experimental
+FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 as builder
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \
+    pkg-config build-essential libclang-dev && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV RUSTUP_HOME=/usr/local/rustup \
+    CARGO_HOME=/usr/local/cargo \
+    PATH=/usr/local/cargo/bin:$PATH
+
+ENV CUDAARCHS=75
+
+RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
+    rustup install nightly-2023-05-31 && \
+    rustup default nightly-2023-05-31
+
+# Setup cmake
+RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
+    chmod +x cmake-3.24.2-linux-x86_64.sh && \
+    ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local
+
+WORKDIR /usr/src/zksync
+COPY . .
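+# `--features "gpu"` in the build below selects the CUDA prover backend; the CUDAARCHS
+# set above determines which GPU architecture the kernels are compiled for.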
+ +RUN CARGO_HOME=./cargo cargo build --release --features "gpu" + +FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/ +ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index b5c94b1ffc9a..a0b03448cc10 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -2,21 +2,25 @@ # Not expected to work locally # syntax=docker/dockerfile:experimental -FROM rust:1.67-buster as builder -RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* +FROM rust:1.67-bookworm as builder +RUN apt-get update && apt-get install -y linux-libc-dev liburing-dev clang && \ + # ^ We need a newer version of `linux-libc-dev` from backports than the one installed by default + rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN CARGO_HOME=./cargo cargo build --release --features=rocksdb/io-uring -FROM debian:buster-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM debian:bookworm-slim +RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ + rm -rf /var/lib/apt/lists/* EXPOSE 3000 EXPOSE 3031 EXPOSE 3030 COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin +COPY --from=builder /usr/src/zksync/target/release/slot_index_consistency_checker /usr/bin COPY --from=builder /usr/src/zksync/target/release/rocksdb_util /usr/bin COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ COPY etc/system-contracts/contracts/artifacts/ /etc/system-contracts/contracts/artifacts/ diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index abfaa2ae0594..7d54863c31ea 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -2,7 +2,7 @@ # Not expected to work locally # syntax=docker/dockerfile:experimental -FROM debian:buster-slim as builder +FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -23,10 +23,10 @@ COPY . . 
RUN CARGO_HOME=./cargo cargo build --release

-FROM debian:buster-slim
+FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*

-COPY core/bin/vk_setup_data_generator_server_fri/data/ /core/bin/vk_setup_data_generator_server_fri/data/
+COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/
COPY --from=builder /usr/src/zksync/target/release/zksync_witness_generator /usr/bin/

ENTRYPOINT ["zksync_witness_generator"]
diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile
new file mode 100644
index 000000000000..1fd67cbd7f32
--- /dev/null
+++ b/docker/witness-vector-generator/Dockerfile
@@ -0,0 +1,33 @@
+# For using private GitHub dependencies, CI downloads all crates outside of the container
+# Not expected to work locally
+
+# syntax=docker/dockerfile:experimental
+FROM debian:bookworm-slim as builder
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \
+    pkg-config build-essential libclang-dev && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV RUSTUP_HOME=/usr/local/rustup \
+    CARGO_HOME=/usr/local/cargo \
+    PATH=/usr/local/cargo/bin:$PATH
+
+RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
+    rustup install nightly-2023-05-31 && \
+    rustup default nightly-2023-05-31
+
+WORKDIR /usr/src/zksync
+COPY . .
+
+RUN CARGO_HOME=./cargo cargo build --release
+
+FROM debian:bookworm-slim
+RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*
+
+# copy finalization hints required for witness vector generation
+COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/
+
+COPY --from=builder /usr/src/zksync/target/release/zksync_witness_vector_generator /usr/bin/
+ENTRYPOINT ["zksync_witness_vector_generator"]
diff --git a/docker/zk-environment-cuda-12-0/Dockerfile b/docker/zk-environment-cuda-12-0/Dockerfile
new file mode 100644
index 000000000000..0f9e601f4314
--- /dev/null
+++ b/docker/zk-environment-cuda-12-0/Dockerfile
@@ -0,0 +1,244 @@
+FROM ubuntu:20.04 as base
+
+WORKDIR /usr/src/zksync
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install required dependencies
+RUN apt-get update && apt-get install -y \
+    cmake \
+    make \
+    bash \
+    git \
+    openssl \
+    libssl-dev \
+    gcc \
+    g++ \
+    curl \
+    pkg-config \
+    software-properties-common \
+    jq \
+    openssh-server \
+    openssh-client \
+    wget \
+    vim \
+    ca-certificates \
+    gnupg2
+
+# Install dependencies for RocksDB.
`liburing` is not available for Ubuntu 20.04,
+# so we use a PPA with the backport
+RUN add-apt-repository ppa:savoury1/virtualisation && \
+    apt-get update && \
+    apt-get install -y \
+    curl \
+    gnutls-bin git \
+    build-essential \
+    clang-7 \
+    lldb-7 \
+    lld-7 \
+    liburing-dev
+
+# Install docker engine
+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+RUN apt update; apt install -y docker-ce-cli
+
+# Configure git to fetch submodules correctly (https://stackoverflow.com/questions/38378914/how-to-fix-git-error-rpc-failed-curl-56-gnutls)
+RUN git config --global http.postBuffer 1048576000
+
+# Install node and yarn
+RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
+RUN apt-get install -y nodejs
+RUN npm install -g yarn
+
+# Install Rust and required cargo packages
+ENV RUSTUP_HOME=/usr/local/rustup \
+    CARGO_HOME=/usr/local/cargo \
+    PATH=/usr/local/cargo/bin:$PATH
+
+ENV GCLOUD_VERSION=403.0.0
+# Install gcloud for GCR login and gcsfuse for mounting buckets
+RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \
+    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
+    apt-get update -y && apt-get install google-cloud-cli=${GCLOUD_VERSION}-0 --no-install-recommends -y && \
+    gcloud config set core/disable_usage_reporting true && \
+    gcloud config set component_manager/disable_update_check true && \
+    gcloud config set metrics/environment github_docker_image
+
+RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
+RUN rustup install nightly-2023-05-31
+RUN rustup default stable
+RUN cargo install --version=0.5.6 sqlx-cli
+RUN cargo install cargo-tarpaulin
+
+# Copy compiler (both solc and zksolc) binaries
+# Obtain `solc` 0.8.20.
+RUN wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \
+    && mv solc-linux-amd64-v0.8.20+commit.a1b79de6 /usr/bin/solc \
+    && chmod +x /usr/bin/solc
+# Obtain `zksolc` 1.3.13.
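+# (the musl build is statically linked, so it does not depend on the image's libc)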
+RUN wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.13 \ + && mv zksolc-linux-amd64-musl-v1.3.13 /usr/bin/zksolc \ + && chmod +x /usr/bin/zksolc + +# Setup the environment +ENV ZKSYNC_HOME=/usr/src/zksync +ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV CI=1 +RUN cargo install sccache +ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache + +FROM base as nvidia-tools + +# Install Rust and required cargo packages +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +# Setup the environment +ENV ZKSYNC_HOME=/usr/src/zksync +ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV CI=1 +ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache +ENV DEBIAN_FRONTEND noninteractive + +# Setup nvidia-cuda env +ENV NVARCH x86_64 + +ENV NVIDIA_REQUIRE_CUDA "cuda>=12.0 brand=tesla,driver>=450,driver<451 brand=tesla,driver>=470,driver<471 brand=unknown,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=geforce,driver>=470,driver<471 brand=geforcertx,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=titan,driver>=470,driver<471 brand=titanrtx,driver>=470,driver<471" +ENV NV_CUDA_CUDART_VERSION 12.0.107-1 +ENV NV_CUDA_COMPAT_PACKAGE cuda-compat-12-0 + +# curl purging is removed, it's required in next steps +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg2 curl ca-certificates && \ + curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/${NVARCH}/3bf863cc.pub | apt-key add - && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/${NVARCH} /" > /etc/apt/sources.list.d/cuda.list && \ + rm -rf /var/lib/apt/lists/* + +ENV CUDA_VERSION 12.0.0 + +# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a +RUN apt-get update && apt-get install -y --no-install-recommends \ + cuda-cudart-12-0=${NV_CUDA_CUDART_VERSION} \ + ${NV_CUDA_COMPAT_PACKAGE} \ + && rm -rf /var/lib/apt/lists/* + +# Required for nvidia-docker v1 +RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \ + && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf + +ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} +ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64 + +# nvidia-container-runtime +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,utility + +ENV NV_CUDA_LIB_VERSION 12.0.0-1 + +ENV NV_NVTX_VERSION 12.0.76-1 +ENV NV_LIBNPP_VERSION 12.0.0.30-1 +ENV NV_LIBNPP_PACKAGE libnpp-12-0=${NV_LIBNPP_VERSION} +ENV NV_LIBCUSPARSE_VERSION 12.0.0.76-1 + +ENV NV_LIBCUBLAS_PACKAGE_NAME libcublas-12-0 +ENV NV_LIBCUBLAS_VERSION 12.0.1.189-1 +ENV NV_LIBCUBLAS_PACKAGE ${NV_LIBCUBLAS_PACKAGE_NAME}=${NV_LIBCUBLAS_VERSION} + +ENV NV_LIBNCCL_PACKAGE_NAME libnccl2 +ENV NV_LIBNCCL_PACKAGE_VERSION 2.17.1-1 +ENV NCCL_VERSION 2.17.1-1 +ENV NV_LIBNCCL_PACKAGE ${NV_LIBNCCL_PACKAGE_NAME}=${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.0 + +ENV NV_NVTX_VERSION 12.0.76-1 +ENV NV_LIBNPP_VERSION 12.0.0.30-1 +ENV NV_LIBNPP_PACKAGE libnpp-12-0=${NV_LIBNPP_VERSION} +ENV NV_LIBCUSPARSE_VERSION 12.0.0.76-1 + +ENV NV_LIBCUBLAS_PACKAGE_NAME libcublas-12-0 +ENV NV_LIBCUBLAS_VERSION 12.0.1.189-1 +ENV NV_LIBCUBLAS_PACKAGE ${NV_LIBCUBLAS_PACKAGE_NAME}=${NV_LIBCUBLAS_VERSION} + +ENV NV_LIBNCCL_PACKAGE_NAME libnccl2 +ENV NV_LIBNCCL_PACKAGE_VERSION 2.17.1-1 +ENV NCCL_VERSION 2.17.1-1 +ENV NV_LIBNCCL_PACKAGE 
${NV_LIBNCCL_PACKAGE_NAME}=${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.0 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + cuda-libraries-12-0=${NV_CUDA_LIB_VERSION} \ + ${NV_LIBNPP_PACKAGE} \ + cuda-nvtx-12-0=${NV_NVTX_VERSION} \ + libcusparse-12-0=${NV_LIBCUSPARSE_VERSION} \ + ${NV_LIBCUBLAS_PACKAGE} \ + ${NV_LIBNCCL_PACKAGE} \ + && rm -rf /var/lib/apt/lists/* + +# Keep apt from auto upgrading the cublas and nccl packages. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 +RUN apt-mark hold ${NV_LIBCUBLAS_PACKAGE_NAME} ${NV_LIBNCCL_PACKAGE_NAME} + +#### devel + +ENV NV_CUDA_LIB_VERSION "12.0.0-1" + +ENV NV_CUDA_CUDART_DEV_VERSION 12.0.107-1 +ENV NV_NVML_DEV_VERSION 12.0.76-1 +ENV NV_LIBCUSPARSE_DEV_VERSION 12.0.0.76-1 +ENV NV_LIBNPP_DEV_VERSION 12.0.0.30-1 +ENV NV_LIBNPP_DEV_PACKAGE libnpp-dev-12-0=${NV_LIBNPP_DEV_VERSION} + +ENV NV_LIBCUBLAS_DEV_VERSION 12.0.1.189-1 +ENV NV_LIBCUBLAS_DEV_PACKAGE_NAME libcublas-dev-12-0 +ENV NV_LIBCUBLAS_DEV_PACKAGE ${NV_LIBCUBLAS_DEV_PACKAGE_NAME}=${NV_LIBCUBLAS_DEV_VERSION} + +ENV NV_CUDA_NSIGHT_COMPUTE_VERSION 12.0.0-1 +ENV NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE cuda-nsight-compute-12-0=${NV_CUDA_NSIGHT_COMPUTE_VERSION} + +ENV NV_NVPROF_VERSION 12.0.90-1 +ENV NV_NVPROF_DEV_PACKAGE cuda-nvprof-12-0=${NV_NVPROF_VERSION} + +ENV NV_LIBNCCL_DEV_PACKAGE_NAME libnccl-dev +ENV NV_LIBNCCL_DEV_PACKAGE_VERSION 2.17.1-1 +ENV NCCL_VERSION 2.17.1-1 +ENV NV_LIBNCCL_DEV_PACKAGE ${NV_LIBNCCL_DEV_PACKAGE_NAME}=${NV_LIBNCCL_DEV_PACKAGE_VERSION}+cuda12.0 + +ENV NV_CUDA_CUDART_DEV_VERSION 12.0.107-1 +ENV NV_NVML_DEV_VERSION 12.0.76-1 +ENV NV_LIBCUSPARSE_DEV_VERSION 12.0.0.76-1 +ENV NV_LIBNPP_DEV_VERSION 12.0.0.30-1 +ENV NV_LIBNPP_DEV_PACKAGE libnpp-dev-12-0=${NV_LIBNPP_DEV_VERSION} + +ENV NV_LIBCUBLAS_DEV_PACKAGE_NAME libcublas-dev-12-0 +ENV NV_LIBCUBLAS_DEV_VERSION 12.0.1.189-1 +ENV NV_LIBCUBLAS_DEV_PACKAGE ${NV_LIBCUBLAS_DEV_PACKAGE_NAME}=${NV_LIBCUBLAS_DEV_VERSION} + +ENV NV_CUDA_NSIGHT_COMPUTE_VERSION 12.0.0-1 +ENV NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE cuda-nsight-compute-12-0=${NV_CUDA_NSIGHT_COMPUTE_VERSION} + +ENV NV_LIBNCCL_DEV_PACKAGE_NAME libnccl-dev +ENV NV_LIBNCCL_DEV_PACKAGE_VERSION 2.17.1-1 +ENV NCCL_VERSION 2.17.1-1 +ENV NV_LIBNCCL_DEV_PACKAGE ${NV_LIBNCCL_DEV_PACKAGE_NAME}=${NV_LIBNCCL_DEV_PACKAGE_VERSION}+cuda12.0 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + libtinfo5 libncursesw5 \ + cuda-cudart-dev-12-0=${NV_CUDA_CUDART_DEV_VERSION} \ + cuda-command-line-tools-12-0=${NV_CUDA_LIB_VERSION} \ + cuda-minimal-build-12-0=${NV_CUDA_LIB_VERSION} \ + cuda-libraries-dev-12-0=${NV_CUDA_LIB_VERSION} \ + cuda-nvml-dev-12-0=${NV_NVML_DEV_VERSION} \ + ${NV_NVPROF_DEV_PACKAGE} \ + ${NV_LIBNPP_DEV_PACKAGE} \ + libcusparse-dev-12-0=${NV_LIBCUSPARSE_DEV_VERSION} \ + ${NV_LIBCUBLAS_DEV_PACKAGE} \ + ${NV_LIBNCCL_DEV_PACKAGE} \ + ${NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE} \ + && rm -rf /var/lib/apt/lists/* + +# Keep apt from auto upgrading the cublas and nccl packages. 
See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 +RUN apt-mark hold ${NV_LIBCUBLAS_DEV_PACKAGE_NAME} ${NV_LIBNCCL_DEV_PACKAGE_NAME} +ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs + +# Install cmake 3.24, as we need it for boojum-cuda +RUN curl -LO https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-x86_64.sh && \ + chmod +x cmake-3.24.3-linux-x86_64.sh && \ + ./cmake-3.24.3-linux-x86_64.sh --skip-license --prefix=/usr/local diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index fa2414a10359..c8809c904e20 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -24,14 +24,18 @@ RUN apt-get update && apt-get install -y \ ca-certificates \ gnupg2 -# Install dependencies for RocksDB -RUN apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang-7 \ - lldb-7 \ - lld-7 +# Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, +# so we use a PPA with the backport +RUN add-apt-repository ppa:savoury1/virtualisation && \ + apt-get update && \ + apt-get install -y \ + curl \ + gnutls-bin git \ + build-essential \ + clang-7 \ + lldb-7 \ + lld-7 \ + liburing-dev # Install docker engine RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - @@ -67,13 +71,13 @@ RUN cargo install --version=0.5.6 sqlx-cli RUN cargo install cargo-tarpaulin # Copy compiler (both solc and zksolc) binaries -# Obtain `solc` 0.8.12. -RUN wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.12%2Bcommit.f00d7308 \ - && mv solc-linux-amd64-v0.8.12+commit.f00d7308 /usr/bin/solc \ +# Obtain `solc` 0.8.20. +RUN wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ + && mv solc-linux-amd64-v0.8.20+commit.a1b79de6 /usr/bin/solc \ && chmod +x /usr/bin/solc -# Obtain `zksolc` 1.1.5. -RUN wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.1.6 \ - && mv zksolc-linux-amd64-musl-v1.1.6 /usr/bin/zksolc \ +# Obtain `zksolc` 1.3.13. +RUN wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.13 \ + && mv zksolc-linux-amd64-musl-v1.3.13 /usr/bin/zksolc \ && chmod +x /usr/bin/zksolc # Setup the environment diff --git a/docker/zk-rust-nightly-environment/Dockerfile b/docker/zk-rust-nightly-environment/Dockerfile index 52a2df4abb6c..49fec856f5cc 100644 --- a/docker/zk-rust-nightly-environment/Dockerfile +++ b/docker/zk-rust-nightly-environment/Dockerfile @@ -1,4 +1,4 @@ -FROM debian:buster-slim +FROM debian:bookworm-slim ARG DEBIAN_FRONTEND=noninteractive diff --git a/docs/advanced/01_initialization.md b/docs/advanced/01_initialization.md index 54c48fed8bf7..655eb0d41fba 100644 --- a/docs/advanced/01_initialization.md +++ b/docs/advanced/01_initialization.md @@ -43,7 +43,7 @@ After which we setup the schema (lots of lines with `Applied XX`). You can try connecting to postgres now, to see what's inside: -``` +```shell psql postgres://postgres@localhost/zksync_local ``` @@ -64,17 +64,17 @@ We're running two things in a docker: Let's see if they are running: -``` +```shell docker container ls ``` and then we can look at the Geth logs: -``` -docker logs zksync-2-dev_geth_1 +```shell +docker logs zksync-2-dev-geth-1 ``` -Where zksync-2-dev_geth_1 is the container id, that we got from the first command. +Where `zksync-2-dev-geth-1` is the container name, that we got from the first command. 
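If you'd rather ask the node directly than read container logs, the same check can be done from code. A minimal
sketch using ethers v5 (which this repo's tooling already depends on); the URL is the local geth's default RPC
endpoint:

```typescript
import { ethers } from 'ethers';

// The local geth from the docker-compose setup listens on port 8545.
const provider = new ethers.providers.JsonRpcProvider('http://localhost:8545');

async function main() {
    // If L1 blocks are being produced, this number grows between runs.
    console.log('L1 block height:', await provider.getBlockNumber());
}

main();
```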
If everything goes well, you should see that L1 blocks are being produced. @@ -82,7 +82,7 @@ If everything goes well, you should see that L1 blocks are being produced. Now we can start the main server: -``` +```shell zk server ``` @@ -103,13 +103,13 @@ select * from miniblocks; Let's finish this article, by taking a look at our L1: -``` -docker container exec -it zksync-2-dev_geth_1 geth attach http://localhost:8545 +```shell +docker container exec -it zksync-2-dev-geth-1 geth attach http://localhost:8545 ``` The command above will start a shell - and you can check that you're a (localnet) crypto trillionaire, by running: -``` +```shell eth.getBalance(personal.listAccounts[0]) ``` @@ -127,7 +127,7 @@ contains the address. You can quickly verify that they were really deployed, by calling: -``` +```shell eth.getCode("XXXX") ``` @@ -146,4 +146,4 @@ Ok - so let's sum up what we have: - and two accounts with lots of tokens - and a server process -In the next article, we'll start playing with the system (bridging tokens etc). +In the [next article](02_deposits.md), we'll start playing with the system (bridging tokens etc). diff --git a/docs/advanced/deposit.md b/docs/advanced/02_deposits.md similarity index 61% rename from docs/advanced/deposit.md rename to docs/advanced/02_deposits.md index 29a66db79032..49b01b90e36b 100644 --- a/docs/advanced/deposit.md +++ b/docs/advanced/02_deposits.md @@ -1,20 +1,20 @@ # ZK-Sync deeper dive - bridging & deposits -In the first article, we've managed to setup our system on local machine and verify that it works. Now let's actually -start using it. +In the [first article](01_initialization.md), we've managed to setup our system on local machine and verify that it +works. Now let's actually start using it. ## Seeing the status of the accounts Let's use a small command line tool (web3 - ) to interact with our blockchains. -``` +```shell git clone https://github.com/mm-zk/web3 make build ``` Then let's create the keypair for our temporary account: -``` +```shell ./web3 account create ``` @@ -25,9 +25,11 @@ Private key: 0x5090c024edb3bdf4ce2ebc2da96bedee925d9d77d729687e5e2d56382cf0a5a6 Public address: 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd ``` +**NOTE:** Keep track of this key and address, as they will be constantly used throughout these articles + Now, let's see how many tokens we have: -``` +```shell // This checks the tokens on 'L1' (geth) ./web3 --rpc-url http://localhost:8545 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd @@ -37,15 +39,15 @@ Now, let's see how many tokens we have: Unsurprisingly we have 0 on both - let's fix it by first transferring some tokens on L1: -``` -docker container exec -it zksync-2-dev_geth_1 geth attach http://localhost:8545 -//and inside: +```shell +docker container exec -it zksync-2-dev-geth-1 geth attach http://localhost:8545 +// and inside: eth.sendTransaction({from: personal.listAccounts[0], to: "0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd", value: "7400000000000000000"}) ``` And now when we check the balance, we should see: -``` +```shell ./web3 --rpc-url http://localhost:8545 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd ``` @@ -58,13 +60,15 @@ and now let's bridge it over to L2. 
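As a side note: the deposit below can also be done straight from code. Here is a hedged sketch using the
`zksync-web3` SDK (the `deposit` helper wraps the same flow; the private key is the throwaway one generated above):

```typescript
import * as zksync from 'zksync-web3';
import { ethers } from 'ethers';

const l1Provider = new ethers.providers.JsonRpcProvider('http://localhost:8545');
const l2Provider = new zksync.Provider('http://localhost:3050');
const wallet = new zksync.Wallet(
    '0x5090c024edb3bdf4ce2ebc2da96bedee925d9d77d729687e5e2d56382cf0a5a6',
    l2Provider,
    l1Provider
);

async function main() {
    // Under the hood this calls `requestL2Transaction` on the L1 diamond proxy,
    // which is exactly the call we will inspect below.
    const deposit = await wallet.deposit({
        token: zksync.utils.ETH_ADDRESS,
        amount: ethers.utils.parseEther('3')
    });
    await deposit.wait(); // resolves once the L2 side of the deposit is processed
}

main();
```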
We'll use the zksync-cli from: and then run: ```shell +npm i npm run build -npm exec zksync-cli deposit +npm exec zksync-cli deposit --l1-rpc-url=http://localhost:8545 --l2-rpc-url=http://localhost:3050 +# Address to deposit funds to: 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd +# Amount in ETH: 3 +# Private key of the sender: 0x5090c024edb3bdf4ce2ebc2da96bedee925d9d77d729687e5e2d56382cf0a5a6 ``` -you should choose the 'localnet' as network, and provide the public key as address. - -If everything goes well, you should be able to see the tokens transferred: +If everything goes well, you should be able to see 3 tokens transferred: ```shell ./web3 --rpc-url http://localhost:3050 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd @@ -78,39 +82,43 @@ If we look at what 'deposit' command has printed, we'll see something like this: ``` Transaction submitted 💸💸💸 -L1 transaction: 0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179 +[...]/tx/0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179 ``` Let's run the `geth attach` (exact command is above) and see the details: -``` +```shell eth.getTransaction("0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179") +``` +returns + +```json { - accessList: [], - blockHash: "0xd319b685a1a0b88545ec6df473a3efb903358ac655263868bb14b92797ea7504", - blockNumber: 79660, - chainId: "0x9", - from: "0x618263ce921f7dd5f4f40c29f6c524aaf97b9bbd", - gas: 125060, - gasPrice: 1500000007, - hash: "0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179", - input: "0xeb672419000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000029a2241af62c000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000009cb4200000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000100000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - maxFeePerGas: 1500000010, - maxPriorityFeePerGas: 1500000000, - nonce: 40, - r: "0xc9b0548ade9c5d7334f1ebdfba9239cf1acca7873381b8f0bc0e8f49ae1e456f", - s: "0xb9dd338283a3409c281b69c3d6f1d66ea6ee5486ee6884c71d82f596d6a934", - to: "0x54e8159f006750466084913d5bd288d4afb1ee9a", - transactionIndex: 0, - type: "0x2", - v: "0x1", - value: 3000320929000000000 + "accessList": [], + "blockHash": "0xd319b685a1a0b88545ec6df473a3efb903358ac655263868bb14b92797ea7504", + "blockNumber": 79660, + "chainId": "0x9", + "from": "0x618263ce921f7dd5f4f40c29f6c524aaf97b9bbd", + "gas": 125060, + "gasPrice": 1500000007, + "hash": "0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179", + "input": "0xeb672419000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000029a2241af62c000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000009cb4200000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000100000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "maxFeePerGas": 1500000010, + "maxPriorityFeePerGas": 1500000000, + "nonce": 40, + "r": 
"0xc9b0548ade9c5d7334f1ebdfba9239cf1acca7873381b8f0bc0e8f49ae1e456f", + "s": "0xb9dd338283a3409c281b69c3d6f1d66ea6ee5486ee6884c71d82f596d6a934", + "to": "0x54e8159f006750466084913d5bd288d4afb1ee9a", + "transactionIndex": 0, + "type": "0x2", + "v": "0x1", + "value": 3000320929000000000 } ``` -The witdraw command has called the contract on address `0x54e8` (which is exactly the DIAMOND_PROXY_ADDRESS), and it has -called the method `0xeb672419` - which is the `requestL2Transaction` from +The deposit command has called the contract on address `0x54e8` (which is exactly the `CONTRACTS_DIAMOND_PROXY_ADDR` +from `deployL1.log`), and it has called the method `0xeb672419` - which is the `requestL2Transaction` from [Mailbox.sol](https://github.com/matter-labs/zksync-2-contracts/blob/main/ethereum/contracts/zksync/facets/Mailbox.sol#L205) #### Quick note on our L1 contracts @@ -159,14 +167,14 @@ You can actually check it - by running the psql and looking at the contents of t transaction was succesfully inserted, and it was also marked as 'priority' (as it came from L1) - as regular transactions that are received by the server directly are not marked as priority. -You can verify that this is your transaction, by looking at the l1_block_number column (it should match the block_number -from the eth.getTransaction call above). +You can verify that this is your transaction, by looking at the `l1_block_number` column (it should match the +`block_number` from the `eth.getTransaction(...)` call above). -Notice that the hash of the transaction in the postgres will be different from the one returned by eth.getTransaction. -This is because the postgres keeps the hash of the 'L2' transaction (which was 'inside' the L1 transaction that -eth.getTransaction returned). +Notice that the hash of the transaction in the postgres will be different from the one returned by +`eth.getTransaction(...)`. This is because the postgres keeps the hash of the 'L2' transaction (which was 'inside' the +L1 transaction that `eth.getTransaction(...)` returned). ## Summary -In this article, we've learned how ETH gets bridged from L1 to L2. In the next episode, we'll look at the other -direction - how we transmit messages (and ETH) from L2 to L1 - stay tuned. +In this article, we've learned how ETH gets bridged from L1 to L2. In the [next article](03_withdrawals.md), we'll look +at the other direction - how we transmit messages (and ETH) from L2 to L1. diff --git a/docs/advanced/withdrawal.md b/docs/advanced/03_withdrawals.md similarity index 86% rename from docs/advanced/withdrawal.md rename to docs/advanced/03_withdrawals.md index 925a0b60a442..9a8bfb09a598 100644 --- a/docs/advanced/withdrawal.md +++ b/docs/advanced/03_withdrawals.md @@ -1,23 +1,24 @@ # zkSync deeper dive bridging stuff back (a.k.a withdrawals) -Assuming that we have the tools from part 1 installed, we can bridge the tokens back by simply calling the zksync-cli: +Assuming that you have completed [part 1](01_initialization.md) and [part 2](02_deposits.md) already, we can bridge the +tokens back by simply calling the zksync-cli: -``` -npm exec zksync-cli withdraw +```bash +npm exec zksync-cli withdraw --l1-rpc-url=http://localhost:8545 --l2-rpc-url=http://localhost:3050 ``` -And providing the account name (public key) and private key. +And providing the account name (public address) and private key. Afterward, by using `web3` tools, we can quickly check that funds were transferred back to L1. **And you discover that they didn't** - what happened? 
Actually, we'll have to run one additional step:

-```
-npm exec zksync-cli confirm_withdrawal
+```bash
+npm exec zksync-cli confirm-withdraw --l1-rpc-url=http://localhost:8545 --l2-rpc-url=http://localhost:3050
 ```

-and pass the transaction that we received from the first call, into the `confirm_withdrawal` call.
+and pass the transaction that we received from the first call into the `confirm-withdraw` call.

 **Note:** This is not needed on testnet, as we (Matter Labs) are running an automatic tool that confirms withdrawals.

@@ -39,15 +40,20 @@ Your funds will be available in L1 in a couple of minutes.

 The tool created the withdrawal transaction and sent it directly to our server (so this is an L2 transaction). The zk
 server has received it and added it to its database. You can check it by querying the `transactions` table:

-```
+```shell
 # select * from transactions where hash = '\x`
 select * from transactions where hash = '\xe2c8a7beaf8879cb197555592c6eb4b6e4c39a772c3b54d1b93da14e419f4683';
 ```

 This will print a lot of columns, but let's start by looking at the `data` column:

-```
- {"value": "0x6124fee993bc0000", "calldata": "0x51cff8d9000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd", "factoryDeps": null, "contractAddress": "0x000000000000000000000000000000000000800a"}
+```json
+{
+  "value": "0x6124fee993bc0000",
+  "calldata": "0x51cff8d9000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd",
+  "factoryDeps": null,
+  "contractAddress": "0x000000000000000000000000000000000000800a"
+}
 ```

 We can use the ABI decoder tool to see what this call data means:

@@ -93,7 +99,7 @@ And `L1MessengerContract` (that is deployed at 0x8008).

 ### Committing to L1

 And how do these messages get into the L1? The `eth_sender` class from our server is taking care of this. You can see
-the details of the transactions that it posts to L1 in our database in `eth_txs` column.
+the details of the transactions that it posts to L1 in our database in the `eth_txs` table.

 If you look at the `tx_type` column (in psql), you can see that we have 3 different transaction types:

@@ -109,7 +115,7 @@ zksync_local=# select contract_address, tx_type from eth_txs;
 0x54e8159f006750466084913d5bd288d4afb1ee9a | ExecuteBlocks

-BTW - all the transactions are sent to the 0x54e address - which is the `DiamonProxy` deployed on L1 (this address will
+BTW - all the transactions are sent to the 0x54e address - which is the `DiamondProxy` deployed on L1 (this address will
 be different on your local node - see the previous tutorial for more info).

 And inside, all three methods above belong to

diff --git a/docs/advanced/README.md b/docs/advanced/README.md
index 65f2e38a1713..032153fb2dca 100644
--- a/docs/advanced/README.md
+++ b/docs/advanced/README.md
@@ -1,6 +1,6 @@
 # Advanced documentation

-This documentation is aimed at advanced users who are interested in developing the zkSyncERA itself (rather than just
+This documentation is aimed at advanced users who are interested in developing the zkSync Era itself (rather than just
 the contracts on top) - and would like to understand how the system works internally.

 The documents in this directory are not meant to be a full specification, but give you a rough understanding of the
@@ -8,7 +8,7 @@ system internals.
Suggested order of reading:

-- 01_initialization
-- deposit
-- withdrawal
-- contracts
+- [Initialization](01_initialization.md)
+- [Deposits](02_deposits.md)
+- [Withdrawals](03_withdrawals.md)
+- [Contracts](contracts.md)

diff --git a/docs/advanced/bytecode_compression.md b/docs/advanced/bytecode_compression.md
index bc676b0279ec..1941306feef4 100644
--- a/docs/advanced/bytecode_compression.md
+++ b/docs/advanced/bytecode_compression.md
@@ -2,6 +2,9 @@

 ## Overview

+As we are a rollup - all the bytecodes that contracts use in our chain must be copied into L1 (so that the chain can be
+reconstructed from L1 if needed).
+
 Given the want/need to cut down on space used, bytecode is compressed prior to being posted to L1. At a high level,
 bytecode is chunked into opcodes (which have a size of 8 bytes), assigned a 2 byte index, and the newly formed byte
 sequence (indexes) is verified and sent to L1. This process is split into 2 different parts: (1)
@@ -10,6 +13,35 @@ handling the compression and (2)
 [the system contract](https://github.com/matter-labs/system-contracts/blob/main/contracts/BytecodeCompressor.sol)
 verifying that the compression is correct before sending to L1.

+## Example
+
+Original bytecode
+
+```
+000000000000000A 000000000000000D 000000000000000A 000000000000000C
+000000000000000B 000000000000000B 000000000000000D 000000000000000A
+```
+
+Dictionary would be:
+
+```
+0 -> 0xA (count: 3)
+1 -> 0xD (count: 2, first seen: 1)
+2 -> 0xB (count: 2, first seen: 4)
+3 -> 0xC (count: 1)
+```
+
+Note that '1' maps to '0xD', as it occurs twice and its first occurrence is earlier than the first occurrence of '0xB',
+which also occurs twice.
+
+Compressed bytecode:
+
+```
+0008 0000 000000000000000A 000000000000000D 000000000000000B 000000000000000C
+
+0000 0001 0000 0003 0002 0002 0001 0000
+```
+
 ## Server Side Operator

 This is the part that is responsible for taking bytecode, that has already been chunked into 8 byte words, performing

diff --git a/docs/advanced/prover.md b/docs/advanced/prover.md
new file mode 100644
index 000000000000..e20c8ddb8d6a
--- /dev/null
+++ b/docs/advanced/prover.md
@@ -0,0 +1,442 @@
+# Overview
+
+The purpose of this document is to explain our new proof system from an engineering standpoint. We will examine the code
+examples and how the libraries communicate.
+
+Let's begin by discussing our constraint system. In the previous prover, we utilized the Bellman repository. However, in
+the new prover, the constraint system is implemented in Boojum.
+
+## Constraint system
+
+If you look at the boojum repo (src/cs/traits/cs.rs):
+
+```rust
+pub trait ConstraintSystem<F: SmallField>: Send + Sync {
+
+    ...
+    fn alloc_variable() -> Variable;
+    fn alloc_witness_without_value(&mut self) -> Witness;
+    fn place_gate<G: Gate<F>>(&mut self, gate: &G, row: usize);
+    ...
+}
+```
+
+We have three main components: `Variable`, `Witness`, and `Gate`.
+
+To understand the constraint system, imagine it as a list of "placeholders" called Variables. We define rules, referred
+to as "gates", for these Variables. The Witness represents a specific assignment of values to these Variables, ensuring
+that the rules still hold true.
+
+Conceptually, this is similar to how we implement functions. Consider the following example:
+
+```
+fn fun(x) {
+  y = x + A;
+  z = y * B;
+  w = if y { z } else { y }
+}
+```
+
+In this code snippet, `A`, `B`, `y`, `z`, and `w` are Variables (with `A` and `B` being constants). We establish rules,
+or gates, specifying that the Variable `z` must equal `y` multiplied by the Variable `B`.
+
+Example Witness assignment would be:
+
+```
+ x = 1; A = 3; y = 3; B = 0; z = 0; w = 3;
+```
+
+Gates can become more complex. For instance, the `w` case demonstrates a "selection" gate, which chooses one of two
+options depending on a condition.
+
+Now, let's delve into this gate for a more detailed examination:
+
+### Selection gate
+
+The code is in boojum/src/cs/gates/selection_gate.rs
+
+Let's delve deeper into the concept. Our goal is to create a gate that implements the logic
+`result = if selector { a } else { b };`. To accomplish this, we will require four variables.
+
+```rust
+pub struct SelectionGate {
+    pub a: Variable,
+    pub b: Variable,
+    pub selector: Variable,
+    pub result: Variable,
+}
+```
+
+Internally, the `Variable` object is `pub struct Variable(pub(crate) u64);` - so it is an index to a position within the
+constraint system object.
+
+And now let's see how we can add this gate into the system.
+
+```rust
+pub fn select<F: SmallField, CS: ConstraintSystem<F>>(
+    cs: &mut CS,
+    a: Variable,
+    b: Variable,
+    selector: Variable,
+) -> Variable {
+    // First, let's allocate the output variable:
+    let output_variable = cs.alloc_variable_without_value();
+    ...
+}
+```
+
+And then there is a block of code for witness evaluation (let's skip it for now), and the final block that adds the gate
+to the constraint system `cs`:
+
+```rust
+    if <CS::Config as CSConfig>::SetupConfig::KEEP_SETUP {
+        let gate = Self {
+            a,
+            b,
+            selector,
+            result: output_variable,
+        };
+        gate.add_to_cs(cs);
+    }
+
+    output_variable
+```
+
+So to recap - we took 3 'Variables', created the output one, created a `SelectionGate` object out of them, which we
+added to the system (by calling `add_to_cs`) - and then finally returned the output variable.
+
+But where is the 'logic'? Where do we actually enforce the constraint?
+
+For this, we have to look at the `Evaluator`:
+
+```rust
+impl<F: SmallField> Gate<F> for SelectionGate {
+    type Evaluator = SelectionGateConstraintEvaluator;
+
+    #[inline]
+    fn evaluator(&self) -> Self::Evaluator {
+        SelectionGateConstraintEvaluator
+    }
+}
+```
+
+```rust
+impl GateConstraintEvaluator for SelectionGateConstraintEvaluator {
+    fn evaluate_once(...) {
+        let a = trace_source.get_variable_value(0);
+        let b = trace_source.get_variable_value(1);
+        let selector = trace_source.get_variable_value(2);
+        let result = trace_source.get_variable_value(3);
+
+        // contribution = a * selector
+        let mut contribution = a;
+        contribution.mul_assign(&selector, ctx);
+
+        // tmp = 1 - selector
+        let mut tmp = P::one(ctx);
+        tmp.sub_assign(&selector, ctx);
+
+        // contribution += tmp * b
+        // So:
+        // contribution = a*selector + (1-selector) * b
+        P::mul_and_accumulate_into(&mut contribution, &tmp, &b, ctx);
+
+        // contribution = a*selector + (1-selector) * b - result
+        contribution.sub_assign(&result, ctx);
+
+        // And if we're successful, the contribution == 0.
+        // Because result == a * selector + (1-selector) * b
+        destination.push_evaluation_result(contribution, ctx);
+    }
+}
+```
+
+This evaluator is actually operating on top of the `Field` objects, trying to build & evaluate the correct polynomials.
+The details of it will be covered in a separate article.
+
+Congratulations, you've hopefully understood the code for the first gate. To recap - we created the 'output' Variable,
+and added the Gate to the CS system. Later, when the CS system 'computes' all the dependencies, it will run the
+constraint evaluator to add the 'raw' dependency (which is basically an equation) to the list.
+
+You can look into other files in `src/cs/gates` to see other examples.
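To see what the evaluator above actually enforces, stripped of all the circuit machinery: over the base field, `a*selector + (1 - selector)*b - result` must reduce to zero. Here is a minimal, self-contained sketch of that same equation using plain modular arithmetic over the Goldilocks prime that Boojum's base field uses - this is purely illustrative, not Boojum's `Field` API.

```rust
// The selection-gate equation, evaluated with plain modular arithmetic.
// Inputs are assumed to already be reduced modulo P.
const P: u128 = 0xffff_ffff_0000_0001; // Goldilocks prime: 2^64 - 2^32 + 1

fn contribution(a: u128, b: u128, selector: u128, result: u128) -> u128 {
    let t1 = a * selector % P;               // a * selector
    let t2 = (1 + P - selector) % P * b % P; // (1 - selector) * b
    (t1 + t2 + P - result) % P               // ... - result
}

fn main() {
    // selector = 1 picks `a`, selector = 0 picks `b`:
    assert_eq!(contribution(7, 42, 1, 7), 0);
    assert_eq!(contribution(7, 42, 0, 42), 0);
    // A wrong witness leaves a non-zero contribution, so the constraint fails:
    assert_ne!(contribution(7, 42, 1, 42), 0);
    println!("selection constraint holds for the correct witness");
}
```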
+
+## Structures
+
+Now that we've handled the basic variables, let's see what we can do with more complex structures. Boojum has added a
+bunch of derive macros to make development easier.
+
+Let's look at the example:
+
+```rust
+#[derive(Derivative, CSSelectable, CSAllocatable, CSVarLengthEncodable, WitnessHookable)]
+pub struct VmLocalState<F: SmallField> {
+    pub previous_code_word: UInt256<F>,
+    pub registers: [VMRegister<F>; REGISTERS_COUNT],
+    pub flags: ArithmeticFlagsPort<F>,
+    pub timestamp: UInt32<F>,
+    pub memory_page_counter: UInt32<F>,
+```
+
+First - all the UInt types that you see above are actually implemented in Boojum:
+
+```rust
+pub struct UInt32<F: SmallField> {
+    pub(crate) variable: Variable,
+}
+impl<F: SmallField> CSAllocatable<F> for UInt32<F> {
+    // So the 'witness' type (concrete value) for UInt32 is u32 - no surprise ;-)
+    type Witness = u32;
+    ...
+}
+
+pub struct UInt256<F: SmallField> {
+    pub inner: [UInt32<F>; 8],
+}
+```
+
+### WitnessHookable
+
+In the example above, the Witness type for UInt32 was u32 - easy. But what should we do when we have a more complex
+struct (like VmLocalState)?
+
+This derive will automatically create a new struct named XXWitness (in the example above, `VmLocalStateWitness`) that
+can be filled with concrete values.
+
+### CsAllocatable
+
+Implements CsAllocatable - which allows you to directly 'allocate' this struct within the constraint system (similarly
+to how we were operating on regular 'Variables' above).
+
+### CSSelectable
+
+Implements the `Selectable` trait - which allows this struct to participate in operations like conditionally select (so
+it can be used as 'a' or 'b' in the Select gate example above).
+
+### CSVarLengthEncodable
+
+Implements CircuitVarLengthEncodable - which allows encoding the struct into a vector of variables (think about it as
+serializing to bytes).
+
+### Summary
+
+Now with the tools above, we can do operations on our constraint system using more complex structures. So we have gates
+as 'complex operators' and structures as complex objects. Now we're ready to start taking it to the next level:
+Circuits.
+
+## Circuits
+
+Circuit definitions are spread across 2 separate repositories: `zkevm_circuits` and `zkevm_test_harness`.
+
+While we have around 9 different circuits (log_sorter, ram_permutation etc.) - in this article we'll focus only on one:
+MainVM - which is responsible for handling almost all of the VM operations (other circuits are used to handle some of
+the precompiles, and operations that happen after the VM was run - like preparing pubdata etc.).
+
+Looking at zkevm_test_harness, we can see the definition:
+
+```rust
+pub type VMMainCircuit<F, W> =
+    ZkSyncUniformCircuitInstance<F, VmMainInstanceSynthesisFunction<F, W>>;
+```
+
+### So what is a circuit?
+
+```rust
+pub struct ZkSyncUniformCircuitInstance<F: SmallField, S: ZkSyncUniformSynthesisFunction<F>> {
+    // Assignment of values to all the Variables.
+    pub witness: AtomicCell<Option<S::Witness>>,
+
+    // Configuration - that is circuit specific, in case of MainVM - the configuration
+    // is simply the amount of opcodes that we put within 1 circuit.
+    pub config: std::sync::Arc<S::Config>,
+
+    // Circuit 'friendly' hash function.
+    pub round_function: std::sync::Arc<S::RoundFunction>,
+
+    // Inputs to the circuits.
+    pub expected_public_input: Option<[F; INPUT_OUTPUT_COMMITMENT_LENGTH]>,
+}
+```
+
+Notice that the circuit doesn't have any 'explicit' outputs.
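As a brief aside before digging into the synthesis function - here is, hand-written and purely illustrative, the kind of mirror struct the `WitnessHookable` derive from the Structures section conceptually generates. The names below are ours (loosely modeled on `ArithmeticFlagsPort`), not the actual generated code, which lives in Boojum and zkevm_circuits.

```rust
// Illustrative only: a circuit-side struct holds Variables (indexes into the
// constraint system), while the derived `...Witness` mirror holds concrete
// values of the same shape.
#[derive(Debug)]
struct Variable(u64);

#[derive(Debug)]
struct ArithmeticFlagsPort {
    overflow: Variable,
    equal: Variable,
    greater_than: Variable,
}

// Roughly what `WitnessHookable` generates for it:
#[derive(Debug)]
struct ArithmeticFlagsPortWitness {
    overflow: bool,
    equal: bool,
    greater_than: bool,
}

fn main() {
    // The circuit only sees placeholders...
    let flags = ArithmeticFlagsPort {
        overflow: Variable(0),
        equal: Variable(1),
        greater_than: Variable(2),
    };
    // ...while the witness assigns them concrete values.
    let witness = ArithmeticFlagsPortWitness { overflow: false, equal: true, greater_than: false };
    println!("{:?}", flags);
    println!("{:?}", witness);
}
```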
+
+ZkSyncUniformCircuitInstance is a proxy, so let's look deeper, into the main function:
+
+```rust
+impl VmMainInstanceSynthesisFunction {
+    fn synthesize_into_cs_inner<CS: ConstraintSystem<F>>(
+        cs: &mut CS,
+        witness: Self::Witness,
+        round_function: &Self::RoundFunction,
+        config: Self::Config,
+    ) -> [Num<F>; INPUT_OUTPUT_COMMITMENT_LENGTH] {
+        main_vm_entry_point(cs, witness, round_function, config)
+    }
+}
+```
+
+This is the main logic that takes the witness (remember - Witness is a concrete assignment of values to Variables) -
+and returns the public input.
+
+If we look deeper into 'main_vm_entry_point' (which lives in the zkevm_circuits repo), we can see:
+
+```rust
+pub fn main_vm_entry_point<F: SmallField, CS: ConstraintSystem<F>, R, W>(
+    cs: &mut CS,
+    witness: VmCircuitWitness<F, W>,
+    round_function: &R,
+    limit: usize,
+) -> [Num<F>; INPUT_OUTPUT_COMMITMENT_LENGTH]
+```
+
+And in this function we do the following operations:
+
+```rust
+    // Prepare current 'state'
+    //
+    // First - unpack the witness
+    let VmCircuitWitness {
+        closed_form_input,
+        witness_oracle,
+    } = witness;
+
+    // And add it to the constraint system
+    let mut structured_input =
+        VmCircuitInputOutput::alloc_ignoring_outputs(cs, closed_form_input.clone());
+
+    let mut state =
+        VmLocalState::conditionally_select(cs, start_flag, &bootloader_state, &hidden_fsm_input);
+
+    // Notice that now state is a VmLocalState object - which contains 'Variables' inside.
+
+    // And now run the cycles
+    for _cycle_idx in 0..limit {
+        state = vm_cycle(
+            cs,
+            state,
+            &synchronized_oracle,
+            &per_block_context,
+            round_function,
+        );
+    }
+```
+
+The `vm_cycle` method is where the magic is - it takes a given opcode and creates all the necessary gates, temporary
+Variables etc. inside the constraint system. This method is around 800 lines long, so I'd encourage you to take a sneak
+peek if you're interested.
+
+Now that we've added all the constraints for the 'limit' number of opcodes, we have to do some additional housekeeping -
+like storing the Queue hashes (for memory, code decommitment etc.).
+
+And then we're ready to prepare the result of this method (input_commitment).
+
+```rust
+    // Prepare compact form (that contains just the hashes of values, rather than full values).
+    let compact_form =
+        ClosedFormInputCompactForm::from_full_form(cs, &structured_input, round_function);
+
+    // And serialize it.
+    let input_commitment: [_; INPUT_OUTPUT_COMMITMENT_LENGTH] =
+        commit_variable_length_encodable_item(cs, &compact_form, round_function);
+    input_commitment
+```
+
+## And now putting it all together
+
+Now let's look at the `run` method in '/src/external_calls.rs' in the zkevm_test_harness repo. This is used in many
+tests and tries to execute the whole flow end to end.
+
+And while the signature is quite scary - let's walk through this together:
+
+```rust
+pub fn run<
+    F: SmallField,
+    R: BuildableCircuitRoundFunction<F, 8, 12, 4> + AlgebraicRoundFunction<F, 8, 12, 4> + serde::Serialize + serde::de::DeserializeOwned,
+    H: RecursiveTreeHasher<F, Num<F>>,
+    EXT: FieldExtension<2, BaseField = F>,
+    S: Storage
+>(
+    caller: Address,                 // for real block must be zero
+    entry_point_address: Address,    // for real block must be the bootloader
+    entry_point_code: Vec<[u8; 32]>, // for real block must be the bootloader code
+    initial_heap_content: Vec<u8>,   // bootloader starts with non-deterministic heap
+    zk_porter_is_available: bool,
+    default_aa_code_hash: U256,
+    used_bytecodes: std::collections::HashMap<U256, Vec<[u8; 32]>>, // auxiliary information to avoid passing a full set of all used codes
+    ram_verification_queries: Vec<(u32, U256)>, // we may need to check that after the bootloader's memory is filled
+    cycle_limit: usize,
+    round_function: R, // used for all queues implementation
+    geometry: GeometryConfig,
+    storage: S,
+    tree: &mut impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf>,
+) -> (
+    BlockBasicCircuits,
+    BlockBasicCircuitsPublicInputs,
+    BlockBasicCircuitsPublicCompactFormsWitnesses,
+    SchedulerCircuitInstanceWitness,
+    BlockAuxilaryOutputWitness,
+)
+    where [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN + 1]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+    [(); as CSAllocatableExt>::INTERNAL_STRUCT_LEN]:,
+
+```
+
+The first section adds some decommitments (explained later).
+
+Then we create a vm:
+
+```rust
+    let mut out_of_circuit_vm =
+        create_out_of_circuit_vm(&mut tools, &block_properties, caller, entry_point_address);
+```
+
+And we'll run it over all the operands:
+
+```rust
+    for _cycle in 0..cycle_limit {
+        out_of_circuit_vm
+            .cycle(&mut tracer)
+            .expect("cycle should finish successfully");
+    }
+```
+
+While doing it, we collect 'snapshots' - detailed information about the state of the system between each operand.
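A hedged sketch of that snapshotting idea, with all names below being illustrative stand-ins rather than the actual zkevm_test_harness types: the machine state is recorded every `cycles_per_snapshot` cycles, so that each MainVM circuit instance can later start from snapshot N and re-execute just its own slice of the run.

```rust
// Illustrative only: splitting a long out-of-circuit run into per-circuit
// chunks by recording periodic state snapshots.
#[derive(Clone, Default)]
struct VmState {
    pc: u64,
    registers: [u64; 16],
}

fn run_with_snapshots(cycle_limit: usize, cycles_per_snapshot: usize) -> Vec<VmState> {
    let mut state = VmState::default();
    let mut snapshots = vec![state.clone()]; // initial state for the first circuit
    for cycle in 1..=cycle_limit {
        // stand-in for `out_of_circuit_vm.cycle(&mut tracer)`
        state.pc += 1;
        state.registers[0] = state.pc;
        if cycle % cycles_per_snapshot == 0 {
            snapshots.push(state.clone());
        }
    }
    snapshots
}

fn main() {
    // 100_000 cycles split into chunks of 25_000 gives 5 snapshots: the
    // starting points for 4 circuit instances, plus the final state.
    let snapshots = run_with_snapshots(100_000, 25_000);
    assert_eq!(snapshots.len(), 5);
}
```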
+
+Then we create a `Vec` of `VmInstanceWitness` - let's see what's inside:
+
+```rust
+pub struct VmInstanceWitness<F: SmallField, O: WitnessOracle<F>> {
+    // we need everything to start a circuit from this point in time
+
+    // initial state (state of registers etc.)
+    pub initial_state: VmLocalState,
+    pub witness_oracle: O,
+    pub auxilary_initial_parameters: VmInCircuitAuxilaryParameters,
+    pub cycles_range: std::ops::Range<u32>,
+
+    // final state for test purposes
+    pub final_state: VmLocalState,
+    pub auxilary_final_parameters: VmInCircuitAuxilaryParameters,
+}
+```
+
+With this, let's finally start creating circuits (via `create_leaf_level_circuits_and_scheduler_witness`):
+
+```rust
+
+    for (instance_idx, vm_instance) in vm_instances_witness.into_iter().enumerate() {
+        let instance = VMMainCircuit {
+            witness: AtomicCell::new(Some(circuit_input)),
+            config: Arc::new(geometry.cycles_per_vm_snapshot as usize),
+            round_function: round_function.clone(),
+            expected_public_input: Some(proof_system_input),
+        };
+
+        main_vm_circuits.push(instance);
+    }
+```

diff --git a/docs/external-node/01_intro.md b/docs/external-node/01_intro.md
index a88e210be228..4389e6c37920 100644
--- a/docs/external-node/01_intro.md
+++ b/docs/external-node/01_intro.md
@@ -120,6 +120,23 @@ Available methods:
 | -------------------- | ----- |
 | `web3_clientVersion` | |

+### `debug` namespace
+
+The `debug` namespace gives access to several non-standard RPC methods, which will allow developers to inspect and debug
+calls and transactions.
+
+This namespace is disabled by default and can be configured by setting `EN_API_NAMESPACES` as described in the
+[example config](docs/external-node/prepared_configs/mainnet-config.env).
+
+Available methods:
+
+| Method | Notes |
+| -------------------------- | ----- |
+| `debug_traceBlockByNumber` | |
+| `debug_traceBlockByHash` | |
+| `debug_traceCall` | |
+| `debug_traceTransaction` | |
+
 ### `zks` namespace

 This namespace contains rollup-specific extensions to the Web3 API. Note that _only methods_ specified in the
@@ -129,3 +146,8 @@ methods come without any kind of stability guarantees and can be changed or remo
 Always refer to the documentation linked above to see the list of stabilized methods in this namespace.

 [zks_docs]: https://era.zksync.io/docs/api/api.html#zksync-specific-json-rpc-methods
+
+### `en` namespace
+
+This namespace contains methods that external nodes call on the main node while syncing. If this namespace is enabled,
+other ENs can sync from this node.

diff --git a/docs/external-node/02_configuration.md b/docs/external-node/02_configuration.md
index 67c3a4c39ada..5f2d221bd14d 100644
--- a/docs/external-node/02_configuration.md
+++ b/docs/external-node/02_configuration.md
@@ -9,9 +9,9 @@ files as a starting point and modify only the necessary sections.

 The EN uses two databases: PostgreSQL and RocksDB.

-PostgreSQL serves as the main source of truth in the EN, so all the API requests fetch the state from there. PostgreSQL
-connection is configured by the `DATABASE_URL`. Additionally, the `DATABASE_POOL_SIZE` variable defines the size of the
-connection pool.
+PostgreSQL serves as the main source of truth in the EN, so all the API requests fetch the state from there. The
+PostgreSQL connection is configured by the `DATABASE_URL`. Additionally, the `DATABASE_POOL_SIZE` variable defines the
+size of the connection pool.

 RocksDB is used in components where IO is a bottleneck, such as the State Keeper and the Merkle tree. If possible, it is
 recommended to use an NVME SSD for RocksDB.
RocksDB requires two variables to be set: `EN_STATE_CACHE_PATH` and

@@ -33,17 +33,29 @@ The dockerized version of the server exposes the following ports:

 - HTTP JSON-RPC: 3060
 - WebSocket JSON-RPC: 3061
 - Prometheus listener: 3322
-- Healtcheck server: 3081
+- Healthcheck server: 3081

 While the configuration variables for them exist, you are not expected to change them unless you want to use the EN
 outside of the provided docker environment (not supported at the time of writing).

+**NOTE**: if the Prometheus port is configured, it must be [scraped](https://prometheus.io/docs/introduction/overview/)
+periodically to avoid a memory leak due to a
+[bug in an external metrics library](https://github.com/metrics-rs/metrics/issues/245). If you are not intending to use
+the metrics, leave this port unconfigured, and the metrics won't be collected.
+
 ## API limits

 There are variables that allow you to fine-tune the limits of the RPC servers, such as limits on the number of returned
 entries or the limit for the accepted transaction size. The provided files contain sane defaults that are recommended
 for use, but these can be edited, e.g. to make the EN more or less restrictive.

+## JSON-RPC API namespaces
+
+There are 7 total supported API namespaces: `eth`, `net`, `web3`, `debug` - standard ones; `zks` - rollup-specific one;
+`pubsub` - a.k.a. `eth_subscribe`; `en` - used by external nodes while syncing. You can configure which namespaces you
+want to enable using `EN_API_NAMESPACES`, specifying namespace names in a comma-separated list. By default, all but
+the `debug` namespace are enabled.
+
 ## Logging and observability

 `MISC_LOG_FORMAT` defines the format in which logs are shown: `plain` corresponds to the human-readable format, while

diff --git a/docs/external-node/03_running.md b/docs/external-node/03_running.md
index 9b50d1e0f2d8..66d1bfeb77bb 100644
--- a/docs/external-node/03_running.md
+++ b/docs/external-node/03_running.md
@@ -9,12 +9,27 @@ This configuration is approximate; expect updates to these specs.

 - 32-core CPU
 - 32GB RAM
-- 400GB SSD storage (NVMe recommended)
+- SSD storage:
+  - Testnet - ~800 GB (at the time of writing) and will grow over time, so should be constantly monitored
+  - Mainnet - ~400 GB (at the time of writing) and will grow over time, so should be constantly monitored
+  - NVMe recommended
 - 100 Mbps network connection.

+### A note about PostgreSQL storage
+
+By far, the heaviest table to maintain is the `call_traces` table. This table is only required for the `debug`
+namespace. If you want to clear some space and aren't using the `debug` namespace, you can:
+
+- clear it with a simple query `DELETE FROM call_traces;`
+- leave the `debug` namespace disabled via the `EN_API_NAMESPACES` env var as described in the
+  [example config](docs/external-node/prepared_configs/mainnet-config.env).
+
 ## Infrastructure

-You need to set up a PostgreSQL server capable of holding at least 1TB of data.
+You need to set up a PostgreSQL server with SSD storage:
+
+- Testnet - ~1TB (at the time of writing) and will grow over time, so should be constantly monitored
+- Mainnet - ~2TB (at the time of writing) and will grow over time, so should be constantly monitored

 Setting up Postgres is out of the scope of these docs, but the popular choice is to run it in Docker. There are many
 guides on that; [here's one example](https://www.docker.com/blog/how-to-use-the-postgres-docker-official-image/).
@@ -31,7 +46,8 @@ Besides running Postgres, you are expected to have a DB dump from a correspondin

 ## Running

-Assuming you have the EN Docker image and an env file with the prepared configuration, that is all you need.
+Assuming you have the EN Docker image, an env file with the prepared configuration, and you have restored your DB with
+the pg dump, that is all you need.

 Sample running command:

@@ -46,7 +62,17 @@ Helm charts and other infrastructure configuration options, if required, would b

 When you start the node for the first time, the state in PostgreSQL corresponds to the dump you have used, but the state
 in RocksDB (mainly the Merkle tree) is absent. Before the node can make any progress, it has to rebuild the state in
 RocksDB and verify consistency. The exact time required for that depends on the hardware configuration, but it is
-reasonable to expect the state rebuild on the mainnet to take more than 8 hours.
+reasonable to expect the state rebuild on the mainnet to take more than 20 hours.
+
+## Redeploying the EN with a new PG dump
+
+If you've been running the EN for some time and are going to redeploy it using a new PG dump, you should:
+
+- Stop the EN
+- Remove the SK cache (corresponding to `EN_STATE_CACHE_PATH`)
+- Remove your current DB
+- Restore with the new dump
+- Start the EN

 Monitoring the node behavior and analyzing the state it's in is covered in the
 [observability section](./04_observability.md).

diff --git a/docs/external-node/04_observability.md b/docs/external-node/04_observability.md
index 379f263de6bf..c3efc33a30bd 100644
--- a/docs/external-node/04_observability.md
+++ b/docs/external-node/04_observability.md
@@ -16,9 +16,12 @@ By default, latency histograms are distributed in the following buckets (in seco

 ## Metrics

-EN exposes a lot of metrics, a significant amount of which aren't interesting outside of the development flow. This
+EN exposes a lot of metrics, a significant amount of which aren't interesting outside the development flow. This
 section's purpose is to highlight metrics that may be worth observing in the external setup.

+If you are not planning to scrape Prometheus metrics, please unset the `EN_PROMETHEUS_PORT` environment variable to
+prevent a memory leak.
+
 | Metric name | Type | Labels | Description |
 | ---------------------------------------------- | --------- | ------------------------------------- | ------------------------------------------------------------------ |
 | `external_node_synced` | Gauge | - | 1 if synced, 0 otherwise. Matches `eth_call` behavior |

diff --git a/docs/external-node/05_troubleshooting.md b/docs/external-node/05_troubleshooting.md
index 1b60c3e880df..1046044572cd 100644
--- a/docs/external-node/05_troubleshooting.md
+++ b/docs/external-node/05_troubleshooting.md
@@ -22,6 +22,11 @@ will immediately crash.

 Other kinds of panic aren't normally expected. While in most cases the state will be recovered after a restart, please
 [report][contact_us] such cases to Matter Labs regardless.

+## Genesis Issues
+
+The EN is supposed to start with an applied DB dump. If you see any genesis-related errors, it probably means the EN was
+started without an applied dump.
+ [contact_us]: https://docs.zksync.io/contact/ ## Logs diff --git a/docs/external-node/prepared_configs/mainnet-config.env b/docs/external-node/prepared_configs/mainnet-config.env index 43589095a506..a50e00341136 100644 --- a/docs/external-node/prepared_configs/mainnet-config.env +++ b/docs/external-node/prepared_configs/mainnet-config.env @@ -25,8 +25,11 @@ EN_ETH_CLIENT_URL=http://127.0.0.1:8545 EN_HTTP_PORT=3060 # Port on which to serve the WebSocket JSONRPC API. EN_WS_PORT=3061 + # Port on which to serve metrics to be collected by Prometheus. -EN_PROMETHEUS_PORT=3322 +# If not set, metrics won't be collected. +# EN_PROMETHEUS_PORT=3322 + # Port on which to serve the healthcheck endpoint (to check if the service is live). EN_HEALTHCHECK_PORT=3081 @@ -53,6 +56,8 @@ EN_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 # Max possible size of an ABI encoded tx (in bytes). # This shouldn't be larger than the value on the main node. EN_MAX_TX_SIZE=1000000 +# Enabled JSON-RPC API namespaces. Also available: en, debug. +EN_API_NAMESPACES=eth,net,web3,zks,pubsub # Settings related to sentry and opentelemetry. MISC_LOG_FORMAT=plain @@ -90,8 +95,3 @@ EN_SENTRY_ENVIRONMENT=zksync_mainnet # ------------------------------------------------------------------------ ZKSYNC_HOME=/ - -EN_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 -EN_FAIR_L2_GAS_PRICE=250000000 -EN_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 -EN_FEE_ACCOUNT_ADDR=0x0000000000000000000000000000000000000000 diff --git a/docs/external-node/prepared_configs/testnet-config.env b/docs/external-node/prepared_configs/testnet-config.env index 7b975372e2c0..ff905f4d9311 100644 --- a/docs/external-node/prepared_configs/testnet-config.env +++ b/docs/external-node/prepared_configs/testnet-config.env @@ -25,8 +25,11 @@ EN_ETH_CLIENT_URL=http://127.0.0.1:8545 EN_HTTP_PORT=3060 # Port on which to serve the WebSocket JSONRPC API. EN_WS_PORT=3061 + # Port on which to serve metrics to be collected by Prometheus. -EN_PROMETHEUS_PORT=3322 +# If not set, metrics won't be collected. +# EN_PROMETHEUS_PORT=3322 + # Port on which to serve the healthcheck endpoint (to check if the service is live). EN_HEALTHCHECK_PORT=3081 @@ -53,6 +56,8 @@ EN_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 # Max possible size of an ABI encoded tx (in bytes). # This shouldn't be larger than the value on the main node. EN_MAX_TX_SIZE=1000000 +# Enabled JSON-RPC API namespaces. Also available: en, debug. +EN_API_NAMESPACES=eth,net,web3,zks,pubsub # Settings related to sentry and opentelemetry. 
MISC_LOG_FORMAT=plain @@ -90,8 +95,3 @@ EN_SENTRY_ENVIRONMENT=zksync_testnet # ------------------------------------------------------------------------ ZKSYNC_HOME=/ - -EN_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 -EN_FAIR_L2_GAS_PRICE=250000000 -EN_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 -EN_FEE_ACCOUNT_ADDR=0x0000000000000000000000000000000000000000 diff --git a/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol b/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol new file mode 100644 index 000000000000..83321ec47271 --- /dev/null +++ b/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +import {MIMIC_CALL_CALL_ADDRESS, SystemContractsCaller, CalldataForwardingMode} from "../custom-account/SystemContractsCaller.sol"; +import "../custom-account/interfaces/IContractDeployer.sol"; + +import { DEPLOYER_SYSTEM_CONTRACT, FORCE_DEPLOYER } from "../custom-account/Constants.sol"; +import "./msg-sender.sol"; + +contract ComplexUpgrade { + constructor() {} + + function mimicCall( + address _address, + address _whoToMimic, + bytes memory _calldata + ) internal { + address callAddr = MIMIC_CALL_CALL_ADDRESS; + + uint32 dataStart; + uint32 dataLength; + assembly { + dataStart := add(_calldata, 0x20) + dataLength := mload(_calldata) + } + + uint256 farCallAbi = SystemContractsCaller.getFarCallABI( + 0, + 0, + dataStart, + dataLength, + uint32(gasleft()), + // Only rollup is supported for now + 0, + CalldataForwardingMode.UseHeap, + false, + true + ); + + assembly { + let success := call(_address, callAddr, 0, farCallAbi, _whoToMimic, 0, 0) + + if iszero(success) { + returndatacopy(0, 0, returndatasize()) + revert(0, returndatasize()) + } + } + } + + // This function is used to imitate some complex upgrade logic + function someComplexUpgrade( + address _address1, + address _address2, + bytes32 _bytecodeHash + ) external { + IContractDeployer.ForceDeployment memory forceDeployment1 = IContractDeployer.ForceDeployment( + _bytecodeHash, + _address1, + false, + 0, + new bytes(0) + ); + + IContractDeployer.ForceDeployment memory forceDeployment2 = IContractDeployer.ForceDeployment( + _bytecodeHash, + _address2, + false, + 0, + new bytes(0) + ); + + IContractDeployer.ForceDeployment[] memory deploymentInput1 = new IContractDeployer.ForceDeployment[](1); + deploymentInput1[0] = forceDeployment1; + + IContractDeployer.ForceDeployment[] memory deploymentInput2 = new IContractDeployer.ForceDeployment[](1); + deploymentInput2[0] = forceDeployment2; + + DEPLOYER_SYSTEM_CONTRACT.forceDeployOnAddresses(deploymentInput1); + DEPLOYER_SYSTEM_CONTRACT.forceDeployOnAddresses(deploymentInput2); + + // Here we also test the fact that complex upgrade implementation can use mimicCall + MsgSenderTest msgSenderTest = new MsgSenderTest(); + address toMimic = address(0x1); + bytes memory _mimicCallCalldata = abi.encodeWithSelector( + MsgSenderTest.testMsgSender.selector, + toMimic + ); + mimicCall(address(msgSenderTest), toMimic, _mimicCallCalldata); + } +} diff --git a/etc/contracts-test-data/contracts/complex-upgrade/msg-sender.sol b/etc/contracts-test-data/contracts/complex-upgrade/msg-sender.sol new file mode 100644 index 000000000000..0388f2f54080 --- /dev/null +++ b/etc/contracts-test-data/contracts/complex-upgrade/msg-sender.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +contract MsgSenderTest { + function testMsgSender( + 
address _expectedSender + ) external view { + require(msg.sender == _expectedSender, "Wrong sender"); + } +} diff --git a/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol b/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol index b31764a8de27..e4d241116a1e 100644 --- a/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol +++ b/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol @@ -1,8 +1,8 @@ -// SPDX-License-Identifier: MIT OR Apache-2.0 +// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; +pragma solidity ^0.8; -import {MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT, MSG_VALUE_SYSTEM_CONTRACT} from "./Constants.sol"; +import {MSG_VALUE_SYSTEM_CONTRACT, MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT} from "./Constants.sol"; import "./Utils.sol"; // Addresses used for the compiler to be replaced with the @@ -72,12 +72,7 @@ library SystemContractsCaller { /// @param data The calldata. /// @return success Whether the transaction has been successful. /// @dev Note, that the `isSystem` flag can only be set when calling system contracts. - function systemCall( - uint32 gasLimit, - address to, - uint128 value, - bytes memory data - ) internal returns (bool success) { + function systemCall(uint32 gasLimit, address to, uint256 value, bytes memory data) internal returns (bool success) { address callAddr = SYSTEM_CALL_CALL_ADDRESS; uint32 dataStart; @@ -105,19 +100,13 @@ library SystemContractsCaller { success := call(to, callAddr, 0, 0, farCallAbi, 0, 0) } } else { - require(value <= MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT, "Value can not be greater than 2**128"); - // We must direct the call through the MSG_VALUE_SIMULATOR - // The first abi param for the MSG_VALUE_SIMULATOR carries - // the value of the call and whether the call should be a system one - // (in our case, it should be) - uint256 abiParam1 = (MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT | value); - - // The second abi param carries the address to call. - uint256 abiParam2 = uint256(uint160(to)); - address msgValueSimulator = MSG_VALUE_SYSTEM_CONTRACT; + // We need to supply the mask to the MsgValueSimulator to denote + // that the call should be a system one. + uint256 forwardMask = MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT; + assembly { - success := call(msgValueSimulator, callAddr, abiParam1, abiParam2, farCallAbi, 0, 0) + success := call(msgValueSimulator, callAddr, value, to, farCallAbi, forwardMask, 0) } } } @@ -145,7 +134,7 @@ library SystemContractsCaller { returnData = new bytes(size); assembly { - returndatacopy(add(returnData, 0x20), 0, size) + returndatacopy(add(returnData, 0x20), 0, size) } } @@ -154,7 +143,7 @@ library SystemContractsCaller { /// @param to The address to call. /// @param value The value to pass with the transaction. /// @param data The calldata. - /// @return returnData The returndata of the transaction. In case the transaction reverts, the error + /// @return returnData The returndata of the transaction. In case the transaction reverts, the error /// bubbles up to the parent frame. /// @dev Note, that the `isSystem` flag can only be set when calling system contracts. 
function systemCallWithPropagatedRevert( @@ -166,7 +155,7 @@ library SystemContractsCaller { bool success; (success, returnData) = systemCallWithReturndata(gasLimit, to, value, data); - if(!success) { + if (!success) { assembly { let size := mload(returnData) revert(add(returnData, 0x20), size) @@ -217,8 +206,8 @@ library SystemContractsCaller { /// [96..128) bits -- the length of the slice. /// [128..192) bits -- empty bits. /// [192..224) bits -- gasPassed. - /// [224..232) bits -- shard id. - /// [232..240) bits -- forwarding_mode + /// [224..232) bits -- forwarding_mode + /// [232..240) bits -- shard id. /// [240..248) bits -- constructor call flag /// [248..256] bits -- system call flag function getFarCallABI( @@ -232,18 +221,46 @@ library SystemContractsCaller { bool isConstructorCall, bool isSystemCall ) internal pure returns (uint256 farCallAbi) { + // Fill in the call parameter fields + farCallAbi = getFarCallABIWithEmptyFatPointer( + gasPassed, + shardId, + forwardingMode, + isConstructorCall, + isSystemCall + ); + // Fill in the fat pointer fields farCallAbi |= dataOffset; farCallAbi |= (uint256(memoryPage) << 32); farCallAbi |= (uint256(dataStart) << 64); farCallAbi |= (uint256(dataLength) << 96); - farCallAbi |= (uint256(gasPassed) << 192); - farCallAbi |= (uint256(shardId) << 224); - farCallAbi |= (uint256(forwardingMode) << 232); + } + + /// @notice Calculates the packed representation of the FarCallABI with zero fat pointer fields. + /// @param gasPassed The gas to pass with the call. + /// @param shardId Of the account to call. Currently only 0 is supported. + /// @param forwardingMode The forwarding mode to use: + /// - provide CalldataForwardingMode.UseHeap when using your current memory + /// - provide CalldataForwardingMode.ForwardFatPointer when using custom pointer. + /// @param isConstructorCall Whether the call will be a call to the constructor + /// (ignored when the caller is not a system contract). + /// @param isSystemCall Whether the call will have the `isSystem` flag. + /// @return farCallAbiWithEmptyFatPtr The far call ABI with zero fat pointer fields. 
+ function getFarCallABIWithEmptyFatPointer( + uint32 gasPassed, + uint8 shardId, + CalldataForwardingMode forwardingMode, + bool isConstructorCall, + bool isSystemCall + ) internal pure returns (uint256 farCallAbiWithEmptyFatPtr) { + farCallAbiWithEmptyFatPtr |= (uint256(gasPassed) << 192); + farCallAbiWithEmptyFatPtr |= (uint256(forwardingMode) << 224); + farCallAbiWithEmptyFatPtr |= (uint256(shardId) << 232); if (isConstructorCall) { - farCallAbi |= (1 << 240); + farCallAbiWithEmptyFatPtr |= (1 << 240); } if (isSystemCall) { - farCallAbi |= (1 << 248); + farCallAbiWithEmptyFatPtr |= (1 << 248); } } } diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol b/etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol index aa8a7718c8da..f9a6db8c4676 100644 --- a/etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol +++ b/etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol @@ -93,6 +93,8 @@ interface IContractDeployer { bytes32 bytecodeHash; // The address on which to deploy the bytecodehash to address newAddress; + // Whether to call the constructor or not + bool callConstructor; // The value with which to initialize a contract uint256 value; // The constructor calldata diff --git a/etc/contracts-test-data/hardhat.config.ts b/etc/contracts-test-data/hardhat.config.ts index 59080306c84e..1828a5547c78 100644 --- a/etc/contracts-test-data/hardhat.config.ts +++ b/etc/contracts-test-data/hardhat.config.ts @@ -2,7 +2,7 @@ import '@matterlabs/hardhat-zksync-solc'; export default { zksolc: { - version: '1.3.1', + version: '1.3.7', compilerSource: 'binary', settings: { isSystem: true diff --git a/etc/env/base/api.toml b/etc/env/base/api.toml index 4d9e9190a567..3bfe29ab5437 100644 --- a/etc/env/base/api.toml +++ b/etc/env/base/api.toml @@ -43,15 +43,11 @@ account_pks=[ estimate_gas_scale_factor=1.2 estimate_gas_acceptable_overestimation=1000 max_tx_size=1000000 -# Configuration for the explorer API -[api.explorer] -# Port for the explorer API. +# Configuration for the contract verification API +[api.contract_verification] +# Port for the contract verification API. port=3070 url="http://127.0.0.1:3070" -# Interval between polling db for network stats (in ms). -network_stats_polling_interval=1000 -req_entities_limit=100 -offset_limit=250 threads_per_server=128 # Configuration for the prometheus exporter server. 
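Returning to the `SystemContractsCaller.sol` change a few files above: the far-call ABI is a single bit-packed 256-bit word, and this diff swaps the positions of the shard id and the forwarding mode. Below is a minimal Rust sketch of packing the upper bits under the updated layout; the function name and the u128 stand-in for the upper half of a uint256 are ours, purely illustrative.

```rust
// Illustrative sketch of the far-call ABI packing described in
// SystemContractsCaller.sol above, after this diff's layout change:
// gasPassed at bits [192..224), forwarding_mode at [224..232),
// shard id at [232..240), constructor flag at 240, system-call flag at 248.
// We model only the upper 128 bits of the word, shifted down by 128.
fn far_call_abi_high_bits(
    gas_passed: u32,
    shard_id: u8,
    forwarding_mode: u8,
    is_constructor_call: bool,
    is_system_call: bool,
) -> u128 {
    let mut hi: u128 = 0;
    hi |= (gas_passed as u128) << (192 - 128);
    hi |= (forwarding_mode as u128) << (224 - 128);
    hi |= (shard_id as u128) << (232 - 128);
    if is_constructor_call {
        hi |= 1u128 << (240 - 128);
    }
    if is_system_call {
        hi |= 1u128 << (248 - 128);
    }
    hi
}

fn main() {
    // A system call with 1_000_000 gas, shard 0, heap forwarding (mode 0).
    let hi = far_call_abi_high_bits(1_000_000, 0, 0, false, true);
    // The system-call flag ends up as the top bit of the word.
    assert_eq!(hi >> (248 - 128), 1);
    println!("upper 128 bits: 0x{hi:032x}");
}
```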
diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 225a910d4703..41c0c7c05f60 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -4,6 +4,7 @@ [contracts] DIAMOND_INIT_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" DIAMOND_UPGRADE_INIT_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" +DEFAULT_UPGRADE_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" MAILBOX_FACET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" DIAMOND_CUT_FACET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" EXECUTOR_FACET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" @@ -11,6 +12,7 @@ GOVERNANCE_FACET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" GETTERS_FACET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" VERIFIER_ADDR="0xDAbb67b676F5b01FcC8997Cc8439846D0d8078ca" DIAMOND_PROXY_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" L1_ERC20_BRIDGE_PROXY_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_ERC20_BRIDGE_IMPL_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L2_ERC20_BRIDGE_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" @@ -19,9 +21,10 @@ L1_ALLOW_LIST_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" CREATE2_FACTORY_ADDR="0xce0042B868300000d44A59004Da54A005ffdcf9f" VALIDATOR_TIMELOCK_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 -VK_COMMITMENT_BASIC_CIRCUITS="0x0a3657f884af32d3a573c5fdb3440c9ac45271ede8c982faeaae7434d032ab3e" -VK_COMMITMENT_LEAF="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" -VK_COMMITMENT_NODE="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" +RECURSION_SCHEDULER_LEVEL_VK_HASH="0x548dcd0bcf9f94b9d27b7adebf4afb7f28e540277b6757b56a10fb587cb71919" +RECURSION_NODE_LEVEL_VK_HASH="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" +RECURSION_LEAF_LEVEL_VK_HASH="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" +RECURSION_CIRCUITS_SET_VKS_HASH="0x0a3657f884af32d3a573c5fdb3440c9ac45271ede8c982faeaae7434d032ab3e" GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" GENESIS_ROOT="0x2d5ab622df708ab44944bb02377be85b6f27812e9ae520734873b7a193898ba4" PRIORITY_TX_MAX_GAS_LIMIT=72000000 @@ -35,6 +38,8 @@ L1_WETH_TOKEN_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_BRIDGE_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_IMPL_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_PROXY_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" +FRI_VK_COMMITMENT_LEAF ="[0x72167c43a46cf388, 0x75b267d67716edc4, 0x563861364a3c03ab, 0x7aee73498421e828]" +FRI_VK_COMMITMENT_NODE ="[0x5a3ef282b21e12fe, 0x1f4438e5bb158fc5, 0x060b160559c5158c, 0x6389d62d9fe3d080]" [contracts.test] dummy_verifier=true diff --git a/etc/env/base/database.toml b/etc/env/base/database.toml index f841829e388c..50767b344d38 100644 --- a/etc/env/base/database.toml +++ b/etc/env/base/database.toml @@ -1,12 +1,16 @@ [database] # Path to the directory that contains RocksDB with VM state cache. state_keeper_db_path="./db/main/state_keeper" -# Path to the directory that contains RocksDB backups for Merkle tree. -merkle_tree_backup_path="./db/main/backups" -# Path to the directory that contains RocksDB with Merkle tree. -new_merkle_tree_ssd_path="./db/main/tree" backup_count=5 backup_interval_ms=60000 -max_block_batch=100 # Amount of open connections to the database. pool_size=50 +# Postgres statement timeout. 
Applies only to the replica connection pool +# used by the API servers. +statement_timeout_sec=300 + +[database.merkle_tree] +# Path to the directory that contains RocksDB with Merkle tree. +path="./db/main/tree" +# Path to the directory that contains RocksDB backups for Merkle tree. +backup_path="./db/main/backups" diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index 59877f2c26e7..9e97492c9537 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -7,3 +7,6 @@ base_layer_circuit_ids_to_be_verified="1" recursive_layer_circuit_ids_to_be_verified="1" setup_load_mode="FromDisk" specialized_group_id=100 +witness_vector_generator_thread_count=5 +queue_capacity=10 +witness_vector_receiver_port=4000 diff --git a/etc/env/base/fri_witness_vector_generator.toml b/etc/env/base/fri_witness_vector_generator.toml new file mode 100644 index 000000000000..1e8837965bb1 --- /dev/null +++ b/etc/env/base/fri_witness_vector_generator.toml @@ -0,0 +1,8 @@ +[fri_witness_vector_generator] +prover_instance_wait_timeout_in_secs=200 +prover_instance_poll_time_in_milli_secs=250 +prometheus_listener_port=3314 +prometheus_pushgateway_url="http://127.0.0.1:9091" +prometheus_push_interval_ms=100 +specialized_group_id=100 +max_prover_reservation_duration_in_secs=1000 diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml new file mode 100644 index 000000000000..5f51c9cf17a6 --- /dev/null +++ b/etc/env/base/proof_data_handler.toml @@ -0,0 +1,3 @@ +[proof_data_handler] +http_port=3320 +proof_generation_timeout_in_secs=18000 \ No newline at end of file diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 6293515e3e1b..28b197ba2027 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -31,6 +31,8 @@ zksync_external_node=info,\ cross_nodes_checker=debug,\ zksync_witness_generator=info,\ zksync_prover_fri=info,\ +slot_index_consistency_checker=debug,\ +zksync_witness_vector_generator=info,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/env/ext-node-docker.toml b/etc/env/ext-node-docker.toml index dfb4bb633302..c51006336562 100644 --- a/etc/env/ext-node-docker.toml +++ b/etc/env/ext-node-docker.toml @@ -5,7 +5,7 @@ zksync_action="dont_ask" # Needed to run integration tests. l1_rpc_address = "http://geth:8545" -api_explorer_url="http://127.0.0.1:3070" +api_contract_verification_url="http://127.0.0.1:3070" [en] http_port = 3060 @@ -20,15 +20,12 @@ req_entities_limit = 10000 state_cache_path = "./db/ext-node/state_keeper" merkle_tree_path = "./db/ext-node/lightweight" -max_blocks_per_tree_batch = 20 +max_l1_batches_per_tree_iter = 20 main_node_url = "http://127.0.0.1:3050" eth_client_url = "http://geth:8545" -fee_account_addr = "0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" -max_allowed_l2_tx_gas_limit = 4000000000 -fair_l2_gas_price = 250000000 -validation_computational_gas_limit = 300000 +api_namespaces = ["eth", "web3", "net", "pubsub", "zks", "en", "debug"] # Note: # `bootloader_hash` and `default_aa_hash` are overridden from the `.init.env` values by `zk` tool. 
diff --git a/etc/env/ext-node.toml b/etc/env/ext-node.toml index f8a43624dbb3..204ed6d7da2f 100644 --- a/etc/env/ext-node.toml +++ b/etc/env/ext-node.toml @@ -20,21 +20,22 @@ req_entities_limit = 10000 state_cache_path = "./db/ext-node/state_keeper" merkle_tree_path = "./db/ext-node/lightweight" -max_blocks_per_tree_batch = 20 +max_l1_batches_per_tree_iter = 20 main_node_url = "http://127.0.0.1:3050" eth_client_url = "http://127.0.0.1:8545" -fee_account_addr = "0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" -max_allowed_l2_tx_gas_limit = 4000000000 -fair_l2_gas_price = 250000000 -validation_computational_gas_limit = 300000 +api_namespaces = ["eth", "web3", "net", "pubsub", "zks", "en", "debug"] # Note: # `bootloader_hash` and `default_aa_hash` are overridden from the `.init.env` values by `zk` tool. bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b594c" default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" +# Whether to try using MultiVM +# WARN: Only to be used locally until the feature is fully tested. +experimental_multivm_support = false + [rust] # `RUST_LOG` environmnet variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. diff --git a/etc/openzeppelin-contracts b/etc/openzeppelin-contracts deleted file mode 160000 index e7f6deb03e2d..000000000000 --- a/etc/openzeppelin-contracts +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e7f6deb03e2d2bf750554d7aff14b0f584009718 diff --git a/etc/tokens/goerli.json b/etc/tokens/goerli.json index 3e42e7eaec93..65790bc7dc8a 100644 --- a/etc/tokens/goerli.json +++ b/etc/tokens/goerli.json @@ -22,5 +22,11 @@ "symbol": "DAI", "decimals": 18, "address": "0x5C221E77624690fff6dd741493D735a17716c26B" + }, + { + "name": "Wrapped Ether", + "symbol": "WETH", + "decimals": 18, + "address": "0xB4FBF271143F4FBf7B91A5ded31805e42b2208d6" } ] diff --git a/etc/upgrades/1692195639-upgrade-system/common.json b/etc/upgrades/1692195639-upgrade-system/common.json new file mode 100644 index 000000000000..4aa0c5bacd07 --- /dev/null +++ b/etc/upgrades/1692195639-upgrade-system/common.json @@ -0,0 +1,5 @@ +{ + "name": "upgrade-system", + "creationTimestamp": 1692195639, + "protocolVersion": "12" +} \ No newline at end of file diff --git a/etc/upgrades/1692195639-upgrade-system/stage2/facetCuts.json b/etc/upgrades/1692195639-upgrade-system/stage2/facetCuts.json new file mode 100644 index 000000000000..3a1f9b3c57b0 --- /dev/null +++ b/etc/upgrades/1692195639-upgrade-system/stage2/facetCuts.json @@ -0,0 +1,190 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + 
"0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xed6d06c0", + "0x86cb9909", + "0x0707ac09", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x5437988d", + "0x0b508883" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0xc6f7e57C6e1e20468D869Fe33675524e243CD6a0", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x7444DE636699F080cA1C033528D2bB3705B391Ce", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x5349E94435Cc9Cab9FfB40A492DA46935052733A", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xB54f822966FD4940b6fb465AC67075e5119094C3", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x2E64926BE35412f7710A3E097Ba076740bF97CC0", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1692195639-upgrade-system/stage2/facets.json b/etc/upgrades/1692195639-upgrade-system/stage2/facets.json new file mode 100644 index 000000000000..9fcdac88c7e5 --- /dev/null +++ b/etc/upgrades/1692195639-upgrade-system/stage2/facets.json @@ -0,0 +1,22 @@ +{ + "ExecutorFacet": { + "address": "0xB54f822966FD4940b6fb465AC67075e5119094C3", + "txHash": "0xebe43e4c37e9581e5cc0c7d8a498e99ae255aee8169f2f3357e2a10d02705ab9" + }, + "GovernanceFacet": { + "address": "0x2E64926BE35412f7710A3E097Ba076740bF97CC0", + "txHash": "0xf3555fdc95f7df54b5eee67e80d0b22b831a58d84fbf7d384b6794a968360189" + }, + "DiamondCutFacet": { + "address": "0xc6f7e57C6e1e20468D869Fe33675524e243CD6a0", + "txHash": "0x732ba438007d7a30871f38594efe14565ca03eaf2b30fc440977b33052a2c0a5" + }, + "GettersFacet": { + "address": "0x7444DE636699F080cA1C033528D2bB3705B391Ce", + "txHash": "0xd5fcb3dc88a933810f502384882668a8068d35c097a11cde732c75fef5b562d6" + }, + "MailboxFacet": { + "address": "0x5349E94435Cc9Cab9FfB40A492DA46935052733A", + "txHash": "0xe0f50dd600de1240d6b753cf5a88e97679f754e3e6d0e93ba66877da0ca70d0e" + } +} \ No newline at end of file diff --git a/etc/upgrades/1692195639-upgrade-system/stage2/l2Upgrade.json b/etc/upgrades/1692195639-upgrade-system/stage2/l2Upgrade.json new file mode 100644 index 000000000000..63fbb387322e --- /dev/null +++ 
b/etc/upgrades/1692195639-upgrade-system/stage2/l2Upgrade.json @@ -0,0 +1,211 @@ +{ + "systemContracts": [ + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x01000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce717970105" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x0100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b4" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd3" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x0100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf03" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2EthToken", + "bytecodeHashes": [ + "0x010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "BytecodeCompressor", + "bytecodeHashes": [ + "0x010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16" + ], + "address": "0x000000000000000000000000000000000000800f" + } + ], + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c61" + ] + }, + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x0100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf" + ] + }, + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "12", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000
0000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x01000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce717970105", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b4", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd3", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf03", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 
0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": "0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000
00000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000
0000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1692195639-upgrade-system/stage2/transactions.json b/etc/upgrades/1692195639-upgrade-system/stage2/transactions.json new file mode 100644 index 000000000000..9cc3c1eb7fd1 --- /dev/null +++ b/etc/upgrades/1692195639-upgrade-system/stage2/transactions.json @@ -0,0 +1,248 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "12", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000
0000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c61", + "defaultAccountHash": "0x0100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x64ddf6a8" + }, + "factoryDeps": [], + "newProtocolVersion": "12", + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000f80010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c610100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fa00000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000064ddf6a8000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000d800000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000ac4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b40000000000000000
0000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xdb876240F01A6dd38F5EFC4EcEFe52e5C13dB3C7", + "protocolVersion": "12", + "diamondUpgradeProposalId": { + "type": "BigNumber", + "hex": "0x02" + }, + "upgradeTimestamp": "1692268200", + "proposeTransparentUpgradeCalldata": "0x8043760a000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000db876240f01a6dd38f5efc4ecefe52e5c13db3c700000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000008600000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000128000000000000000000000000000000000000000000000000000000000000013e00000000000000000000000000000000000000000000000000000000000001500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000
000000000000000000000000000000000000eb6724190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000ae58bb63900000000000000000000000000000000000000000000000000000000ed6d06c00000000000000000000000000000000000000000000000000000000086cb9909000000000000000000000000000000000000000000000000000000000707ac0900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000005437988d000000000000000000000000000000000000000000000000000000000b508883000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c700000000000000000000000000000000000000000000000000000000000000000000000000000000c6f7e57c6e1e20468d869fe33675524e243cd6a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c7000000000000000000000000000000000000000000000000000000000000000000000000000000007444de636699f080ca1c033528d2bb3705b391ce0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce00000000000000000000000000
00000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000005349e94435cc9cab9ffb40a492da46935052733a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000b54f822966fd4940b6fb465ac67075e5119094c300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a000000000000000000000000000000000000000000000000000000000000000000000000000000002e64926be35412f7710a3e097ba076740bf97cc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000f80010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c610100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fa00000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000064ddf6a8000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008
007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000d800000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000ac4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000
0000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "transparentUpgrade": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xed6d06c0", + "0x86cb9909", + "0x0707ac09", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x5437988d", + "0x0b508883" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0xc6f7e57C6e1e20468D869Fe33675524e243CD6a0", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x7444DE636699F080cA1C033528D2bB3705B391Ce", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x5349E94435Cc9Cab9FfB40A492DA46935052733A", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xB54f822966FD4940b6fb465AC67075e5119094C3", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x2E64926BE35412f7710A3E097Ba076740bF97CC0", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0xdb876240F01A6dd38F5EFC4EcEFe52e5C13dB3C7", + "initCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000f80010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c610100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fa00000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000064ddf6a8000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000d800000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000ac4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b40000000000000000
0000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "executeUpgradeCalldata": "0x36d4eb84000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000db876240f01a6dd38f5efc4ecefe52e5c13db3c700000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000008600000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000128000000000000000000000000000000000000000000000000000000000000013e00000000000000000000000000000000000000000000000000000000000001500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000ae58bb63900000000000000000000000000000000000000000000000000000000ed6d06c00000000000000000000000000000000000000000000000000000000086cb9909000000000000000000000000000000000000000000000000000000000707ac0900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000005437988d000000000000000000000000000000000000000000000000000000000b508883000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb929700000000000
00000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c700000000000000000000000000000000000000000000000000000000000000000000000000000000c6f7e57c6e1e20468d869fe33675524e243cd6a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c7000000000000000000000000000000000000000000000000000000000000000000000000000000007444de636699f080ca1c033528d2bb3705b391ce0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d70000000000000000000000000000000000000000000000
0000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000005349e94435cc9cab9ffb40a492da46935052733a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000b54f822966fd4940b6fb465ac67075e5119094c300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a000000000000000000000000000000000000000000000000000000000000000000000000000000002e64926be35412f7710a3e097ba076740bf97cc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000f80010007af2271352ac99f97670a7d6999ae63b53f4e9480a957091d8848a46c610100067d592a040e8914eda295f3521561d64b1a4c1b9e6dbd2933093102febf00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fa00000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000064ddf6a8000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000d800000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000ac4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000004800000000000000000000000000000000000000000000000000000000000000540000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006c000000000000000000000000000000000000000000000000000000000000007800000000000000000000000000000000000000000000000000000000000000840000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009c001000091af8506f97c5a2b2ff4edf06ba1bfd2ec2304056fb9987ce71797010500000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012f78c4022788ff0deb6c282f93df2b209d3c819999c3384498f1fec9b400000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008fa5c8a47d82ec783ad7cdefd382f5b031e7ef0754fcf242ccb4c65fd300000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000475561147799af163bef4d19262be10ed2d4a8977f793a195cd077e83d00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010005c17cd3ee506e5156f76afda69c905dfdc22578bec37609a1693bba4fdc000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100007562a15173ca081ddb179bb321500997ba6b43b15cf601cdf266af1cdb00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000711288f63c4cd27ecd73df769b834eeda2645f6567beea6b7ed44aaf0300000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001397d906d62832c455f701b9f81267a1687e52210986d4e943b3f440d7d000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000f1d9ad80b5eb80ad14232d7438b1c09314dda76f296eca922da87dbfcc000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009ad8e522bb120c5f52d2236035edeb0a4259b89c3429c5ece7d1dc50286000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000e1066c916b985495ac9e671ae6930e373e9b5074499c7f6014c439a04a000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005b3e7150f7f6daac7cc9a46522f5ad593f9a3e3f0573282b41f86acf16000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git 
a/infrastructure/openzeppelin-tests-preparation/package.json b/infrastructure/openzeppelin-tests-preparation/package.json deleted file mode 100644 index e23fcf34f96a..000000000000 --- a/infrastructure/openzeppelin-tests-preparation/package.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "openzeppelin-tests-preparation", - "version": "1.0.0", - "main": "build/index.js", - "license": "MIT", - "dependencies": { - "axios": "^0.27.2", - "ethers": "~5.7.0", - "fs": "^0.0.1-security", - "path": "^0.12.7", - "ts-node": "^10.7.0", - "zksync-web3": "link:../../sdk/zksync-web3.js" - }, - "devDependencies": { - "typescript": "^4.5.5" - }, - "scripts": { - "start": "ts-node ./src/index.ts" - } -} diff --git a/infrastructure/openzeppelin-tests-preparation/src/index.ts b/infrastructure/openzeppelin-tests-preparation/src/index.ts deleted file mode 100644 index 6ac9b3a1d7e9..000000000000 --- a/infrastructure/openzeppelin-tests-preparation/src/index.ts +++ /dev/null @@ -1,72 +0,0 @@ -import * as zkweb3 from 'zksync-web3'; -import * as ethers from 'ethers'; -import * as path from 'path'; -import * as fs from 'fs'; -import * as axios from 'axios'; - -async function depositTestAccounts() { - const ethProvider = new ethers.providers.JsonRpcProvider( - process.env.L1_RPC_ADDRESS || process.env.ETH_CLIENT_WEB3_URL - ); - const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant`); - const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - const ethWallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0").connect( - ethProvider - ); - const web3Provider = new zkweb3.Provider(process.env.ZKSYNC_WEB3_API_URL || 'http://localhost:3050'); - const syncWallet = new zkweb3.Wallet(ethWallet.privateKey, web3Provider, ethProvider); - - const testAccountPks = process.env.API_WEB3_JSON_RPC_ACCOUNT_PKS!.split(','); - let handles = []; - for (const key of testAccountPks) { - const wallet = new zkweb3.Wallet(key, web3Provider, ethProvider); - handles.push( - await syncWallet.deposit({ - token: ethers.constants.AddressZero, - to: wallet.address, - amount: ethers.utils.parseEther('10000') - }) - ); - } - for (const handle of handles) { - await handle.wait(); - } -} - -async function sendBytecodeFromFolder(folderPath: string) { - const files = fs.readdirSync(folderPath); - for (const file of files) { - const filePath = path.join(folderPath, file); - if (fs.lstatSync(filePath).isDirectory()) { - await sendBytecodeFromFolder(filePath); - } else { - if (filePath.includes('.json')) { - const text = fs.readFileSync(filePath, 'utf-8'); - const data = JSON.parse(text); - if ('bytecode' in data) { - const req = { - jsonrpc: '2.0', - method: 'zks_setKnownBytecode', - params: [data.bytecode], - id: 1 - }; - const resp = await axios.default.post('http://127.0.0.1:3050', req); - console.log(filePath + ': ' + resp.data.toString()); - } - } - } - } -} - -async function main() { - await depositTestAccounts(); - await sendBytecodeFromFolder(`${process.env.ZKSYNC_HOME}/etc/openzeppelin-contracts/artifacts-zk`); -} - -main() - .then(() => { - console.log('Finished successfully'); - }) - .catch((err) => { - console.log('err: ' + err); - }); diff --git a/infrastructure/reading-tool/.gitignore b/infrastructure/protocol-upgrade/.gitignore similarity index 100% rename from infrastructure/reading-tool/.gitignore rename to infrastructure/protocol-upgrade/.gitignore diff --git a/infrastructure/protocol-upgrade/README.md 
b/infrastructure/protocol-upgrade/README.md new file mode 100644 index 000000000000..676126aca452 --- /dev/null +++ b/infrastructure/protocol-upgrade/README.md @@ -0,0 +1,255 @@
+# Protocol Upgrade Tool
+
+## Introduction
+
+The Protocol Upgrade Tool is a command-line utility that enables users to upgrade the protocol of a node. It is designed
+to be used in conjunction with the
+[protocol upgrade proposal](https://www.notion.so/matterlabs/Server-rolling-upgrade-mechanism-e4a57f8545e84c2c9edb4928b6e0f36b).
+
+## Usage
+
+To generate a protocol upgrade proposal, follow the steps below:
+
+1. Create a protocol upgrade proposal
+2. Deploy new facets and generate facet cuts
+3. Publish new system contracts and base system contracts
+4. Prepare calldata for L2 upgrade
+5. Deploy a new verifier and upgrade verifier params
+6. Generate the proposal transaction and execute it
+
+### Default Values
+
+If not provided as arguments, the tool can retrieve certain values from environment variables or from the L1 node:
+
+1. `l1rpc` - `ETH_CLIENT_WEB3_URL`
+2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL`
+3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR`
+4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR`
+5. `nonce` - Taken from the node via `l1rpc`
+6. `gas-price` - Taken from the node via `l1rpc`
+7. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`,
+   `testnet2`, `stage2`, `mainnet2`. Each upgrade on different environments is performed separately since the contract
+   addresses differ between environments.
+8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it
+   explicitly.
+
+### Create a Protocol Upgrade Proposal
+
+To create a protocol upgrade proposal, use the following command:
+
+```bash
+zk f yarn start upgrades create <upgrade-name> --protocol-version <protocol-version>
+```
+
+This command will create a folder named after the upgrade in the `etc/upgrades` directory. All necessary files for the
+upgrade will be generated in this folder. The folder name follows the format: `<timestamp>-<upgrade-name>`.
+
+Subsequent commands will use the latest upgrade located in the `etc/upgrades` folder. The latest upgrade is determined
+based on the timestamp in the name.
+
+The command also creates a common file with fields such as `name`, `protocolVersion`, and `timestamp`.
+
+### Deploy New Facets and Generate Facet Cuts
+
+First, deploy the new facets. Later, you can generate facet cuts for them.
+
+To deploy all facets together, use the following command:
+
+```bash
+$ zk f yarn start facets deploy-all \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--create2-address <create2-address> \
+--zksync-address <zksync-address> \
+--environment <environment>
+```
+
+This command will also generate facet cuts for all facets.
+
+Alternatively, you can deploy facets individually using the following command:
+
+```bash
+$ zk f yarn start facets deploy \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--create2-address <create2-address> \
+--zksync-address <zksync-address> \
+--environment <environment> \
+--executor \
+--governance \
+--diamond-cut \
+--getters \
+--mailbox
+```
+
+The results will be saved in the `etc/upgrades/<upgrade-name>/<environment>/facets.json` file. WARNING: Redeploying
+facets doesn't overwrite the `facets.json` file; you need to delete it manually.
+
+After deploying the facets, you can generate facet cuts using the following command:
+
+Note: `zksync-address` and `l1rpc` are required for correct generation of facet cuts; this requirement will be removed
+in the future.
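+
+For reference, the `facets.json` file that the next step reads back has roughly the following shape (a sketch inferred
+from how the tool consumes the file; real entries may carry additional fields):
+
+```typescript
+// Hypothetical illustration of etc/upgrades/<upgrade-name>/<environment>/facets.json:
+// one entry per deployed facet, keyed by contract name. Only the `address` field
+// is read when generating facet cuts.
+interface FacetsFile {
+    ExecutorFacet?: { address: string };
+    GovernanceFacet?: { address: string };
+    DiamondCutFacet?: { address: string };
+    GettersFacet?: { address: string };
+    MailboxFacet?: { address: string };
+}
+```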
+
+```bash
+$ zk f yarn start facets generate-facet-cuts \
+--l1rpc <l1rpc> \
+--zksync-address <zksync-address> \
+--environment <environment>
+```
+
+### Deploy New System Contracts and Base System Contracts
+
+To publish bytecodes for new system contracts and base system contracts together, use the following command:
+
+Note: All transactions will go through L1.
+
+```bash
+$ zk f yarn start system-contracts publish-all \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--l2rpc <l2rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--environment <environment>
+```
+
+Alternatively, you can publish them individually using the following command:
+
+```bash
+$ zk f yarn start system-contracts publish \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--l2rpc <l2rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--environment <environment> \
+--bootloader \
+--default-aa \
+--system-contracts
+```
+
+The results will be saved in the `etc/upgrades/<upgrade-name>/<environment>/l2Upgrade.json` file.
+
+Please note that publishing new system contracts individually will append to the existing file, while publishing them
+all together will overwrite it.
+
+### Prepare Calldata for L2 Upgrade
+
+You can generate calldata using the Complex Upgrader contract.
+
+- `l2-upgrader-address`: Address of the L2 upgrader contract. By default, it is the DefaultUpgrade contract from env
+  `CONTRACTS_DEFAULT_UPGRADE_ADDR`.
+- `use-forced-deployments`: Using this parameter, the tool will prepare calldata using the default upgrade contract ABI.
+  This is the preferred way to use the upgrade. If not specified, the tool will grab the data for the upgrade from the
+  `delegatedCalldata` field of the L2Upgrade file.
+- `use-contract-deployer`: Using this parameter, the tool will prepare calldata using the ContractDeployer contract.
+  This skips the delegation step of the Complex Upgrader contract. This is mostly needed for the very first upgrade.
+
+```bash
+$ zk f yarn start l2-transaction complex-upgrader-calldata \
+--environment <environment> \
+--l2-upgrader-address <l2-upgrader-address> \
+--use-forced-deployments \
+--use-contract-deployer
+```
+
+To generate ForceDeployment calldata for an L2 upgrade, use the following command:
+
+```bash
+$ zk f yarn start l2-transaction force-deployment-calldata \
+--environment <environment>
+```
+
+### Deploy New Verifier and Upgrade Verifier Params
+
+To deploy a new verifier, use the following command:
+
+```bash
+$ zk f yarn start crypto deploy-verifier \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--create2-address <create2-address> \
+--zksync-address <zksync-address> \
+--environment <environment>
+```
+
+The results will be saved in the `etc/upgrades/<upgrade-name>/<environment>/crypto.json` file.
+
+To upgrade verifier params, you can specify the recursion-level VKs and the recursion circuits set VKs explicitly, or
+use the params from the environment variables:
+
+```bash
+$ zk f yarn start crypto save-verification-params \
+--recursion-node-level-vk <recursion-node-level-vk> \
+--recursion-leaf-level-vk <recursion-leaf-level-vk> \
+--recursion-circuits-set-vks <recursion-circuits-set-vks> \
+--environment <environment>
+```
+
+The results will be saved in the `etc/upgrades/<upgrade-name>/<environment>/crypto.json` file.
+
+### Generate Proposal Transaction and Execute It
+
+To generate a proposal transaction that combines all the data from the previous steps and saves it to the
+`etc/upgrades/<upgrade-name>/<environment>/transactions.json` file, use the following command:
+
+- `l2UpgraderAddress`: Address of the L2 upgrader contract. By default, it's the Complex Upgrader contract. If you want
+  to use a different contract, such as ForcedDeploy directly, you can override it.
+- `diamondUpgradeProposalId`: ID of the diamond upgrade proposal. If not specified, it will be taken from the contract
+  using l1rpc and zksync-address.
+
+```bash
+$ zk f yarn start transactions build-default \
+--upgrade-address <upgrade-address> \
+--upgrade-timestamp <upgrade-timestamp> \
+--environment <environment> \
+--new-allow-list <new-allow-list> \
+--l2-upgrader-address <l2-upgrader-address> \
+--diamond-upgrade-proposal-id <diamond-upgrade-proposal-id> \
+--l1rpc <l1rpc> \
+--zksync-address <zksync-address>
+```
+
+To execute the `proposeTransparentUpgrade` transaction on L1, use the following command:
+
+```bash
+$ zk f yarn start transactions propose-upgrade \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--zksync-address <zksync-address> \
+--environment <environment>
+```
+
+To execute the latest upgrade, use the following command:
+
+```bash
+$ zk f yarn start transactions execute-upgrade \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--zksync-address <zksync-address> \
+--environment <environment>
+```
+
+To cancel the proposed upgrade, use the following command:
+
+```bash
+$ zk f yarn start transactions cancel-upgrade \
+--private-key <private-key> \
+--l1rpc <l1rpc> \
+--zksync-address <zksync-address> \
+--gas-price <gas-price> \
+--nonce <nonce> \
+--environment <environment>
+```
diff --git a/infrastructure/protocol-upgrade/package.json b/infrastructure/protocol-upgrade/package.json new file mode 100644 index 000000000000..bca8f3d10152 --- /dev/null +++ b/infrastructure/protocol-upgrade/package.json @@ -0,0 +1,33 @@
+{
+    "name": "protocol-upgrade-tool",
+    "version": "1.0.0",
+    "main": "build/index.js",
+    "license": "MIT",
+    "bin": "build/index.js",
+    "private": true,
+    "dependencies": {
+        "chalk": "^4.0.0",
+        "commander": "^6.0.0",
+        "deep-extend": "^0.6.0",
+        "dotenv": "^8.2.0",
+        "ethers": "~5.5.0",
+        "node-fetch": "^2.6.1",
+        "tabtab": "^3.0.2"
+    },
+    "devDependencies": {
+        "@matterlabs/hardhat-zksync-solc": "^0.3.15",
+        "@types/deep-extend": "^0.4.31",
+        "@types/node": "^14.6.1",
+        "@types/node-fetch": "^2.5.7",
+        "@types/tabtab": "^3.0.1",
+        "hardhat": "=2.12.4",
+        "typescript": "^4.3.5",
+        "l2-zksync-contracts": "link:../../contracts/zksync",
+        "l1-zksync-contracts": "link:../../contracts/ethereum"
+    },
+    "scripts": {
+        "build": "tsc",
+        "watch": "tsc --watch",
+        "start": "ts-node src/index.ts"
+    }
+}
diff --git a/infrastructure/protocol-upgrade/src/crypto/crypto.ts b/infrastructure/protocol-upgrade/src/crypto/crypto.ts new file mode 100644 index 000000000000..98a2a8468972 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/crypto/crypto.ts @@ -0,0 +1,76 @@
+import { getCryptoFileName, getUpgradePath, VerifierParams } from '../utils';
+import fs from 'fs';
+import { BytesLike } from 'ethers';
+import { Command } from 'commander';
+import { deployVerifier } from './deployer';
+
+function saveVerificationKeys(
+    recursionNodeLevelVkHash: BytesLike,
+    recursionLeafLevelVkHash: BytesLike,
+    recursionCircuitsSetVksHash: BytesLike,
+    environment: string
+) {
+    recursionNodeLevelVkHash = recursionNodeLevelVkHash ?? process.env.CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH;
+    recursionLeafLevelVkHash = recursionLeafLevelVkHash ?? process.env.CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH;
+    recursionCircuitsSetVksHash = recursionCircuitsSetVksHash ?? process.env.CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH;
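+    // (each hash above falls back to its matching CONTRACTS_RECURSION_* env var when the CLI flag is omitted)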
+    const verificationParams: VerifierParams = {
+        recursionNodeLevelVkHash,
+        recursionLeafLevelVkHash,
+        recursionCircuitsSetVksHash
+    };
+    updateCryptoFile('keys', verificationParams, environment);
+    console.log(`Verification keys ${JSON.stringify(verificationParams)} saved`);
+}
+
+function updateCryptoFile(name: string, values: any, environment: string) {
+    const cryptoFile = getCryptoFileName(environment);
+
+    if (!fs.existsSync(cryptoFile)) {
+        let params = {};
+        params[name] = values;
+        fs.writeFileSync(cryptoFile, JSON.stringify(params, null, 2));
+    } else {
+        const cryptoData = JSON.parse(fs.readFileSync(cryptoFile, 'utf8'));
+        cryptoData[name] = values;
+        console.log(JSON.stringify(cryptoData, null, 2));
+        fs.writeFileSync(cryptoFile, JSON.stringify(cryptoData, null, 2));
+    }
+}
+
+export const command = new Command('crypto').description('Prepare crypto params');
+
+command
+    .command('save-verification-params')
+    .description('Save verification params; if not provided, they will be taken from env variables')
+    .option('--recursion-node-level-vk <recursionNodeLevelVk>')
+    .option('--recursion-leaf-level-vk <recursionLeafLevelVk>')
+    .option('--recursion-circuits-set-vks <recursionCircuitsSetVks>')
+    .option('--environment <environment>')
+    .action(async (cmd) => {
+        await saveVerificationKeys(
+            cmd.recursionNodeLevelVk,
+            cmd.recursionLeafLevelVk,
+            cmd.recursionCircuitsSetVks,
+            cmd.environment
+        );
+    });
+
+command
+    .command('deploy-verifier')
+    .option('--l1Rpc <l1Rpc>')
+    .option('--private-key <privateKey>')
+    .option('--create2-address <create2Address>')
+    .option('--nonce <nonce>')
+    .option('--gas-price <gasPrice>')
+    .option('--environment <environment>')
+    .description('Deploy verifier contract')
+    .action(async (cmd) => {
+        console.log('Deploying verifier contract');
+        const path = getUpgradePath(cmd.environment);
+        const tmpFile = `${path}/cryptoTmp.json`;
+        await deployVerifier(cmd.l1Rpc, cmd.privateKey, cmd.create2Address, tmpFile, cmd.nonce, cmd.gasPrice);
+        let tmpData = JSON.parse(fs.readFileSync(tmpFile, 'utf8'));
+        console.log(`Verifier contract deployed at ${tmpData.address}`);
+        updateCryptoFile('verifier', tmpData, cmd.environment);
+        fs.unlinkSync(tmpFile);
+    });
diff --git a/infrastructure/protocol-upgrade/src/crypto/deployer.ts b/infrastructure/protocol-upgrade/src/crypto/deployer.ts new file mode 100644 index 000000000000..b0ca3c5eea7e --- /dev/null +++ b/infrastructure/protocol-upgrade/src/crypto/deployer.ts @@ -0,0 +1,32 @@
+import { spawn } from 'zk/build/utils';
+
+export async function deployVerifier(
+    l1Rpc: string,
+    privateKey: string,
+    create2Address: string,
+    file: string,
+    nonce?: number,
+    gasPrice?: number
+) {
+    const cwd = process.cwd();
+    process.chdir(`${process.env.ZKSYNC_HOME}/contracts/ethereum/`);
+    let argsString = '';
+    if (l1Rpc) {
+        argsString += ` --l1rpc ${l1Rpc}`;
+    }
+    if (privateKey) {
+        // assumed flag name, matching this command's other kebab-case flags
+        argsString += ` --private-key ${privateKey}`;
+    }
+    if (nonce) {
+        argsString += ` --nonce ${nonce}`;
+    }
+    if (gasPrice) {
+        argsString += ` --gas-price ${gasPrice}`;
+    }
+
+    create2Address = create2Address ??
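+    // falls back to the CREATE2 factory address configured for the environment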
process.env.CONTRACTS_CREATE2_FACTORY_ADDR; + argsString += ` --create2-address ${create2Address}`; + + argsString += ` --file ${file}`; + + await spawn(`yarn upgrade-system verifier deploy ${argsString}`); + + process.chdir(cwd); +} diff --git a/infrastructure/protocol-upgrade/src/custom-upgrade.ts b/infrastructure/protocol-upgrade/src/custom-upgrade.ts new file mode 100644 index 000000000000..f898b31517e5 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/custom-upgrade.ts @@ -0,0 +1,36 @@ +import fs from 'fs'; +import { Command } from 'commander'; +import { + DEFAULT_L1CONTRACTS_FOR_UPGRADE_PATH, + DEFAULT_L2CONTRACTS_FOR_UPGRADE_PATH, + getNameOfTheLastUpgrade +} from './utils'; + +async function createCustomUpgrade(defaultUpgradePath: string) { + const name = getNameOfTheLastUpgrade(); + const nameWithoutTimestamp = name.split('-').slice(1).join('_'); + const upgradePath = `${defaultUpgradePath}/${name}`; + const upgradeFile = `${upgradePath}/Upgrade.sol`; + fs.mkdirSync(upgradePath, { recursive: true }); + fs.copyFileSync(`${defaultUpgradePath}/DefaultUpgrade.sol`, upgradeFile); + let result = fs.readFileSync(upgradeFile, 'utf8').replace('DefaultUpgrade', nameWithoutTimestamp); + fs.writeFileSync(upgradeFile, result, 'utf8'); + console.log(`Custom upgrade ${name} created in ${upgradePath}`); +} + +export const command = new Command('custom-upgrade').description('create and publish custom l2 upgrade'); + +command + .command('create') + .option('--l2') + .option('--l1') + .description('Create custom contract upgrade') + .action(async (options) => { + if (options.l2) { + await createCustomUpgrade(DEFAULT_L2CONTRACTS_FOR_UPGRADE_PATH); + } else if (options.l1) { + await createCustomUpgrade(DEFAULT_L1CONTRACTS_FOR_UPGRADE_PATH); + } else { + throw new Error('Please specify --l1 or --l2'); + } + }); diff --git a/infrastructure/protocol-upgrade/src/index.ts b/infrastructure/protocol-upgrade/src/index.ts new file mode 100644 index 000000000000..67a0afde0782 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/index.ts @@ -0,0 +1,33 @@ +import { program } from 'commander'; + +import { command as publish } from './l2upgrade/system-contracts'; +import { command as manager } from './protocol-upgrade-manager'; +import { command as customUpgrade } from './custom-upgrade'; +import { command as l1Upgrade } from './l1upgrade/facets'; +import { command as l2Upgrade } from './l2upgrade/transactions'; +import { command as transactions } from './transaction'; +import { command as crypto } from './crypto/crypto'; + +const COMMANDS = [publish, manager, customUpgrade, l1Upgrade, transactions, crypto, l2Upgrade]; + +async function main() { + const ZKSYNC_HOME = process.env.ZKSYNC_HOME; + + if (!ZKSYNC_HOME) { + throw new Error('Please set $ZKSYNC_HOME to the root of zkSync repo!'); + } else { + process.chdir(ZKSYNC_HOME); + } + + program.version('0.1.0').name('zk').description('zksync protocol upgrade tools'); + + for (const command of COMMANDS) { + program.addCommand(command); + } + await program.parseAsync(process.argv); +} + +main().catch((err: Error) => { + console.error('Error:', err.message || err); + process.exitCode = 1; +}); diff --git a/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts b/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts new file mode 100644 index 000000000000..e0f8765ba21c --- /dev/null +++ b/infrastructure/protocol-upgrade/src/l1upgrade/deployer.ts @@ -0,0 +1,54 @@ +import { spawn } from 'zk/build/utils'; + +export async function callFacetDeployer( + 
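+    // All values below are optional; each provided one is forwarded as a CLI flag to `yarn upgrade-system facets deploy`.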
l1RpcProvider: string,
+    privateKey: string,
+    gasPrice: string,
+    create2Address: string,
+    nonce: string,
+    executor: boolean,
+    governance: boolean,
+    diamondCut: boolean,
+    getters: boolean,
+    mailbox: boolean,
+    file: string
+) {
+    const cwd = process.cwd();
+    process.chdir(`${process.env.ZKSYNC_HOME}/contracts/ethereum/`);
+    let argsString = '';
+    if (executor) {
+        argsString += ' --executor';
+    }
+    if (governance) {
+        argsString += ' --governance';
+    }
+    if (diamondCut) {
+        argsString += ' --diamondCut';
+    }
+    if (getters) {
+        argsString += ' --getters';
+    }
+    if (mailbox) {
+        argsString += ' --mailbox';
+    }
+    if (file) {
+        argsString += ` --file ${file}`;
+    }
+    if (gasPrice) {
+        argsString += ` --gasPrice ${gasPrice}`;
+    }
+    if (nonce) {
+        argsString += ` --nonce ${nonce}`;
+    }
+    if (l1RpcProvider) {
+        argsString += ` --l1Rpc ${l1RpcProvider}`;
+    }
+    if (privateKey) {
+        argsString += ` --privateKey ${privateKey}`;
+    }
+    if (create2Address) {
+        argsString += ` --create2-address ${create2Address}`;
+    }
+    await spawn(`yarn upgrade-system facets deploy ${argsString}`);
+    process.chdir(cwd);
+}
diff --git a/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts new file mode 100644 index 000000000000..2d72fbd415bd --- /dev/null +++ b/infrastructure/protocol-upgrade/src/l1upgrade/facets.ts @@ -0,0 +1,232 @@
+import fs from 'fs';
+import { Command } from 'commander';
+import { spawn } from 'zk/build/utils';
+import { getFacetCutsFileName, getFacetsFileName, getUpgradePath } from '../utils';
+import { callFacetDeployer } from './deployer';
+
+async function deployAllFacets(
+    l1RpcProvider: string,
+    privateKey: string,
+    gasPrice: string,
+    create2Address: string,
+    nonce: string,
+    environment: string
+) {
+    const file = getFacetsFileName(environment);
+    await callFacetDeployer(
+        l1RpcProvider,
+        privateKey,
+        gasPrice,
+        create2Address,
+        nonce,
+        true,
+        true,
+        true,
+        true,
+        true,
+        file
+    );
+}
+
+async function deployFacetsAndMergeFiles(
+    l1RpcProvider: string,
+    privateKey: string,
+    gasPrice: string,
+    create2Address: string,
+    nonce: string,
+    executor: boolean,
+    governance: boolean,
+    diamondCut: boolean,
+    getters: boolean,
+    mailbox: boolean,
+    environment: string
+) {
+    create2Address = create2Address ?? process.env.CONTRACTS_CREATE2_FACTORY_ADDR;
+    const upgradePath = getUpgradePath(environment);
+    const tmpFacetsFile = `${upgradePath}/tmp.json`;
+    await callFacetDeployer(
+        l1RpcProvider,
+        privateKey,
+        gasPrice,
+        create2Address,
+        nonce,
+        executor,
+        governance,
+        diamondCut,
+        getters,
+        mailbox,
+        tmpFacetsFile
+    );
+    const tmpFacets = JSON.parse(fs.readFileSync(tmpFacetsFile).toString());
+    const facetsFile = getFacetsFileName(environment);
+    // start from an empty object on the first partial deployment, when facets.json does not exist yet
+    const facets = fs.existsSync(facetsFile) ? JSON.parse(fs.readFileSync(facetsFile).toString()) : {};
+    for (const key in tmpFacets) {
+        facets[key] = tmpFacets[key];
+    }
+    fs.writeFileSync(facetsFile, JSON.stringify(facets, null, 4));
+    fs.unlinkSync(tmpFacetsFile);
+}
+
+async function generateFacetCuts(l1RpcProvider?: string, zksyncAddress?: string, environment?: string) {
+    zksyncAddress = zksyncAddress ??
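+    // defaults to the diamond proxy address for the target environment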
process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + + console.log('Generating facet cuts'); + const file = getFacetsFileName(environment); + const facets = JSON.parse(fs.readFileSync(file).toString()); + let gettersAddress = facets['GettersFacet']; + if (gettersAddress) { + gettersAddress = gettersAddress['address']; + } + let diamondCutAddress = facets['DiamondCutFacet']; + if (diamondCutAddress) { + diamondCutAddress = diamondCutAddress['address']; + } + let mailboxAddress = facets['MailboxFacet']; + if (mailboxAddress) { + mailboxAddress = mailboxAddress['address']; + } + let executorAddress = facets['ExecutorFacet']; + if (executorAddress) { + executorAddress = executorAddress['address']; + } + let governanceAddress = facets['GovernanceFacet']; + if (governanceAddress) { + governanceAddress = governanceAddress['address']; + } + + await callGenerateFacetCuts( + zksyncAddress, + getFacetCutsFileName(environment), + l1RpcProvider, + diamondCutAddress, + gettersAddress, + mailboxAddress, + executorAddress, + governanceAddress + ); +} + +async function callGenerateFacetCuts( + zksyncAddress: string, + file: string, + l1RpcProvider?: string, + diamondCutAddress?: string, + gettersAddress?: string, + mailboxAddress?: string, + executorAddress?: string, + governanceAddress?: string +) { + const cwd = process.cwd(); + process.chdir(`${process.env.ZKSYNC_HOME}/contracts/ethereum/`); + let argsString = ''; + if (l1RpcProvider) { + argsString += ` --l1Rpc ${l1RpcProvider}`; + } + if (diamondCutAddress) { + argsString += ` --diamond-cut-facet-address ${diamondCutAddress}`; + } + if (gettersAddress) { + argsString += ` --getters-address ${gettersAddress}`; + } + if (mailboxAddress) { + argsString += ` --mailbox-address ${mailboxAddress}`; + } + if (executorAddress) { + argsString += ` --executor-address ${executorAddress}`; + } + if (governanceAddress) { + argsString += ` --governance-address ${governanceAddress}`; + } + + argsString += ` --zkSyncAddress ${zksyncAddress}`; + argsString += ` --file ${file}`; + await spawn(`yarn upgrade-system facets generate-facet-cuts ${argsString}`); + process.chdir(cwd); +} + +async function deployAllFacetsAndGenerateFacetCuts( + l1RpcProvider: string, + privateKey: string, + gasPrice: string, + create2Address: string, + zkSyncAddress: string, + nonce: string, + environment: string +) { + console.log('Deploying all facets'); + create2Address = create2Address ?? process.env.CONTRACTS_CREATE2_FACTORY_ADDR; + zkSyncAddress = zkSyncAddress ?? 
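+    // same fallback: resolve the diamond proxy from the environment config when no address is passed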
process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + + await deployAllFacets(l1RpcProvider, privateKey, gasPrice, create2Address, nonce, environment); + await generateFacetCuts(l1RpcProvider, zkSyncAddress, environment); + console.log('Done'); +} + +export const command = new Command('facets').description('Deploy facets and generate facet cuts'); + +command + .command('deploy-all') + .description('Deploy all facets') + .option('--private-key <private-key>') + .option('--l1rpc <l1rpc>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--create2-address <create2-address>') + .option('--zksync-address <zksync-address>') + .option('--environment <environment>') + .action(async (cmd) => { + await deployAllFacetsAndGenerateFacetCuts( + cmd.l1rpc, + cmd.privateKey, + cmd.gasPrice, + cmd.create2Address, + cmd.zksyncAddress, + cmd.nonce, + cmd.environment + ); + }); + +command + .command('deploy') + .description('Deploy facets one by one') + .option('--environment <environment>') + .option('--private-key <private-key>') + .option('--create2-address <create2-address>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .option('--executor') + .option('--governance') + .option('--diamond-cut') + .option('--getters') + .option('--mailbox') + .action(async (cmd) => { + await deployFacetsAndMergeFiles( + cmd.l1rpc, + cmd.privateKey, + cmd.gasPrice, + cmd.create2Address, + cmd.nonce, + cmd.executor, + cmd.governance, + cmd.diamondCut, + cmd.getters, + cmd.mailbox, + cmd.environment + ); + }); + +command + .command('generate-facet-cuts') + .description('Generate facet cuts') + .option('--l1rpc <l1rpc>') + .option('--zksync-address <zksync-address>') + .option('--environment <environment>') + .action(async (cmd) => { + try { + await generateFacetCuts(cmd.l1rpc, cmd.zksyncAddress, cmd.environment); + } catch (e) { + console.error('Not all facets have been deployed: ', e); + process.exit(1); + } + }); diff --git a/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts b/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts new file mode 100644 index 000000000000..0f5d14ebf7a1 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/l2upgrade/deployer.ts @@ -0,0 +1,46 @@ +import { spawn } from 'zk/build/utils'; + +export async function callSystemContractDeployer( + l1RpcProvider: string, + privateKey: string, + l2RpcProvider: string, + gasPrice: string, + nonce: string, + bootloader: boolean, + defaultAA: boolean, + systemContracts: boolean, + file: string +) { + const cwd = process.cwd(); + process.chdir(`${process.env.ZKSYNC_HOME}/etc/system-contracts`); + let argsString = ''; + if (bootloader) { + argsString += ' --bootloader'; + } + if (defaultAA) { + argsString += ' --default-aa'; + } + if (systemContracts) { + argsString += ' --system-contracts'; + } + if (file) { + argsString += ` --file ${file}`; + } + if (gasPrice) { + argsString += ` --gas-price ${gasPrice}`; + } + if (nonce) { + argsString += ` --nonce ${nonce}`; + } + if (l1RpcProvider) { + argsString += ` --l1Rpc ${l1RpcProvider}`; + } + if (l2RpcProvider) { + argsString += ` --l2Rpc ${l2RpcProvider}`; + } + if (privateKey) { + argsString += ` --private-key ${privateKey}`; + } + await spawn(`yarn deploy-preimages ${argsString}`); + process.chdir(cwd); +}
diff --git a/infrastructure/protocol-upgrade/src/l2upgrade/system-contracts.ts b/infrastructure/protocol-upgrade/src/l2upgrade/system-contracts.ts new file mode 100644 index 000000000000..b81d12c9be15 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/l2upgrade/system-contracts.ts @@ -0,0 +1,117 @@ +import fs from 'fs'; +import { Command } from 'commander'; +import { getL2UpgradeFileName, getUpgradePath } from '../utils'; +import { callSystemContractDeployer } from './deployer'; + +async function publishAllFactoryDeps( + l1RpcProvider: string, + privateKey: string, + l2RpcProvider: string, + gasPrice: string, + nonce: string, + environment: string +) { + console.log('Publishing bytecodes for all system contracts'); + const upgradeFile = getL2UpgradeFileName(environment); + await callSystemContractDeployer( + l1RpcProvider, + privateKey, + l2RpcProvider, + gasPrice, + nonce, + true, + true, + true, + upgradeFile + ); +} + +async function publishAndMergeFiles( + l1RpcProvider: string, + privateKey: string, + l2RpcProvider: string, + gasPrice: string, + nonce: string, + bootloader: boolean, + defaultAA: boolean, + systemContracts: boolean, + environment: string +) { + console.log('Publishing bytecodes for system contracts'); + const upgradePath = getUpgradePath(environment); + const tmpUpgradeFile = upgradePath + '/tmp.json'; + await callSystemContractDeployer( + l1RpcProvider, + privateKey, + l2RpcProvider, + gasPrice, + nonce, + bootloader, + defaultAA, + systemContracts, + tmpUpgradeFile + ); + const mainUpgradeFile = getL2UpgradeFileName(environment); + let tmpUpgradeData = JSON.parse(fs.readFileSync(tmpUpgradeFile, 'utf8')); + + if (!fs.existsSync(mainUpgradeFile)) { + fs.writeFileSync(mainUpgradeFile, JSON.stringify(tmpUpgradeData, null, 2)); + fs.unlinkSync(tmpUpgradeFile); + return; + } + + let mainUpgradeData = JSON.parse(fs.readFileSync(mainUpgradeFile, 'utf8')); + if (bootloader) { + mainUpgradeData.bootloader = tmpUpgradeData.bootloader; + } + if (defaultAA) { + mainUpgradeData.defaultAA = tmpUpgradeData.defaultAA; + } + if (systemContracts) { + mainUpgradeData.systemContracts = tmpUpgradeData.systemContracts; + } + fs.writeFileSync(mainUpgradeFile, JSON.stringify(mainUpgradeData, null, 2)); + fs.unlinkSync(tmpUpgradeFile); + console.log('All system contracts published'); +} + +export const command = new Command('system-contracts').description('publish system contracts'); + +command + .command('publish-all') + .description('Publish all factory dependencies and base system contracts') + .option('--private-key <private-key>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .option('--l2rpc <l2rpc>') + .option('--environment <environment>') + .action(async (cmd) => { + await publishAllFactoryDeps(cmd.l1rpc, cmd.privateKey, cmd.l2rpc, cmd.gasPrice, cmd.nonce, cmd.environment); + }); + +command + .command('publish') + .description('Publish contracts one by one') + .option('--private-key <private-key>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .option('--l2rpc <l2rpc>') + .option('--environment <environment>') + .option('--bootloader') + .option('--default-aa') + .option('--system-contracts') + .action(async (cmd) => { + await publishAndMergeFiles( + cmd.l1rpc, + cmd.privateKey, + cmd.l2rpc, + cmd.gasPrice, + cmd.nonce, + cmd.bootloader, + cmd.defaultAA, + cmd.systemContracts, + cmd.environment + ); + });
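The publish/merge steps above and the facet deployment earlier communicate only through JSON files in the upgrade directory, but the diff never declares their shapes. A rough sketch, inferred from how the fields are read elsewhere in this PR (`facets['GettersFacet']['address']`, `dependency.bytecodeHashes[0]`, and so on); any field beyond those reads is an assumption:

```ts
// Hypothetical shapes for the upgrade artifacts, inferred from their usage in this diff.

// facets.json: maps a facet name to its freshly deployed L1 address.
type FacetsFile = Record<string, { address: string }>;

// One published dependency in l2Upgrade.json (bootloader, defaultAA, or a system contract).
interface DeployedDependency {
    // L2 address of the contract; the bootloader has no address, only a hash.
    address?: string;
    // Bytecode hashes published as factory dependencies; index 0 is used downstream.
    bytecodeHashes: string[];
}

// l2Upgrade.json as written by `yarn deploy-preimages` and extended by the commands below.
interface L2UpgradeFile {
    bootloader?: DeployedDependency;
    defaultAA?: DeployedDependency;
    systemContracts?: DeployedDependency[];
}
```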
diff --git a/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts b/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts new file mode 100644 index 000000000000..2babc5bf04a3 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts @@ -0,0 +1,128 @@ +import { BytesLike } from 'ethers'; +import { ComplexUpgraderFactory, ContractDeployerFactory } from '../../../../etc/system-contracts/typechain'; +import { ForceDeployment, L2CanonicalTransaction } from '../transaction'; +import { ForceDeployUpgraderFactory } from 'l2-zksync-contracts/typechain'; +import { Command } from 'commander'; +import { getCommonDataFileName, getL2UpgradeFileName } from '../utils'; +import fs from 'fs'; +import { REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT } from 'zksync-web3/build/src/utils'; + +const SYSTEM_UPGRADE_TX_TYPE = 254; +const FORCE_DEPLOYER_ADDRESS = '0x0000000000000000000000000000000000008007'; +const CONTRACT_DEPLOYER_ADDRESS = '0x0000000000000000000000000000000000008006'; +const COMPLEX_UPGRADE_ADDRESS = '0x000000000000000000000000000000000000800f'; + +function buildL2CanonicalTransaction(calldata: BytesLike, nonce, toAddress: string): L2CanonicalTransaction { + return { + txType: SYSTEM_UPGRADE_TX_TYPE, + from: FORCE_DEPLOYER_ADDRESS, + to: toAddress, + gasLimit: 72_000_000, + gasPerPubdataByteLimit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, + maxFeePerGas: 0, + maxPriorityFeePerGas: 0, + paymaster: 0, + nonce, + value: 0, + reserved: [0, 0, 0, 0], + data: calldata, + signature: '0x', + factoryDeps: [], + paymasterInput: '0x', + reservedDynamic: '0x' + }; +} + +export function forceDeploymentCalldataUpgrader(forcedDeployments: ForceDeployment[]): BytesLike { + let forceDeployUpgrader = new ForceDeployUpgraderFactory(); + let calldata = forceDeployUpgrader.interface.encodeFunctionData('forceDeploy', [forcedDeployments]); + return calldata; +} + +export function forceDeploymentCalldataContractDeployer(forcedDeployments: ForceDeployment[]): BytesLike { + let contractDeployer = new ContractDeployerFactory(); + let calldata = contractDeployer.interface.encodeFunctionData('forceDeployOnAddresses', [forcedDeployments]); + return calldata; +} + +export function prepareCallDataForComplexUpgrader(calldata: BytesLike, to: string): BytesLike { + const upgrader = new ComplexUpgraderFactory(); + let finalCalldata = upgrader.interface.encodeFunctionData('upgrade', [to, calldata]); + return finalCalldata; +} + +export const command = new Command('l2-transaction').description('prepare l2 upgrade transactions and calldata'); + +command + .command('force-deployment-calldata') + .option('--environment <environment>') + .action(async (cmd) => { + const l2upgradeFileName = getL2UpgradeFileName(cmd.environment); + if (fs.existsSync(l2upgradeFileName)) { + console.log(`Found l2 upgrade file ${l2upgradeFileName}`); + let l2Upgrade = JSON.parse(fs.readFileSync(l2upgradeFileName).toString()); + const forcedDeployments = systemContractsToForceDeployments(l2Upgrade.systemContracts); + const calldata = forceDeploymentCalldataContractDeployer(forcedDeployments); + l2Upgrade.forcedDeployments = forcedDeployments; + l2Upgrade.forcedDeploymentCalldata = calldata; + l2Upgrade.delegatedCalldata = calldata; + fs.writeFileSync(l2upgradeFileName, JSON.stringify(l2Upgrade, null, 2)); + } else { + throw new Error(`No l2 upgrade file found at ${l2upgradeFileName}`); + } + }); + +function systemContractsToForceDeployments(systemContracts): ForceDeployment[] { + return systemContracts.map((dependency) => { + return { + bytecodeHash: dependency.bytecodeHashes[0], + newAddress: dependency.address, + value: 0, + input: '0x', + callConstructor: false + }; + }); +} + +command + .command('complex-upgrader-calldata') + .option('--environment <environment>') + .option('--l2-upgrader-address <l2-upgrader-address>') + .option( + '--use-forced-deployments', + 'Build calldata with forced deployments instead of using prebuilt delegated calldata' + ) + .option( + '--use-contract-deployer', + 'Use contract deployer address instead of complex upgrader address. ' + + "Warning: this shouldn't be a default option, it's only for first upgrade purposes" + ) + .action(async (cmd) => { + const l2upgradeFileName = getL2UpgradeFileName(cmd.environment); + const l2UpgraderAddress = cmd.l2UpgraderAddress ?? process.env.CONTRACTS_L2_DEFAULT_UPGRADE_ADDR; + const commonData = JSON.parse(fs.readFileSync(getCommonDataFileName(), { encoding: 'utf-8' })); + if (fs.existsSync(l2upgradeFileName)) { + console.log(`Found l2 upgrade file ${l2upgradeFileName}`); + let l2Upgrade = JSON.parse(fs.readFileSync(l2upgradeFileName).toString()); + let delegatedCalldata = l2Upgrade.delegatedCalldata; + if (cmd.useForcedDeployments) { + l2Upgrade.forcedDeployments = systemContractsToForceDeployments(l2Upgrade.systemContracts); + l2Upgrade.forcedDeploymentCalldata = forceDeploymentCalldataContractDeployer( + l2Upgrade.forcedDeployments + ); + delegatedCalldata = l2Upgrade.forcedDeploymentCalldata; + } + let toAddress = COMPLEX_UPGRADE_ADDRESS; + if (cmd.useContractDeployer) { + toAddress = CONTRACT_DEPLOYER_ADDRESS; + l2Upgrade.calldata = delegatedCalldata; + } else { + l2Upgrade.calldata = prepareCallDataForComplexUpgrader(delegatedCalldata, l2UpgraderAddress); + } + + l2Upgrade.tx = buildL2CanonicalTransaction(l2Upgrade.calldata, commonData.protocolVersion, toAddress); + fs.writeFileSync(l2upgradeFileName, JSON.stringify(l2Upgrade, null, 2)); + } else { + throw new Error(`No l2 upgrade file found at ${l2upgradeFileName}`); + } + });
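One detail of `buildL2CanonicalTransaction` that is easy to miss: the caller passes `commonData.protocolVersion` as the `nonce`, so the upgrade transaction's nonce doubles as the new protocol version. A sketch of the transaction the `complex-upgrader-calldata` command ends up writing into `l2Upgrade.json` (all values illustrative, and the pubdata limit is an assumption about the imported constant):

```ts
// Illustrative only: the object buildL2CanonicalTransaction returns for protocol version 17.
const exampleUpgradeTx = {
    txType: 254, // SYSTEM_UPGRADE_TX_TYPE
    from: '0x0000000000000000000000000000000000008007', // FORCE_DEPLOYER_ADDRESS
    to: '0x000000000000000000000000000000000000800f', // COMPLEX_UPGRADE_ADDRESS (the default target)
    gasLimit: 72_000_000,
    gasPerPubdataByteLimit: 800, // REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT (numeric value assumed here)
    maxFeePerGas: 0,
    maxPriorityFeePerGas: 0,
    paymaster: 0,
    nonce: 17, // == commonData.protocolVersion
    value: 0,
    reserved: [0, 0, 0, 0],
    data: '0x…', // ComplexUpgrader.upgrade(l2UpgraderAddress, delegatedCalldata)
    signature: '0x',
    factoryDeps: [],
    paymasterInput: '0x',
    reservedDynamic: '0x'
};
```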
diff --git a/infrastructure/protocol-upgrade/src/protocol-upgrade-manager.ts b/infrastructure/protocol-upgrade/src/protocol-upgrade-manager.ts new file mode 100644 index 000000000000..563967f9f603 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/protocol-upgrade-manager.ts @@ -0,0 +1,33 @@ +import fs from 'fs'; +import { Command } from 'commander'; +import { DEFAULT_UPGRADE_PATH, getNameOfTheLastUpgrade, getTimestampInSeconds } from './utils'; + +function createNewUpgrade(name, protocolVersion: number) { + const timestamp = getTimestampInSeconds(); + const upgradePath = `${DEFAULT_UPGRADE_PATH}/${timestamp}-${name}`; + fs.mkdirSync(upgradePath, { recursive: true }); + const upgradeJson = { + name, + creationTimestamp: timestamp, + protocolVersion + }; + fs.writeFileSync(`${upgradePath}/common.json`, JSON.stringify(upgradeJson, null, 2)); + console.log(`Created new upgrade: ${upgradePath}`); +} + +export const command = new Command('upgrades').description('manage protocol upgrades'); + +command + .command('create <name>') + .requiredOption('--protocol-version <protocol-version>') + .description('Create new upgrade') + .action(async (name, cmd) => { + createNewUpgrade(name, cmd.protocolVersion); + }); + +command + .command('get-last') + .description('get name of the last upgrade') + .action(async () => { + console.log(getNameOfTheLastUpgrade()); + });
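`upgrades create` above, together with the path helpers in `utils.ts` at the end of this diff, pins down the on-disk convention every other command relies on. A comment-only sketch of the resulting layout (timestamp, upgrade name, and environment invented for illustration):

```ts
// Hypothetical layout after `upgrades create my-upgrade --protocol-version 17`
// and a full run against the `localhost` environment:
//
//   etc/upgrades/1690000000-my-upgrade/common.json                 <- name, creationTimestamp, protocolVersion
//   etc/upgrades/1690000000-my-upgrade/localhost/crypto.json       <- verifier address + keys (optional)
//   etc/upgrades/1690000000-my-upgrade/localhost/facets.json       <- deployed facet addresses
//   etc/upgrades/1690000000-my-upgrade/localhost/facetCuts.json    <- generated facet cuts
//   etc/upgrades/1690000000-my-upgrade/localhost/l2Upgrade.json    <- published bytecode hashes + l2 tx
//   etc/upgrades/1690000000-my-upgrade/localhost/transactions.json <- final L1 calldata
//
// getNameOfTheLastUpgrade() simply picks the lexicographically greatest directory
// name, which is why the unix-timestamp prefix matters.
```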
diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts new file mode 100644 index 000000000000..6ff23180dc50 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -0,0 +1,474 @@ +import { BigNumberish } from '@ethersproject/bignumber'; +import { BytesLike, ethers } from 'ethers'; +import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-zksync-contracts/typechain'; +import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1 } from 'l1-zksync-contracts/typechain'; +import { FacetCut } from 'l1-zksync-contracts/src.ts/diamondCut'; +import { IZkSyncFactory } from 'l1-zksync-contracts/typechain/IZkSyncFactory'; +import { ComplexUpgraderFactory } from '../../../etc/system-contracts/typechain'; +import { + getCommonDataFileName, + getCryptoFileName, + getFacetCutsFileName, + getL2TransactionsFileName, + getL2UpgradeFileName, + VerifierParams +} from './utils'; +import fs from 'fs'; +import { Command } from 'commander'; +import { web3Url } from 'zk/build/utils'; +import * as path from 'path'; + +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + +export interface TransparentUpgrade { + facetCuts: FacetCut[]; + initAddress: string; + initCalldata: string; +} + +export interface ForceDeployment { + // The bytecode hash to put on an address + bytecodeHash: BytesLike; + // The address on which to deploy the bytecode hash + newAddress: string; + // Whether to call the constructor + callConstructor: boolean; + // The value with which to initialize a contract + value: ethers.BigNumber; + // The constructor calldata + input: BytesLike; +} + +export interface L2CanonicalTransaction { + txType: BigNumberish; + from: BigNumberish; + to: BigNumberish; + gasLimit: BigNumberish; + gasPerPubdataByteLimit: BigNumberish; + maxFeePerGas: BigNumberish; + maxPriorityFeePerGas: BigNumberish; + paymaster: BigNumberish; + nonce: BigNumberish; + value: BigNumberish; + // In the future, we might want to add some + // new fields to the struct. The `txData` struct + // is to be passed to accounts, and any change to its structure + // would mean a breaking change for these accounts. To prevent this, + // we should keep some fields as "reserved". + // It is also recommended that their length is fixed, since + // that would allow easier proof integration (in case we ever need + // a special circuit for preprocessing transactions). + reserved: [BigNumberish, BigNumberish, BigNumberish, BigNumberish]; + data: BytesLike; + signature: BytesLike; + factoryDeps: BigNumberish[]; + paymasterInput: BytesLike; + // Reserved dynamic type for future use cases. Using it should be avoided, + // but it is kept here in case we want to enable additional functionality. + reservedDynamic: BytesLike; +} + +export interface ProposedUpgrade { + // The tx for the upgrade call to the l2 system upgrade contract + l2ProtocolUpgradeTx: L2CanonicalTransaction; + factoryDeps: BytesLike[]; + bootloaderHash: BytesLike; + defaultAccountHash: BytesLike; + verifier: string; + verifierParams: VerifierParams; + l1ContractsUpgradeCalldata: BytesLike; + postUpgradeCalldata: BytesLike; + upgradeTimestamp: ethers.BigNumber; + newProtocolVersion: BigNumberish; + newAllowList: string; +} + +export function buildProposeUpgrade( + upgradeTimestamp: ethers.BigNumber, + newProtocolVersion: number, + l1ContractsUpgradeCalldata?: BytesLike, + postUpgradeCalldata?: BytesLike, + verifierParams?: VerifierParams, + bootloaderHash?: BytesLike, + defaultAccountHash?: BytesLike, + verifier?: string, + newAllowList?: string, + l2ProtocolUpgradeTx?: L2CanonicalTransaction +): ProposedUpgrade { + newAllowList = newAllowList ?? ethers.constants.AddressZero; + bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; + defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; + l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? '0x'; + postUpgradeCalldata = postUpgradeCalldata ??
'0x'; + return { + l2ProtocolUpgradeTx, + bootloaderHash, + defaultAccountHash, + verifier, + verifierParams, + l1ContractsUpgradeCalldata, + postUpgradeCalldata, + upgradeTimestamp, + factoryDeps: [], + newProtocolVersion, + newAllowList + }; +} + +export function forceDeploymentCalldata(forcedDeployments: ForceDeployment[]): BytesLike { + let forceDeployUpgrader = new ForceDeployUpgraderFactoryL2(); + let calldata = forceDeployUpgrader.interface.encodeFunctionData('forceDeploy', [forcedDeployments]); + return calldata; +} + +export function prepareCallDataForComplexUpgrader(calldata: BytesLike, to: string): BytesLike { + const upgrader = new ComplexUpgraderFactory(); + let finalCalldata = upgrader.interface.encodeFunctionData('upgrade', [to, calldata]); + return finalCalldata; +} + +export function prepareDefaultCalldataForL1upgrade(upgrade: ProposedUpgrade): BytesLike { + let defaultUpgrade = new DefaultUpgradeFactoryL1(); + let calldata = defaultUpgrade.interface.encodeFunctionData('upgrade', [upgrade]); + return calldata; +} + +export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeployment[], l2UpgraderAddress): BytesLike { + const forcedDeploymentsCalldata = forceDeploymentCalldata(forcedDeployments); + const complexUpgraderCalldata = prepareCallDataForComplexUpgrader(forcedDeploymentsCalldata, l2UpgraderAddress); + return complexUpgraderCalldata; +} + +export function prepareproposeTransparentUpgradeCalldata( + initCalldata, + upgradeAddress: string, + facetCuts: FacetCut[], + diamondUpgradeProposalId: number +) { + let zkSyncFactory = IZkSyncFactory.connect(upgradeAddress, ethers.providers.getDefaultProvider()); + let transparentUpgrade: TransparentUpgrade = { + facetCuts, + initAddress: upgradeAddress, + initCalldata + }; + + let proposeTransparentUpgradeCalldata = zkSyncFactory.interface.encodeFunctionData('proposeTransparentUpgrade', [ + transparentUpgrade, + diamondUpgradeProposalId + ]); + + let executeUpgradeCalldata = zkSyncFactory.interface.encodeFunctionData('executeUpgrade', [ + transparentUpgrade, + ethers.constants.HashZero + ]); + return [proposeTransparentUpgradeCalldata, executeUpgradeCalldata, transparentUpgrade]; +} + +export function buildDefaultUpgradeTx( + environment, + diamondUpgradeProposalId, + upgradeAddress, + l2UpgraderAddress, + upgradeTimestamp, + newAllowList +) { + const commonData = JSON.parse(fs.readFileSync(getCommonDataFileName(), { encoding: 'utf-8' })); + const protocolVersion = commonData.protocolVersion; + console.log( + `Building default upgrade tx for ${environment} protocol version ${protocolVersion} upgradeTimestamp ${upgradeTimestamp} ` + ); + let facetCuts = []; + let facetCutsFileName = getFacetCutsFileName(environment); + if (fs.existsSync(facetCutsFileName)) { + console.log(`Found facet cuts file ${facetCutsFileName}`); + facetCuts = JSON.parse(fs.readFileSync(facetCutsFileName).toString()); + } + upgradeAddress = upgradeAddress ?? 
process.env.CONTRACTS_DEFAULT_UPGRADE_ADDR; + + let bootloaderHash = ethers.constants.HashZero; + let defaultAAHash = ethers.constants.HashZero; + + const l2upgradeFileName = getL2UpgradeFileName(environment); + let l2UpgradeTx = undefined; + if (fs.existsSync(l2upgradeFileName)) { + console.log(`Found l2 upgrade file ${l2upgradeFileName}`); + const l2Upgrade = JSON.parse(fs.readFileSync(l2upgradeFileName).toString()); + + l2UpgradeTx = l2Upgrade.tx; + if (l2Upgrade.bootloader) { + bootloaderHash = l2Upgrade.bootloader.bytecodeHashes[0]; + } + + if (l2Upgrade.defaultAA) { + defaultAAHash = l2Upgrade.defaultAA.bytecodeHashes[0]; + } + } + + let cryptoVerifierAddress = ethers.constants.AddressZero; + let cryptoVerifierParams = { + recursionNodeLevelVkHash: ethers.constants.HashZero, + recursionLeafLevelVkHash: ethers.constants.HashZero, + recursionCircuitsSetVksHash: ethers.constants.HashZero + }; + let cryptoFileName = getCryptoFileName(environment); + if (fs.existsSync(cryptoFileName)) { + console.log(`Found crypto file ${cryptoFileName}`); + const crypto = JSON.parse(fs.readFileSync(cryptoFileName).toString()); + if (crypto.verifier) { + cryptoVerifierAddress = crypto.verifier.address; + } + if (crypto.keys) { + cryptoVerifierParams = crypto.keys; + } + } + + let proposeUpgradeTx = buildProposeUpgrade( + ethers.BigNumber.from(upgradeTimestamp), + protocolVersion, + '0x', + '0x', + cryptoVerifierParams, + bootloaderHash, + defaultAAHash, + cryptoVerifierAddress, + newAllowList, + l2UpgradeTx + ); + + let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); + + let [proposeTransparentUpgradeCalldata, executeUpgradeCalldata, transparentUpgrade] = + prepareproposeTransparentUpgradeCalldata( + l1upgradeCalldata, + upgradeAddress, + facetCuts, + diamondUpgradeProposalId + ); + const transactions = { + proposeUpgradeTx, + l1upgradeCalldata, + upgradeAddress, + protocolVersion, + diamondUpgradeProposalId, + upgradeTimestamp, + proposeTransparentUpgradeCalldata, + transparentUpgrade, + executeUpgradeCalldata + }; + + fs.writeFileSync(getL2TransactionsFileName(environment), JSON.stringify(transactions, null, 2)); + console.log('Default upgrade transactions are generated'); +} + +async function sendTransaction( + calldata: BytesLike, + privateKey: string, + l1rpc: string, + zksyncAddress: string, + environment: string, + gasPrice: ethers.BigNumber, + nonce: number +) { + const wallet = getWallet(l1rpc, privateKey); + zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); + nonce = nonce ?? (await wallet.getTransactionCount()); + const tx = await wallet.sendTransaction({ + to: zksyncAddress, + data: calldata, + value: 0, + gasLimit: 10_000_000, + gasPrice, + nonce + }); + console.log('Transaction hash: ', tx.hash); + await tx.wait(); + console.log('Transaction is executed'); +} + +function getWallet(l1rpc, privateKey) { + if (!l1rpc) { + l1rpc = web3Url(); + } + const provider = new ethers.providers.JsonRpcProvider(l1rpc); + + return privateKey + ? new ethers.Wallet(privateKey, provider) + : ethers.Wallet.fromMnemonic( + process.env.MNEMONIC ? 
process.env.MNEMONIC : ethTestConfig.mnemonic, + "m/44'/60'/0'/0/1" + ).connect(provider); +} + +async function proposeUpgrade( + privateKey: string, + l1rpc: string, + zksyncAddress: string, + environment: string, + gasPrice: ethers.BigNumber, + nonce: number +) { + const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); + const proposeTransparentUpgradeCalldata = transactions.proposeTransparentUpgradeCalldata; + console.log(`Proposing upgrade for protocolVersion ${transactions.protocolVersion}`); + await sendTransaction( + proposeTransparentUpgradeCalldata, + privateKey, + l1rpc, + zksyncAddress, + environment, + gasPrice, + nonce + ); +} + +async function executeUpgrade( + privateKey: string, + l1rpc: string, + zksyncAddress: string, + environment: string, + gasPrice: ethers.BigNumber, + nonce: number +) { + const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); + const executeUpgradeCalldata = transactions.executeUpgradeCalldata; + console.log(`Execute upgrade for protocolVersion ${transactions.protocolVersion}`); + await sendTransaction(executeUpgradeCalldata, privateKey, l1rpc, zksyncAddress, environment, gasPrice, nonce); +} + +async function cancelUpgrade( + privateKey: string, + l1rpc: string, + zksyncAddress: string, + environment: string, + gasPrice: ethers.BigNumber, + nonce: number, + execute: boolean +) { + zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + let wallet = getWallet(l1rpc, privateKey); + let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); + const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); + + const transparentUpgrade = transactions.transparentUpgrade; + const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; + + const proposalHash = await zkSync.upgradeProposalHash( + transparentUpgrade, + diamondUpgradeProposalId, + ethers.constants.HashZero + ); + + console.log(`Cancel upgrade with hash: ${proposalHash}`); + let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); + if (execute) { + await sendTransaction(cancelUpgradeCalldata, privateKey, l1rpc, zksyncAddress, environment, gasPrice, nonce); + } else { + console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); + } +} + +async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { + zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + // We don't care about the wallet here, we just need to make a get call. 
+ let wallet = getWallet(l1rpc, undefined); + let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); + let proposalId = await zkSync.getCurrentProposalId(); + proposalId = proposalId.add(1); + console.log( + `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( + await wallet.provider.getNetwork() + )}` + ); + return proposalId; +} + +export const command = new Command('transactions').description( + 'prepare the transactions and their calldata for the upgrade' +); + +command + .command('build-default') + .requiredOption('--upgrade-timestamp <upgrade-timestamp>') + .option('--upgrade-address <upgrade-address>') + .option('--environment <environment>') + .option('--new-allow-list <new-allow-list>') + .option('--l2-upgrader-address <l2-upgrader-address>') + .option('--diamond-upgrade-proposal-id <diamond-upgrade-proposal-id>') + .option('--l1rpc <l1rpc>') + .option('--zksync-address <zksync-address>') + .action(async (options) => { + let diamondUpgradeProposalId = options.diamondUpgradeProposalId; + if (!diamondUpgradeProposalId) { + diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); + } + + buildDefaultUpgradeTx( + options.environment, + diamondUpgradeProposalId, + options.upgradeAddress, + options.l2UpgraderAddress, + options.upgradeTimestamp, + options.newAllowList + ); + }); + +command + .command('propose-upgrade') + .option('--environment <environment>') + .option('--private-key <private-key>') + .option('--zksync-address <zksync-address>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .action(async (options) => { + await proposeUpgrade( + options.privateKey, + options.l1rpc, + options.zksyncAddress, + options.environment, + options.gasPrice, + options.nonce + ); + }); + +command + .command('execute-upgrade') + .option('--environment <environment>') + .option('--private-key <private-key>') + .option('--zksync-address <zksync-address>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .action(async (options) => { + await executeUpgrade( + options.privateKey, + options.l1rpc, + options.zksyncAddress, + options.environment, + options.gasPrice, + options.nonce + ); + }); + +command + .command('cancel-upgrade') + .option('--environment <environment>') + .option('--private-key <private-key>') + .option('--zksync-address <zksync-address>') + .option('--gas-price <gas-price>') + .option('--nonce <nonce>') + .option('--l1rpc <l1rpc>') + .option('--execute') + .action(async (options) => { + await cancelUpgrade( + options.privateKey, + options.l1rpc, + options.zksyncAddress, + options.environment, + options.gasPrice, + options.nonce, + options.execute + ); + });
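Taken together, the commander trees defined in this package compose into one linear flow. A sketch of a complete run, expressed with the same `spawn` helper the tool itself shells out with; the `yarn protocol-upgrade` entry point and all flag values are assumptions for illustration:

```ts
import { spawn } from 'zk/build/utils';

// A hypothetical end-to-end upgrade run (entry-point name and values assumed).
async function runWholeUpgrade() {
    // 1. Create etc/upgrades/<timestamp>-example/common.json with the new protocol version.
    await spawn('yarn protocol-upgrade upgrades create example --protocol-version 17');
    // 2. Deploy facets and derive facet cuts against the diamond proxy.
    await spawn('yarn protocol-upgrade facets deploy-all --environment localhost');
    // 3. Publish bootloader, defaultAA and system-contract bytecodes, recording their hashes.
    await spawn('yarn protocol-upgrade system-contracts publish-all --environment localhost');
    // 4. Turn the published hashes into the L2 upgrade transaction.
    await spawn('yarn protocol-upgrade l2-transaction complex-upgrader-calldata --environment localhost');
    // 5. Build, propose and execute the L1 side.
    await spawn('yarn protocol-upgrade transactions build-default --upgrade-timestamp 1700000000 --environment localhost');
    await spawn('yarn protocol-upgrade transactions propose-upgrade --environment localhost');
    await spawn('yarn protocol-upgrade transactions execute-upgrade --environment localhost');
}
```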
diff --git a/infrastructure/protocol-upgrade/src/utils.ts b/infrastructure/protocol-upgrade/src/utils.ts new file mode 100644 index 000000000000..6f8c88c89956 --- /dev/null +++ b/infrastructure/protocol-upgrade/src/utils.ts @@ -0,0 +1,58 @@ +import fs from 'fs'; +import { BytesLike } from 'ethers'; + +export const DEFAULT_UPGRADE_PATH = process.env.ZKSYNC_HOME + '/etc/upgrades'; +export const DEFAULT_L2CONTRACTS_FOR_UPGRADE_PATH = process.env.ZKSYNC_HOME + '/contracts/zksync/contracts/upgrades'; +export const DEFAULT_L1CONTRACTS_FOR_UPGRADE_PATH = process.env.ZKSYNC_HOME + '/contracts/ethereum/contracts/upgrades'; + +export function getTimestampInSeconds() { + return Math.floor(Date.now() / 1000); +} + +export function getCryptoFileName(environment): string { + return getUpgradePath(environment) + '/crypto.json'; +} + +export function getFacetCutsFileName(environment): string { + return getUpgradePath(environment) + '/facetCuts.json'; +} + +export function getFacetsFileName(environment): string { + return getUpgradePath(environment) + '/facets.json'; +} + +export function getL2UpgradeFileName(environment): string { + return getUpgradePath(environment) + '/l2Upgrade.json'; +} + +export function getL2TransactionsFileName(environment): string { + return getUpgradePath(environment) + '/transactions.json'; +} + +export function getNameOfTheLastUpgrade(): string { + return fs.readdirSync(DEFAULT_UPGRADE_PATH).sort().reverse()[0]; +} + +export function getCommonDataFileName(): string { + return getCommonUpgradePath() + '/common.json'; +} + +export function getCommonUpgradePath(): string { + const currentUpgrade = getNameOfTheLastUpgrade(); + return `${DEFAULT_UPGRADE_PATH}/${currentUpgrade}/`; +} + +export function getUpgradePath(environment: string): string { + const upgradeEnvironment = environment ?? 'localhost'; + const path = `${getCommonUpgradePath()}/${upgradeEnvironment}`; + if (!fs.existsSync(path)) { + fs.mkdirSync(path, { recursive: true }); + } + return path; +} + +export interface VerifierParams { + recursionNodeLevelVkHash: BytesLike; + recursionLeafLevelVkHash: BytesLike; + recursionCircuitsSetVksHash: BytesLike; +} diff --git a/infrastructure/openzeppelin-tests-preparation/tsconfig.json b/infrastructure/protocol-upgrade/tsconfig.json similarity index 86% rename from infrastructure/openzeppelin-tests-preparation/tsconfig.json rename to infrastructure/protocol-upgrade/tsconfig.json index 25645d23a739..613eda0f6e14 100644 --- a/infrastructure/openzeppelin-tests-preparation/tsconfig.json +++ b/infrastructure/protocol-upgrade/tsconfig.json @@ -10,7 +10,4 @@ "preserveSymlinks": true, "preserveWatchOutput": true }, - "files": [ - "src/index.ts" - ] } diff --git a/infrastructure/reading-tool/README.md b/infrastructure/reading-tool/README.md deleted file mode 100644 index 334f7ea88070..000000000000 --- a/infrastructure/reading-tool/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Tool for reading test config - -An utility to read data from test configs for ts-tests and zksync.js. Currently, it reads data from 'etc/test_config/' -and 'etc/tokens/'.
- -## Compile - -``` -yarn run build -``` diff --git a/infrastructure/reading-tool/package.json b/infrastructure/reading-tool/package.json deleted file mode 100644 index 7e4a59c483c1..000000000000 --- a/infrastructure/reading-tool/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "reading-tool", - "version": "1.0.0", - "license": "MIT", - "main": "build/index.js", - "private": true, - "devDependencies": { - "ts-node": "^10.1.0", - "typescript": "^4.3.5" - }, - "scripts": { - "build": "tsc", - "watch": "tsc --watch" - } -} diff --git a/infrastructure/reading-tool/src/index.ts b/infrastructure/reading-tool/src/index.ts deleted file mode 100644 index bbaa3ef5cd5d..000000000000 --- a/infrastructure/reading-tool/src/index.ts +++ /dev/null @@ -1,47 +0,0 @@ -import * as fs from 'fs'; - -function configPath(postfix: string) { - return `${process.env.ZKSYNC_HOME}/etc/test_config/${postfix}`; -} - -function loadConfig(path: string) { - return JSON.parse( - fs.readFileSync(path, { - encoding: 'utf-8' - }) - ); -} - -export function loadTestConfig(withWithdrawalHelpers: boolean) { - const ethConstantPath = configPath('constant/eth.json'); - const ethConfig = loadConfig(ethConstantPath); - - if (withWithdrawalHelpers) { - const withdrawalHelpersConfigPath = configPath('volatile/withdrawal-helpers.json'); - const withdrawalHelpersConfig = loadConfig(withdrawalHelpersConfigPath); - return { - eth: ethConfig, - withdrawalHelpers: withdrawalHelpersConfig - }; - } else { - return { - eth: ethConfig - }; - } -} - -export type Token = { - name: string; - symbol: string; - decimals: number; - address: string; -}; - -export function getTokens(network: string): Token[] { - const configPath = `${process.env.ZKSYNC_HOME}/etc/tokens/${network}.json`; - return JSON.parse( - fs.readFileSync(configPath, { - encoding: 'utf-8' - }) - ); -} diff --git a/infrastructure/reading-tool/tsconfig.json b/infrastructure/reading-tool/tsconfig.json deleted file mode 100644 index 2e0816945ae7..000000000000 --- a/infrastructure/reading-tool/tsconfig.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "compilerOptions": { - "module": "commonjs", - "target": "es6", - - "outDir": "./build", - "esModuleInterop": true, - "declaration": true, - - "preserveSymlinks": true, - "preserveWatchOutput": true - }, - "files": [ - "./src/index.ts" - ] -} diff --git a/infrastructure/zk/src/clean.ts b/infrastructure/zk/src/clean.ts index eef65ef15a01..9d2602bc082a 100644 --- a/infrastructure/zk/src/clean.ts +++ b/infrastructure/zk/src/clean.ts @@ -30,13 +30,14 @@ export const command = new Command('clean') } if (cmd.all || cmd.artifacts) { - clean(`artifacts`); + clean('core/tests/ts-integration/artifacts-zk'); + clean('core/tests/ts-integration/cache-zk'); } if (cmd.all || cmd.database) { const dbPaths = process.env.ZKSYNC_ENV?.startsWith('ext-node') ? [process.env.EN_MERKLE_TREE_PATH!] 
- : [process.env.DATABASE_STATE_KEEPER_DB_PATH!, process.env.DATABASE_NEW_MERKLE_TREE_SSD_PATH!]; + : [process.env.DATABASE_STATE_KEEPER_DB_PATH!, process.env.DATABASE_MERKLE_TREE_PATH!]; for (const dbPath of dbPaths) { clean(path.dirname(dbPath)); } diff --git a/infrastructure/zk/src/config.ts b/infrastructure/zk/src/config.ts index a7ddb3c9b2f2..efe8c7b9c4cc 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -26,7 +26,9 @@ const CONFIG_FILES = [ 'house_keeper.toml', 'fri_prover.toml', 'fri_witness_generator.toml', - 'fri_prover_group.toml' + 'fri_prover_group.toml', + 'proof_data_handler.toml', + 'fri_witness_vector_generator.toml' ]; function loadConfigFile(path: string) { @@ -112,7 +114,7 @@ export function compileConfig(environment?: string) { const outputFileName = `etc/env/${environment}.env`; fs.writeFileSync(outputFileName, outputFileContents); - console.log('Configs compiled'); + console.log(`Configs compiled for ${environment}`); } export const command = new Command('config').description('config management'); diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index 0bde0d5d9d36..c0c4d04d97fb 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -39,7 +39,7 @@ export async function initializeValidator(args: any[] = []) { const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; const baseCommandL1 = isLocalSetup ? `yarn --cwd /contracts/ethereum` : `yarn l1-contracts`; - await utils.spawn(`${baseCommandL1} initialize-validator ${args.join(' ')} | tee initilizeValidator.log`); + await utils.spawn(`${baseCommandL1} initialize-validator ${args.join(' ')} | tee initializeValidator.log`); } export async function initializeL1AllowList(args: any[] = []) { @@ -48,10 +48,21 @@ export async function initializeL1AllowList(args: any[] = []) { const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; const baseCommandL1 = isLocalSetup ? `yarn --cwd /contracts/ethereum` : `yarn l1-contracts`; - await utils.spawn(`${baseCommandL1} initialize-allow-list ${args.join(' ')} | tee initilizeL1AllowList.log`); + await utils.spawn(`${baseCommandL1} initialize-allow-list ${args.join(' ')} | tee initializeL1AllowList.log`); } -export async function deployL2(args: any[] = []) { +export async function initializeWethToken(args: any[] = []) { + await utils.confirmAction(); + + const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; + const baseCommandL1 = isLocalSetup ? 
`yarn --cwd /contracts/ethereum` : `yarn l1-contracts`; + + await utils.spawn( + `${baseCommandL1} initialize-l2-weth-token instant-call ${args.join(' ')} | tee initializeWeth.log` + ); +} + +export async function deployL2(args: any[] = [], includePaymaster?: boolean) { await utils.confirmAction(); const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; @@ -66,17 +77,21 @@ export async function deployL2(args: any[] = []) { await utils.spawn(`${baseCommandL1} initialize-bridges ${args.join(' ')} | tee deployL2.log`); - await utils.spawn(`${baseCommandL2} deploy-testnet-paymaster ${args.join(' ')} | tee -a deployL2.log`); + if (includePaymaster) { + await utils.spawn(`${baseCommandL2} deploy-testnet-paymaster ${args.join(' ')} | tee -a deployL2.log`); + } await utils.spawn(`${baseCommandL2} deploy-l2-weth ${args.join(' ')} | tee -a deployL2.log`); + await utils.spawn(`${baseCommandL2} deploy-force-deploy-upgrader ${args.join(' ')} | tee -a deployL2.log`); + const l2DeployLog = fs.readFileSync('deployL2.log').toString(); const l2DeploymentEnvVars = [ - 'CONTRACTS_L2_ETH_BRIDGE_ADDR', 'CONTRACTS_L2_ERC20_BRIDGE_ADDR', 'CONTRACTS_L2_TESTNET_PAYMASTER_ADDR', 'CONTRACTS_L2_WETH_TOKEN_IMPL_ADDR', - 'CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR' + 'CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR', + 'CONTRACTS_L2_DEFAULT_UPGRADE_ADDR' ]; updateContractsEnv(l2DeployLog, l2DeploymentEnvVars); @@ -85,8 +100,6 @@ export async function deployL2(args: any[] = []) { const l1DeployLog = fs.readFileSync('deployL1.log').toString(); const l1DeploymentEnvVars = ['CONTRACTS_L2_WETH_BRIDGE_ADDR']; updateContractsEnv(l1DeployLog, l1DeploymentEnvVars); - - await utils.spawn(`${baseCommandL1} initialize-l2-weth-token instant-call ${args.join(' ')} | tee -a deployL2.log`); } export async function deployL1(args: any[]) { @@ -99,8 +112,10 @@ export async function deployL1(args: any[]) { await utils.spawn(`${baseCommand} deploy-no-build ${args.join(' ')} | tee deployL1.log`); const deployLog = fs.readFileSync('deployL1.log').toString(); const envVars = [ + 'CONTRACTS_CREATE2_FACTORY_ADDR', 'CONTRACTS_DIAMOND_CUT_FACET_ADDR', 'CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR', + 'CONTRACTS_DEFAULT_UPGRADE_ADDR', 'CONTRACTS_GOVERNANCE_FACET_ADDR', 'CONTRACTS_MAILBOX_FACET_ADDR', 'CONTRACTS_EXECUTOR_FACET_ADDR', @@ -114,7 +129,8 @@ export async function deployL1(args: any[]) { 'CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR', 'CONTRACTS_L1_WETH_BRIDGE_IMPL_ADDR', 'CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR', - 'CONTRACTS_L1_ALLOW_LIST_ADDR' + 'CONTRACTS_L1_ALLOW_LIST_ADDR', + 'CONTRACTS_L1_MULTICALL3_ADDR' ]; const updatedContracts = updateContractsEnv(deployLog, envVars); @@ -128,6 +144,10 @@ export async function redeployL1(args: any[]) { await verifyL1Contracts(); } +export async function deployVerifier(args: any[]) { + await deployL1([...args, '--only-verifier']); +} + export const command = new Command('contract').description('contract management'); command @@ -137,9 +157,9 @@ command .action(redeployL1); command.command('deploy [deploy-opts...]').allowUnknownOption(true).description('deploy contracts').action(deployL1); command.command('build').description('build contracts').action(build); -command.command('initilize-validator').description('initialize validator').action(initializeValidator); +command.command('initialize-validator').description('initialize validator').action(initializeValidator); command - .command('initilize-l1-allow-list-contract') + .command('initialize-l1-allow-list-contract') .description('initialize L1 allow list contract') 
.action(initializeL1AllowList); command.command('verify').description('verify L1 contracts').action(verifyL1Contracts); diff --git a/infrastructure/zk/src/database.ts b/infrastructure/zk/src/database.ts index e450c0229022..f1e04e41bf23 100644 --- a/infrastructure/zk/src/database.ts +++ b/infrastructure/zk/src/database.ts @@ -48,7 +48,7 @@ export async function setup() { } await utils.spawn('cargo sqlx database create'); await utils.spawn('cargo sqlx migrate run'); - if (process.env.DATABASE_URL == localDbUrl) { + if (process.env.DATABASE_URL!.startsWith(localDbUrl)) { await utils.spawn('cargo sqlx prepare --check -- --tests || cargo sqlx prepare -- --tests'); } diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 91b308f7a922..db8e0fb5b6a4 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -13,7 +13,9 @@ const IMAGES = [ 'zk-environment', 'circuit-synthesizer', 'witness-generator', - 'prover-fri' + 'prover-fri', + 'prover-gpu-fri', + 'witness-vector-generator' ]; const UNIX_TIMESTAMP = Date.now(); @@ -65,7 +67,9 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'prover-v2', 'circuit-synthesizer', 'witness-generator', - 'prover-fri' + 'prover-fri', + 'prover-gpu-fri', + 'witness-vector-generator' ].includes(image) ? ['latest2.0', `2.0-${imageTagSha}`, `2.0-${imageTagShaTS}`] : [`latest2.0`]; diff --git a/infrastructure/zk/src/env.ts b/infrastructure/zk/src/env.ts index 963cf81c2026..cda73ddf6f5a 100644 --- a/infrastructure/zk/src/env.ts +++ b/infrastructure/zk/src/env.ts @@ -4,25 +4,27 @@ import dotenv from 'dotenv'; import * as utils from './utils'; import * as config from './config'; +export const getAvailableEnvsFromFiles = () => { + const envs = new Set(); + + fs.readdirSync(`etc/env`).forEach((file) => { + if (!file.startsWith('.') && (file.endsWith('.env') || file.endsWith('.toml'))) { + envs.add(file.replace(/\..*$/, '')); + } + }); + return envs; +}; export function get(print: boolean = false) { const current = `etc/env/.current`; const inCurrent = fs.existsSync(current) && fs.readFileSync(current).toString().trim(); + const currentEnv = (process.env.ZKSYNC_ENV = process.env.ZKSYNC_ENV || inCurrent || (process.env.IN_DOCKER ? 
'docker' : 'dev')); - if (print) { - const envs = new Set(['dev', currentEnv]); - if (inCurrent) { - envs.add(inCurrent); - } - - fs.readdirSync(`etc/env`).forEach((file) => { - if (!file.startsWith('.') && (file.endsWith('.env') || file.endsWith('.toml'))) { - envs.add(file.replace(/\..*$/, '')); - } - }); + const envs = getAvailableEnvsFromFiles(); - envs.forEach((env) => { + if (print) { + [...envs].sort().forEach((env) => { if (env === currentEnv) { console.log(`* ${env}`); } else { @@ -127,6 +129,20 @@ export function modify(variable: string, assignedVariable: string) { reload(); } +// merges .init.env with current env file so all configs are in the same place +export function mergeInitToEnv() { + const env = dotenv.parse(fs.readFileSync(process.env.ENV_FILE!)); + const initEnv = dotenv.parse(fs.readFileSync('etc/env/.init.env')); + for (const initVar in initEnv) { + env[initVar] = initEnv[initVar]; + } + let output = ''; + for (const envVar in env) { + output += `${envVar}=${env[envVar]}\n`; + } + fs.writeFileSync(process.env.ENV_FILE!, output); +} + export const command = new Command('env') .arguments('[env_name]') .description('get or set zksync environment') diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts new file mode 100644 index 000000000000..6c666b0a1195 --- /dev/null +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -0,0 +1,440 @@ +import { Command } from 'commander'; +import enquirer from 'enquirer'; +import { BigNumber, ethers } from 'ethers'; +import chalk from 'chalk'; +import { announced, submoduleUpdate } from './init'; +import * as server from './server'; +import * as contract from './contract'; +import * as run from './run/run'; +import * as compiler from './compiler'; +import * as db from './database'; +import { clean } from './clean'; +import * as env from './env'; +import { compileConfig } from './config'; +import * as fs from 'fs'; + +const title = chalk.blueBright; +const warning = chalk.yellowBright; +const error = chalk.redBright; +const announce = chalk.yellow; + +// An init command that allows configuring and spinning up a new Hyperchain network +async function initHyperchain() { + await announced('Initializing Hyperchain creation', setupConfiguration()); + + await announced('Drop postgres db', db.drop()); + await announced('Setup postgres db', db.setup()); + await announced('Clean rocksdb', clean('db')); + await announced('Clean backups', clean('backups')); + await announced('Building L1 and L2 contracts', contract.build()); + + await announced('Deploy test tokens', initializeTestERC20s()); + await announced('Running server genesis setup', server.genesisFromSources()); + + const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY; + const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; + const governorAddress = process.env.GOVERNOR_ADDRESS; + + await announced( + 'Deploying L1 contracts', + contract.redeployL1(['--private-key', deployerPrivateKey, '--governor-address', governorAddress]) + ); + + await announced('Initializing validator', contract.initializeValidator(['--private-key', governorPrivateKey])); + await announced('Initialize L1 allow list', contract.initializeL1AllowList(['--private-key', governorPrivateKey])); + await announced('Deploying L2 contracts', contract.deployL2(['--private-key', deployerPrivateKey], false)); + + await announced('Initialize WETH Token', initializeWethTokenForHyperchain()); + + env.mergeInitToEnv(); + + console.log(announce(`\nYour Hyperchain configuration is available at ${process.env.ENV_FILE}\n`)); +}
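`initHyperchain` above finishes with `env.mergeInitToEnv()`, whose merge direction is worth making explicit: values from `etc/env/.init.env` overwrite same-named values in the chain's env file. A minimal model of the semantics (not the real implementation, which rewrites the file in place):

```ts
import dotenv from 'dotenv';

// Minimal model of mergeInitToEnv: parse both env files, let .init.env win
// on conflicts, and serialize back to KEY=VALUE lines.
function mergeEnvContents(envContents: string, initContents: string): string {
    const env = dotenv.parse(envContents);
    const init = dotenv.parse(initContents);
    const merged = { ...env, ...init }; // init vars overwrite env vars
    return Object.entries(merged)
        .map(([name, value]) => `${name}=${value}`)
        .join('\n');
}

// mergeEnvContents('A=1\nB=2', 'B=3\nC=4') === 'A=1\nB=3\nC=4'
```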
+ +async function setupConfiguration() { + const CONFIGURE = 'Configure new chain'; + const USE_EXISTING = 'Use existing configuration'; + const questions = [ + { + message: 'Do you want to configure a new chain or use an existing configuration?', + name: 'config', + type: 'select', + choices: [CONFIGURE, USE_EXISTING] + } + ]; + + const results: any = await enquirer.prompt(questions); + + if (results.config === CONFIGURE) { + await announced('Setting Hyperchain metadata', setHyperchainMetadata()); + await announced('Validating information and balances to deploy Hyperchain', checkReadinessToDeploy()); + await announced('Checkout system-contracts submodule', submoduleUpdate()); + await announced('Compiling system contracts', compiler.compileSystemContracts()); + await announced('Compiling JS packages', run.yarn()); + await announced('Compile l2 contracts', compiler.compileAll()); + } else { + const envs = env.getAvailableEnvsFromFiles(); + + const envQuestions = [ + { + message: 'Which environment do you want to use?', + name: 'env', + type: 'select', + choices: [...envs].sort() + } + ]; + + const envResults: any = await enquirer.prompt(envQuestions); + env.set(envResults.env); + } +} + +async function setHyperchainMetadata() { + const BASE_NETWORKS = ['localhost', 'sepolia', 'goerli', 'mainnet']; + const GENERATE_KEYS = 'Generate keys'; + const INSERT_KEYS = 'Insert keys'; + const questions = [ + { + message: 'What is your hyperchain name?', + name: 'chainName', + type: 'input' + }, + { + message: 'What is your hyperchain id? Make sure this is not used by other chains.', + name: 'chainId', + type: 'input' + }, + { + message: 'Which L1 network will your hyperchain roll up to?', + name: 'l1Chain', + type: 'select', + choices: BASE_NETWORKS + }, + { + message: 'What is the RPC URL for the L1 network?', + name: 'l1Rpc', + type: 'input' + }, + { + message: + 'Do you want to generate new addresses/keys for the Deployer, Governor and ETH Operator, or insert your own keys?', + name: 'generateKeys', + type: 'select', + choices: [GENERATE_KEYS, INSERT_KEYS] + } + ]; + + const results: any = await enquirer.prompt(questions); + + let deployer, governor, ethOperator, feeReceiver: ethers.Wallet | undefined; + let feeReceiverAddress; + + if (results.generateKeys === GENERATE_KEYS) { + deployer = ethers.Wallet.createRandom(); + governor = ethers.Wallet.createRandom(); + ethOperator = ethers.Wallet.createRandom(); + feeReceiver = ethers.Wallet.createRandom(); + feeReceiverAddress = feeReceiver.address; + } else { + const keyQuestions = [ + { + message: 'Private key of the L1 Deployer (the one that deploys the contracts)', + name: 'deployerKey', + type: 'password' + }, + { + message: 'Private key of the L1 Governor (the one that can upgrade the contracts)', + name: 'governorKey', + type: 'password' + }, + { + message: 'Private key of the L1 ETH Operator (the one that rolls up the batches)', + name: 'ethOperator', + type: 'password' + }, + { + message: 'Address of L2 fee receiver (the one that collects fees)', + name: 'feeReceiver', + type: 'input' + } + ]; + + const keyResults: any = await enquirer.prompt(keyQuestions); + + deployer = new ethers.Wallet(keyResults.deployerKey); + governor = new ethers.Wallet(keyResults.governorKey); + ethOperator = new ethers.Wallet(keyResults.ethOperator); + feeReceiver = undefined; + feeReceiverAddress = keyResults.feeReceiver; + } + + console.log('\n'); + + printAddressInfo('Deployer', deployer.address);
printAddressInfo('Governor', governor.address); + printAddressInfo('ETH Operator', ethOperator.address); + printAddressInfo('Fee receiver', feeReceiverAddress); + + console.log( + warning( + 'Private keys for these wallets are available in the .env file for your chain. Make sure that you have a copy in a safe place.\n' + ) + ); + + const verifyQuestions = [ + { + message: 'Do you want to verify your L1 contracts? You will need an Etherscan API key for it.', + name: 'verify', + type: 'confirm' + } + ]; + + const verifyResults: any = await enquirer.prompt(verifyQuestions); + + if (verifyResults.verify) { + const etherscanQuestions = [ + { + message: 'Please provide your Etherscan API Key.', + name: 'etherscanKey', + type: 'input' + } + ]; + + const etherscanResults: any = await enquirer.prompt(etherscanQuestions); + + wrapEnvModify('MISC_ETHERSCAN_API_KEY', etherscanResults.etherscanKey); + } + + if (governor.address == deployer.address) { + throw Error(error('Governor and Deployer cannot be the same')); + } + + const environment = getEnv(results.chainName); + + await compileConfig(environment); + env.set(environment); + + const ethChainId = getL1Id(results.l1Chain); + + wrapEnvModify('ETH_CLIENT_CHAIN_ID', ethChainId.toString()); + wrapEnvModify('ETH_CLIENT_WEB3_URL', results.l1Rpc); + wrapEnvModify('CHAIN_ETH_NETWORK', results.l1Chain); + wrapEnvModify('CHAIN_ETH_ZKSYNC_NETWORK', results.chainName); + wrapEnvModify('CHAIN_ETH_ZKSYNC_NETWORK_ID', results.chainId); + wrapEnvModify('ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY', ethOperator.privateKey); + wrapEnvModify('ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR', ethOperator.address); + wrapEnvModify('DEPLOYER_PRIVATE_KEY', deployer.privateKey); + wrapEnvModify('GOVERNOR_PRIVATE_KEY', governor.privateKey); + wrapEnvModify('GOVERNOR_ADDRESS', governor.address); + wrapEnvModify('CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR', feeReceiverAddress); + if (feeReceiver) { + wrapEnvModify('FEE_RECEIVER_PRIVATE_KEY', feeReceiver.privateKey); + } + + // For now, force a 20-second delay to ensure batch execution doesn't happen in the same block as batch proving. + // This bug will be fixed in the smart contract soon. + wrapEnvModify('CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY', '0'); + wrapEnvModify('ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS', '20'); + + env.load(); +} + +function printAddressInfo(name: string, address: string) { + console.log(title(name)); + console.log(`Address - ${address}`); + console.log(''); +} + +async function initializeTestERC20s() { + const questions = [ + { + message: 'Do you want to deploy some test ERC20s to your Hyperchain (only for testing scenarios)?', + name: 'deployERC20s', + type: 'confirm' + } + ]; + + const results: any = await enquirer.prompt(questions); + + if (results.deployERC20s) { + const privateKey = process.env.DEPLOYER_PRIVATE_KEY; + await announced( + 'Deploying localhost ERC20 tokens', + run.deployERC20('dev', '', '', '', ['--private-key', privateKey, '--envFile', process.env.ZKSYNC_ENV!]) + ); + console.log( + warning( + `The addresses for the tokens can be found in the /etc/tokens/${getEnv( + process.env.CHAIN_ETH_NETWORK! + )}.json file.
The deployer address is the owner of the token contracts.` + ) + ); + } +} + +async function initializeWethTokenForHyperchain() { + const questions = [ + { + message: 'Do you want to deploy a Wrapped ETH Bridge?', + name: 'deployWeth', + type: 'confirm' + } + ]; + + const results: any = await enquirer.prompt(questions); + + if (results.deployWeth) { + const tokens = getTokens(process.env.ZKSYNC_ENV!); + + let baseWethToken = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')?.address; + + if (!baseWethToken) { + const wethQuestions = [ + { + message: 'What is the address of the Wrapped ETH on the base chain?', + name: 'l1Weth', + type: 'input' + } + ]; + + const wethResults: any = await enquirer.prompt(wethQuestions); + + baseWethToken = wethResults.l1Weth; + + if (fs.existsSync(`${process.env.ZKSYNC_HOME}/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`)) { + tokens.push({ + name: 'Wrapped Ether', + symbol: 'WETH', + decimals: 18, + address: baseWethToken! + }); + fs.writeFileSync( + `${process.env.ZKSYNC_HOME}/etc/tokens/${getEnv(process.env.ZKSYNC_ENV!)}.json`, + JSON.stringify(tokens, null, 4) + ); + } + } + + wrapEnvModify('CONTRACTS_L1_WETH_TOKEN_ADDR', baseWethToken!); + + const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; + + await announced( + 'Initializing L2 WETH token', + contract.initializeWethToken(['--private-key', governorPrivateKey]) + ); + } +} + +// The current env.modify requires writing the variable name twice. This wrapper lets the caller pass just the name and the value. +function wrapEnvModify(variable: string, assignedVariable: string) { + env.modify(variable, `${variable}=${assignedVariable}`); +} + +// Make sure all env information is available and wallets are funded +async function checkReadinessToDeploy() { + const provider = new ethers.providers.JsonRpcProvider(process.env.ETH_CLIENT_WEB3_URL!); + + const deployer = new ethers.Wallet(process.env.DEPLOYER_PRIVATE_KEY!, provider); + const governor = new ethers.Wallet(process.env.GOVERNOR_PRIVATE_KEY!, provider); + const ethOperator = new ethers.Wallet(process.env.ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY!, provider); + + async function checkAllWalletBalances(): Promise<boolean> { + console.log('Checking balances...'); + const checkPromises = []; + checkPromises.push(checkBalance(deployer, ethers.utils.parseEther('0.3'))); + checkPromises.push(checkBalance(governor, ethers.utils.parseEther('0.1'))); + checkPromises.push(checkBalance(ethOperator, ethers.utils.parseEther('0.5'))); + const results = await Promise.all(checkPromises); + return results.every((result) => result); + } + + while (true) { + if (await checkAllWalletBalances()) { + return; + } + + const TRY_AGAIN = 'Try again'; + const EXIT = "I'll restart later"; + + const fundQuestions = [ + { + message: + 'Please fund the wallets so that they have enough balance (Deployer - 0.3 ETH, Governor - 0.1 ETH, and ETH Operator - 0.5 ETH).', + name: 'fund', + type: 'select', + choices: [TRY_AGAIN, EXIT] + } + ]; + + const fundResults: any = await enquirer.prompt(fundQuestions); + + if (fundResults.fund === EXIT) { + console.log('Exiting Hyperchain initializer.'); + process.exit(0); + } + } +} + +async function checkBalance(wallet: ethers.Wallet, expectedBalance: BigNumber): Promise<boolean> { + const balance = await wallet.getBalance(); + if (balance.lt(expectedBalance)) { + console.log( + `Wallet ${ + wallet.address + } has insufficient funds.
Expected ${expectedBalance.toString()}, got ${balance.toString()}` + ); + return false; + } + return true; +} + +function getL1Id(chainName: string) { + switch (chainName) { + case 'localhost': + return 9; + case 'sepolia': + return 11155111; + case 'goerli': + return 5; + case 'mainnet': + return 1; + default: + throw Error('Unknown base layer chain'); + } +} + +function getEnv(chainName: string) { + return String(chainName) + .normalize('NFKD') // split accented characters into their base characters and diacritical marks + .replace(/[\u0300-\u036f]/g, '') // remove all the accents, which happen to be all in the \u03xx UNICODE block. + .trim() // trim leading or trailing whitespace + .toLowerCase() // convert to lowercase + .replace(/[^a-z0-9 -]/g, '') // remove non-alphanumeric characters + .replace(/\s+/g, '-') // replace spaces with hyphens + .replace(/-+/g, '-'); // remove consecutive hyphens +} + +type L1Token = { + name: string; + symbol: string; + decimals: number; + address: string; +}; + +export function getTokens(network: string): L1Token[] { + const configPath = `${process.env.ZKSYNC_HOME}/etc/tokens/${network}.json`; + try { + return JSON.parse( + fs.readFileSync(configPath, { + encoding: 'utf-8' + }) + ); + } catch (e) { + return []; + } +} + +export const initHyperchainCommand = new Command('initHyperchain') + .description('Initializes a new hyperchain network') + .action(initHyperchain); diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index 805add4e45c1..3772b6699b3f 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -8,6 +8,7 @@ import { command as up } from './up'; import { command as down } from './down'; import { command as contract } from './contract'; import { initCommand as init, reinitCommand as reinit, lightweightInitCommand as lightweightInit } from './init'; +import { initHyperchainCommand as initHyperchain } from './hyperchain_wizard'; import { command as run } from './run/run'; import { command as test } from './test/test'; import { command as docker } from './docker'; @@ -31,6 +32,7 @@ const COMMANDS = [ init, reinit, lightweightInit, + initHyperchain, run, test, fmt, diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index 0580e022dd5a..376147f923bf 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -38,11 +38,14 @@ export async function init(skipSubmodulesCheckout: boolean) { await announced('Clean backups', clean('backups')); await announced('Building contracts', contract.build()); await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev')); + await announced('Deploying L1 verifier', contract.deployVerifier([])); + await announced('Reloading env', env.reload()); await announced('Running server genesis setup', server.genesisFromSources()); await announced('Deploying L1 contracts', contract.redeployL1([])); await announced('Initializing validator', contract.initializeValidator()); await announced('Initialize L1 allow list', contract.initializeL1AllowList()); - await announced('Deploying L2 contracts', contract.deployL2()); + await announced('Deploying L2 contracts', contract.deployL2([], true)); + await announced('Initializing L2 WETH token', contract.initializeWethToken()); } // A smaller version of `init` that "resets" the localhost environment, for which `init` was already called before. 
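A worked example of the `getEnv` normalization above, since it decides which env and token files a chain name maps to. `getEnv` itself is module-private, so this sketch inlines a copy of it (sample inputs invented):

```ts
// Inlined copy of getEnv from hyperchain_wizard.ts, so the examples are checkable.
function getEnvSlug(chainName: string) {
    return String(chainName)
        .normalize('NFKD')
        .replace(/[\u0300-\u036f]/g, '')
        .trim()
        .toLowerCase()
        .replace(/[^a-z0-9 -]/g, '')
        .replace(/\s+/g, '-')
        .replace(/-+/g, '-');
}

console.assert(getEnvSlug('Voilà Chain!') === 'voila-chain'); // accents stripped, punctuation removed
console.assert(getEnvSlug('My   Test Net') === 'my-test-net'); // whitespace runs collapse to single hyphens
console.assert(getEnvSlug('mainnet') === 'mainnet'); // already-normalized names pass through
```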
@@ -56,26 +59,32 @@ export async function reinit() { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); await announced('Building contracts', contract.build()); + await announced('Deploying L1 verifier', contract.deployVerifier([])); + await announced('Reloading env', env.reload()); await announced('Running server genesis setup', server.genesisFromSources()); await announced('Deploying L1 contracts', contract.redeployL1([])); await announced('Initializing validator', contract.initializeValidator()); await announced('Initializing L1 Allow list', contract.initializeL1AllowList()); - await announced('Deploying L2 contracts', contract.deployL2()); + await announced('Deploying L2 contracts', contract.deployL2([], true)); + await announced('Initializing L2 WETH token', contract.initializeWethToken()); } // A lightweight version of `init` that sets up local databases, generates genesis and deploys precompiled contracts export async function lightweightInit() { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); + await announced('Deploying L1 verifier', contract.deployVerifier([])); + await announced('Reloading env', env.reload()); await announced('Running server genesis setup', server.genesisFromBinary()); await announced('Deploying L1 contracts', contract.redeployL1([])); await announced('Initializing validator', contract.initializeValidator()); await announced('Initializing L1 Allow list', contract.initializeL1AllowList()); - await announced('Deploying L2 contracts', contract.deployL2()); + await announced('Deploying L2 contracts', contract.deployL2([], true)); + await announced('Initializing L2 WETH token', contract.initializeWethToken()); } // Wrapper that writes an announcement and completion notes for each executed task. -async function announced(fn: string, promise: Promise<void> | void) { +export async function announced(fn: string, promise: Promise<void> | void) { const announceLine = `${entry('>')} ${announce(fn)}`; const separator = '-'.repeat(fn.length + 2); // 2 is the length of "> ". console.log(`\n` + separator); // So it's easier to see each individual step in the console. @@ -96,7 +105,7 @@ function createVolumes() { fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/postgres`, { recursive: true }); } -async function submoduleUpdate() { +export async function submoduleUpdate() { await utils.exec('git submodule init'); await utils.exec('git submodule update'); } diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts index 70ffccd8cc29..e895fb23a02c 100644 --- a/infrastructure/zk/src/run/run.ts +++ b/infrastructure/zk/src/run/run.ts @@ -7,8 +7,19 @@ import * as dataRestore from './data-restore'; export { dataRestore }; -export async function deployERC20(command: 'dev' | 'new', name?: string, symbol?: string, decimals?: string) { +export async function deployERC20( + command: 'dev' | 'new', + name?: string, + symbol?: string, + decimals?: string, + args: any = [] +) { if (command == 'dev') { + let destinationFile = 'localhost'; + if (args.includes('--envFile')) { + destinationFile = args[args.indexOf('--envFile') + 1]; + args.splice(args.indexOf('--envFile'), 2); + } await utils.spawn(`yarn --silent --cwd contracts/ethereum deploy-erc20 add-multi ' [ { "name": "DAI", "symbol": "DAI", "decimals": 18 }, @@ -27,7 +38,7 @@ export async function deployERC20(command: 'dev' | 'new', name?: string, symbol? 
{ "name": "GNTL", "symbol": "GNTW", "decimals": 18 }, { "name": "MLTTL", "symbol": "MLTTW", "decimals": 18 }, { "name": "Wrapped Ether", "symbol": "WETH", "decimals": 18, "implementation": "WETH9"} - ]' > ./etc/tokens/localhost.json`); + ]' ${args.join(' ')} > ./etc/tokens/${destinationFile}.json`); } else if (command == 'new') { await utils.spawn( `yarn --silent --cwd contracts/ethereum deploy-erc20 add --token-name ${name} --symbol ${symbol} --decimals ${decimals}` @@ -69,10 +80,6 @@ export async function revertReason(txHash: string, web3url?: string) { await utils.spawn(`yarn l1-contracts ts-node scripts/revert-reason.ts ${txHash} ${web3url || ''}`); } -export async function explorer() { - await utils.spawn('yarn explorer serve'); -} - export async function exitProof(...args: string[]) { await utils.spawn(`cargo run --example generate_exit_proof --release -- ${args.join(' ')}`); } @@ -128,7 +135,6 @@ export async function cross_en_checker() { export const command = new Command('run').description('run miscellaneous applications').addCommand(dataRestore.command); command.command('test-accounts').description('print ethereum test accounts').action(testAccounts); -command.command('explorer').description('run zksync explorer locally').action(explorer); command.command('yarn').description('install all JS dependencies').action(yarn); command.command('cat-logs [exit_code]').description('print server and prover logs').action(catLogs); diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index b7114fde28df..70674bda03c7 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -7,10 +7,10 @@ import { unloadInit } from './env'; import * as path from 'path'; import * as db from './database'; -export async function server(rebuildTree: boolean, openzeppelinTests: boolean, components?: string) { +export async function server(rebuildTree: boolean, uring: boolean, components?: string) { let options = ''; - if (openzeppelinTests) { - options += '--features=openzeppelin_tests'; + if (uring) { + options += '--features=rocksdb/io-uring'; } if (rebuildTree || components) { options += ' --'; @@ -122,13 +122,13 @@ export const serverCommand = new Command('server') .description('start zksync server') .option('--genesis', 'generate genesis data via server') .option('--rebuild-tree', 'rebuilds merkle tree from database logs', 'rebuild_tree') - .option('--openzeppelin-tests', `enables 'openzeppelin_tests' feature`) + .option('--uring', 'enables uring support for RocksDB') .option('--components ', 'comma-separated list of components to run') .action(async (cmd: Command) => { if (cmd.genesis) { await genesisFromSources(); } else { - await server(cmd.rebuildTree, cmd.openzeppelinTests, cmd.components); + await server(cmd.rebuildTree, cmd.uring, cmd.components); } }); diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts index 238c412ceec9..cecc6fb49d80 100644 --- a/infrastructure/zk/src/test/integration.ts +++ b/infrastructure/zk/src/test/integration.ts @@ -19,6 +19,11 @@ export async function api(bail: boolean = false) { await utils.spawn('yarn ts-integration api-test' + flag); } +export async function contractVerification(bail: boolean = false) { + const flag = bail ? 
' --bail' : ''; + await utils.spawn('yarn ts-integration contract-verification-test' + flag); +} + export async function server(options: string[] = []) { if (process.env.ZKSYNC_ENV?.startsWith('ext-node')) { process.env.ZKSYNC_WEB3_API_URL = `http://127.0.0.1:${process.env.EN_HTTP_PORT}`; @@ -161,6 +166,14 @@ command await api(cmd.bail); }); +command + .command('contract-verification') + .description('run contract verification tests') + .option('--bail') + .action(async (cmd: Command) => { + await contractVerification(cmd.bail); + }); + command .command('testkit [options...]') .allowUnknownOption(true) diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index 821a59714448..4017bda32459 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -26,23 +26,11 @@ export async function rust(options: string[]) { await utils.spawn(cmd); } -export async function openzeppelin() { - process.chdir(`${process.env.ZKSYNC_HOME}/etc/openzeppelin-contracts`); - await utils.spawn('yarn'); - process.chdir(`${process.env.ZKSYNC_HOME}/infrastructure/openzeppelin-tests-preparation`); - await utils.spawn('yarn && yarn start'); - process.chdir(`${process.env.ZKSYNC_HOME}/etc/openzeppelin-contracts`); - await utils.spawn('yarn test'); - - process.chdir(process.env.ZKSYNC_HOME as string); -} - export const command = new Command('test').description('run test suites').addCommand(integration.command); command.command('js').description('run unit-tests for javascript packages').action(js); command.command('prover').description('run unit-tests for the prover').action(prover); command.command('l1-contracts').description('run unit-tests for the layer 1 smart contracts').action(l1Contracts); -command.command('openzeppelin').description(`run openzeppelin contracts' tests`).action(openzeppelin); command .command('rust [command...]') .allowUnknownOption() diff --git a/infrastructure/zk/src/utils.ts b/infrastructure/zk/src/utils.ts index 27c9a92f25ea..47f843fa3bad 100644 --- a/infrastructure/zk/src/utils.ts +++ b/infrastructure/zk/src/utils.ts @@ -21,7 +21,6 @@ const IGNORED_DIRS = [ 'typechain', 'binaryen', 'system-contracts', - 'openzeppelin-contracts', 'artifacts-zk', 'cache-zk' ]; diff --git a/package.json b/package.json index c4fdf76dcb19..87c50527f021 100644 --- a/package.json +++ b/package.json @@ -11,12 +11,11 @@ "etc/contracts-test-data", "etc/ERC20", "infrastructure/zk", - "infrastructure/reading-tool", "infrastructure/local-setup-preparation", - "infrastructure/openzeppelin-tests-preparation", "core/tests/revert-test", "core/tests/upgrade-test", - "core/tests/ts-integration" + "core/tests/ts-integration", + "infrastructure/protocol-upgrade" ], "nohoist": [ "**/@types/jest", @@ -25,7 +24,6 @@ }, "scripts": { "build:zksync-sdk-web3": "yarn web3 build", - "build:reading-tool": "yarn reading-tool build", "web3": "yarn workspace zksync-web3", "local-prep": "yarn workspace local-setup-preparation", "l1-contracts": "yarn workspace l1-zksync-contracts", @@ -34,7 +32,6 @@ "upgrade-test": "yarn workspace upgrade-test", "ts-integration": "yarn workspace ts-integration", "zk": "yarn workspace zk", - "reading-tool": "yarn workspace reading-tool", "init-build": "yarn npm-run-all --parallel build:*" }, "devDependencies": { diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md new file mode 100644 index 000000000000..00d80d30a75e --- /dev/null +++ b/prover/CHANGELOG.md @@ -0,0 +1,82 @@ +# Changelog + +## 
[5.30.1](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.30.0...prover-v5.30.1) (2023-08-11) + + +### Bug Fixes + +* **prover-fri:** GPU proving use init_cs_for_external_proving with hints ([#2347](https://github.com/matter-labs/zksync-2-dev/issues/2347)) ([ca6866c](https://github.com/matter-labs/zksync-2-dev/commit/ca6866c6ef68a25e650ce8e5a4e97c42e064f292)) + +## [5.30.0](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.29.0...prover-v5.30.0) (2023-08-09) + + +### Features + +* **db:** Configure statement timeout for Postgres ([#2317](https://github.com/matter-labs/zksync-2-dev/issues/2317)) ([afdbb6b](https://github.com/matter-labs/zksync-2-dev/commit/afdbb6b94d9e43b9659ff5d3428f2d9a7827b29f)) +* **house-keeper:** refactor periodic job to be reusable by adding in lib ([#2333](https://github.com/matter-labs/zksync-2-dev/issues/2333)) ([ad72a16](https://github.com/matter-labs/zksync-2-dev/commit/ad72a1691b661b2b4eeaefd29375a8987b485715)) +* **prover-fri:** Add concurrent circuit synthesis for FRI GPU prover ([#2326](https://github.com/matter-labs/zksync-2-dev/issues/2326)) ([aef3491](https://github.com/matter-labs/zksync-2-dev/commit/aef3491cd6af01840dd4fe5b7e530028916ffa8f)) + + +### Bug Fixes + +* **prover:** Kill prover process for edge-case in crypto thread code ([#2334](https://github.com/matter-labs/zksync-2-dev/issues/2334)) ([f2b5e1a](https://github.com/matter-labs/zksync-2-dev/commit/f2b5e1a2fcbe3053e372f15992e592bc0c32a88f)) + +## [5.29.0](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.28.2...prover-v5.29.0) (2023-08-07) + + +### Features + +* **prover-fri-gpu:** use witness vector for proving ([#2310](https://github.com/matter-labs/zksync-2-dev/issues/2310)) ([0f9d5ee](https://github.com/matter-labs/zksync-2-dev/commit/0f9d5eea9b053abcc380c74a61d24031f9f79563)) + +## [5.28.2](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.28.1...prover-v5.28.2) (2023-08-04) + + +### Bug Fixes + +* **doc:** update vk setup data generator doc ([#2322](https://github.com/matter-labs/zksync-2-dev/issues/2322)) ([bda18b0](https://github.com/matter-labs/zksync-2-dev/commit/bda18b0670827e7a190146147724cb1a754c55e9)) + +## [5.28.1](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.28.0...prover-v5.28.1) (2023-08-04) + + +### Bug Fixes + +* **docs:** Add doc for FRI prover and update doc for vk-setup data ([#2311](https://github.com/matter-labs/zksync-2-dev/issues/2311)) ([5e3b706](https://github.com/matter-labs/zksync-2-dev/commit/5e3b7069cb53bdd229c44014533742afa26bd247)) + +## [5.28.0](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.27.0...prover-v5.28.0) (2023-08-04) + + +### Features + +* **crypto-update:** update crypto deps zkevm_circuits + boojum to fix Main VM failures ([#2255](https://github.com/matter-labs/zksync-2-dev/issues/2255)) ([d0f2f87](https://github.com/matter-labs/zksync-2-dev/commit/d0f2f876e3c477b0eccf9646a89ca2b0f9855736)) +* **prover-fri:** Added vk commitment generator in CI ([#2265](https://github.com/matter-labs/zksync-2-dev/issues/2265)) ([8ad75e0](https://github.com/matter-labs/zksync-2-dev/commit/8ad75e04b0a49dee34c6fa7e3b81a21392afa186)) +* **prover-fri:** Integrate GPU proving ([#2269](https://github.com/matter-labs/zksync-2-dev/issues/2269)) ([1c6ed33](https://github.com/matter-labs/zksync-2-dev/commit/1c6ed33781553f989ed73dd2ca6547040066a940)) +* **prover-server-split:** Enable prover UT in CI ([#2253](https://github.com/matter-labs/zksync-2-dev/issues/2253)) 
([79df2a1](https://github.com/matter-labs/zksync-2-dev/commit/79df2a1a147b00fc394be315bc9a3b4cb1fe7bea)) +* **prover-server-split:** unify cargo.lock for prover component ([#2248](https://github.com/matter-labs/zksync-2-dev/issues/2248)) ([0393463](https://github.com/matter-labs/zksync-2-dev/commit/0393463b15ac98a1bbf0156198e0d27d3aa92412)) +* **setup-data-fri:** use improved method for GpuSetup data ([#2305](https://github.com/matter-labs/zksync-2-dev/issues/2305)) ([997efed](https://github.com/matter-labs/zksync-2-dev/commit/997efedc3eb6655d58a2ca7e52fe38badeada518)) +* Update RockDB bindings ([#2208](https://github.com/matter-labs/zksync-2-dev/issues/2208)) ([211f548](https://github.com/matter-labs/zksync-2-dev/commit/211f548fa9945b7ed5328026e526cd72c09f6a94)) +* **vk-setup-data-fri:** expose GPU setup & seggegate GPU setup loading based on feature flag ([#2271](https://github.com/matter-labs/zksync-2-dev/issues/2271)) ([bfcab21](https://github.com/matter-labs/zksync-2-dev/commit/bfcab21c8656ce9c6524aef119a01b5013404cac)) + + +### Bug Fixes + +* **api:** Fix bytes deserialization by bumping web3 crate version ([#2240](https://github.com/matter-labs/zksync-2-dev/issues/2240)) ([59ef24a](https://github.com/matter-labs/zksync-2-dev/commit/59ef24afa6ceddf506a9ac7c4b1e9fc292311095)) +* **prover:** Panics in `send_report` will make provers crash ([#2273](https://github.com/matter-labs/zksync-2-dev/issues/2273)) ([85974d3](https://github.com/matter-labs/zksync-2-dev/commit/85974d3f9482307e0dbad0ec179e80886dafa42e)) + +## [5.25.0](https://github.com/matter-labs/zksync-2-dev/compare/prover-v5.24.0...prover-v5.25.0) (2023-08-02) + + +### Features + +* **crypto-update:** update crypto deps zkevm_circuits + boojum to fix Main VM failures ([#2255](https://github.com/matter-labs/zksync-2-dev/issues/2255)) ([d0f2f87](https://github.com/matter-labs/zksync-2-dev/commit/d0f2f876e3c477b0eccf9646a89ca2b0f9855736)) +* **prover-fri:** Added vk commitment generator in CI ([#2265](https://github.com/matter-labs/zksync-2-dev/issues/2265)) ([8ad75e0](https://github.com/matter-labs/zksync-2-dev/commit/8ad75e04b0a49dee34c6fa7e3b81a21392afa186)) +* **prover-fri:** Integrate GPU proving ([#2269](https://github.com/matter-labs/zksync-2-dev/issues/2269)) ([1c6ed33](https://github.com/matter-labs/zksync-2-dev/commit/1c6ed33781553f989ed73dd2ca6547040066a940)) +* **prover-server-split:** Enable prover UT in CI ([#2253](https://github.com/matter-labs/zksync-2-dev/issues/2253)) ([79df2a1](https://github.com/matter-labs/zksync-2-dev/commit/79df2a1a147b00fc394be315bc9a3b4cb1fe7bea)) +* **prover-server-split:** unify cargo.lock for prover component ([#2248](https://github.com/matter-labs/zksync-2-dev/issues/2248)) ([0393463](https://github.com/matter-labs/zksync-2-dev/commit/0393463b15ac98a1bbf0156198e0d27d3aa92412)) +* Update RockDB bindings ([#2208](https://github.com/matter-labs/zksync-2-dev/issues/2208)) ([211f548](https://github.com/matter-labs/zksync-2-dev/commit/211f548fa9945b7ed5328026e526cd72c09f6a94)) +* **vk-setup-data-fri:** expose GPU setup & seggegate GPU setup loading based on feature flag ([#2271](https://github.com/matter-labs/zksync-2-dev/issues/2271)) ([bfcab21](https://github.com/matter-labs/zksync-2-dev/commit/bfcab21c8656ce9c6524aef119a01b5013404cac)) + + +### Bug Fixes + +* **api:** Fix bytes deserialization by bumping web3 crate version ([#2240](https://github.com/matter-labs/zksync-2-dev/issues/2240)) 
([59ef24a](https://github.com/matter-labs/zksync-2-dev/commit/59ef24afa6ceddf506a9ac7c4b1e9fc292311095)) +* **prover:** Panics in `send_report` will make provers crash ([#2273](https://github.com/matter-labs/zksync-2-dev/issues/2273)) ([85974d3](https://github.com/matter-labs/zksync-2-dev/commit/85974d3f9482307e0dbad0ec179e80886dafa42e)) diff --git a/prover/Cargo.toml b/prover/Cargo.toml new file mode 100644 index 000000000000..0e1b27fd334d --- /dev/null +++ b/prover/Cargo.toml @@ -0,0 +1,22 @@ +[workspace] +members = [ + # lib + "prover_fri_utils", + "prover_fri_types", + + # binaries + "prover", + "circuit_synthesizer", + "setup_key_generator_and_server", + "witness_generator", + "vk_setup_data_generator_server_fri", + "prover_fri", + "witness_vector_generator" +] + +resolver = "2" + +# for `perf` profiling +[profile.perf] +inherits = "release" +debug = true diff --git a/prover/circuit_synthesizer/Cargo.toml b/prover/circuit_synthesizer/Cargo.toml new file mode 100644 index 000000000000..d82909fda3e7 --- /dev/null +++ b/prover/circuit_synthesizer/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "zksync_circuit_synthesizer" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "zksync_circuit_synthesizer" +path = "src/main.rs" + +[dependencies] +zksync_dal = { path = "../../core/lib/dal", version = "1.0" } +zksync_types = { path = "../../core/lib/types", version = "1.0" } +zksync_queued_job_processor = { path = "../../core/lib/queued_job_processor", version = "1.0" } +zksync_config = { path = "../../core/lib/config", version = "1.0" } +zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" } +zksync_utils = { path = "../../core/lib/utils", version = "1.0" } +zksync_prover_fri_utils = { path = "../prover_fri_utils", version = "1.0" } +vlog = { path = "../../core/lib/vlog", version = "1.0" } +prometheus_exporter = { path = "../../core/lib/prometheus_exporter", version = "1.0" } +zksync_prover_utils = { path = "../../core/lib/prover_utils", version = "1.0" } +zksync_verification_key_generator_and_server = { path = "../../core/bin/verification_key_generator_and_server", version = "1.0" } + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3"} + +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["legacy"], default-features=false} + +structopt = "0.3.26" +tokio = { version = "1.23.0", features = ["full"] } +futures = "0.3" +ctrlc = { version = "3.1", features = ["termination"] } +local-ip-address = "0.5.0" +metrics = "0.20" diff --git a/core/bin/circuit_synthesizer/rust-toolchain.toml b/prover/circuit_synthesizer/rust-toolchain.toml similarity index 100% rename from core/bin/circuit_synthesizer/rust-toolchain.toml rename to prover/circuit_synthesizer/rust-toolchain.toml diff --git a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs b/prover/circuit_synthesizer/src/circuit_synthesizer.rs similarity index 82% rename from core/bin/circuit_synthesizer/src/circuit_synthesizer.rs rename to prover/circuit_synthesizer/src/circuit_synthesizer.rs index 5f6842592e2d..2ea348282d9f 100644 --- a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/prover/circuit_synthesizer/src/circuit_synthesizer.rs @@ -1,8 +1,3 @@ -use std::io::copy; -use std::io::ErrorKind; -use std::io::Read; -use std::net::SocketAddr; -use std::net::TcpStream; use std::option::Option; use std::time::Duration; use std::time::Instant; @@ -20,12 +15,13 @@ use 
zkevm_test_harness::witness::oracle::VmWitnessOracle; use zksync_config::configs::prover_group::ProverGroupConfig; use zksync_config::configs::CircuitSynthesizerConfig; use zksync_config::ProverConfigs; -use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; use zksync_object_store::{CircuitKey, ObjectStore, ObjectStoreError, ObjectStoreFactory}; +use zksync_prover_fri_utils::socket_utils::send_assembly; use zksync_prover_utils::numeric_index_to_circuit_name; use zksync_prover_utils::region_fetcher::{get_region, get_zone}; use zksync_queued_job_processor::{async_trait, JobProcessor}; +use zksync_types::{protocol_version::L1VerifierConfig, proofs::{GpuProverInstanceStatus, SocketAddress}}; #[derive(Debug)] pub enum CircuitSynthesizerError { @@ -40,6 +36,7 @@ pub struct CircuitSynthesizer { allowed_circuit_types: Option<Vec<String>>, region: String, zone: String, + vk_commitments: L1VerifierConfig, prover_connection_pool: ConnectionPool, } @@ -48,6 +45,7 @@ impl CircuitSynthesizer { config: CircuitSynthesizerConfig, prover_groups: ProverGroupConfig, store_factory: &ObjectStoreFactory, + vk_commitments: L1VerifierConfig, prover_connection_pool: ConnectionPool, ) -> Result<Self, CircuitSynthesizerError> { let is_specialized = prover_groups.is_specialized_group_id(config.prover_group_id); @@ -81,6 +79,7 @@ impl CircuitSynthesizer { .map(|x| x.into_iter().map(|x| x.1).collect()), region: get_region().await, zone: get_zone().await, + vk_commitments, prover_connection_pool, }) } @@ -124,16 +123,20 @@ impl JobProcessor for CircuitSynthesizer { "Attempting to fetch job types: {:?}", self.allowed_circuit_types ); - let mut storage = self.prover_connection_pool.access_storage().await; + let protocol_versions = storage + .protocol_versions_dal() + .protocol_version_for(&self.vk_commitments) + .await; + let prover_job = match &self.allowed_circuit_types { Some(types) => { storage .prover_dal() - .get_next_prover_job_by_circuit_types(types.clone()) + .get_next_prover_job_by_circuit_types(types.clone(), &protocol_versions) .await } - None => storage.prover_dal().get_next_prover_job().await, + None => storage.prover_dal().get_next_prover_job(&protocol_versions).await, }?; let circuit_key = CircuitKey { @@ -152,12 +155,7 @@ impl JobProcessor for CircuitSynthesizer { Some((prover_job.id, input)) } - async fn save_failure( - &self, - job_id: Self::JobId, - _started_at: Instant, - error: String, - ) { + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { self.prover_connection_pool .access_storage() .await @@ -221,7 +219,7 @@ impl JobProcessor for CircuitSynthesizer { self.region.clone(), self.zone.clone(), ) - .await; + .await; if result.is_ok() { return; @@ -245,63 +243,6 @@ impl JobProcessor for CircuitSynthesizer { } } -fn send_assembly( - job_id: u32, - serialized: &mut Vec<u8>, - address: &SocketAddress, -) -> Result<(Duration, u64), String> { - vlog::trace!( - "Sending assembly to {}:{}, job id {{{job_id}}}", - address.host, - address.port - ); - - let socket_address = SocketAddr::new(address.host, address.port); - let started_at = Instant::now(); - let mut error_messages = vec![]; - - for _ in 0..10 { - match TcpStream::connect(socket_address) { - Ok(mut stream) => { - return send(&mut serialized.as_slice(), &mut stream) - .map(|result| (started_at.elapsed(), result)) - .map_err(|err| format!("Could not send assembly to prover: {err:?}")); - } - Err(err) => { - error_messages.push(format!("{err:?}")); - } - } - } - - Err(format!( - "Could not establish connection with prover after several attempts: {error_messages:?}" - )) -} - -fn send(read: &mut impl Read, tcp: &mut TcpStream) -> std::io::Result<u64> { - let mut attempts = 10; - let mut last_result = Ok(0); - - while attempts > 0 { - match copy(read, tcp) { - Ok(copied) => return Ok(copied), - Err(err) if can_be_retried(err.kind()) => { - attempts -= 1; - last_result = Err(err); - } - Err(err) => return Err(err), - } - - std::thread::sleep(Duration::from_millis(50)); - } - - last_result -} - -fn can_be_retried(err: ErrorKind) -> bool { - matches!(err, ErrorKind::TimedOut | ErrorKind::ConnectionRefused) -} -
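The `send`/`can_be_retried` pair deleted above implements bounded retries over transient socket errors only; per the new import at the top of the file, this logic now lives behind `zksync_prover_fri_utils::socket_utils::send_assembly`. A condensed, self-contained sketch of the same pattern (the helper name is illustrative):

```rust
use std::io::{copy, ErrorKind, Read};
use std::net::TcpStream;
use std::time::Duration;

/// Copy `read` into `tcp`, retrying only errors that are plausibly transient
/// (timeouts, refused connections); any other I/O error aborts immediately.
fn send_with_retries(read: &mut impl Read, tcp: &mut TcpStream) -> std::io::Result<u64> {
    let mut attempts = 10;
    let mut last_result = Ok(0);
    while attempts > 0 {
        match copy(read, tcp) {
            Ok(copied) => return Ok(copied),
            Err(err) if matches!(err.kind(), ErrorKind::TimedOut | ErrorKind::ConnectionRefused) => {
                attempts -= 1;
                last_result = Err(err);
            }
            Err(err) => return Err(err),
        }
        std::thread::sleep(Duration::from_millis(50));
    }
    last_result
}
```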
[dependencies] -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -zksync_utils = {path = "../../lib/utils", version = "1.0" } -zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } -zksync_circuit_breaker = {path = "../../lib/circuit_breaker", version = "1.0" } -zksync_eth_client = {path = "../../lib/eth_client", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } -zksync_object_store = { path = "../../lib/object_store", version = "1.0" } +zksync_dal = { path = "../../core/lib/dal", version = "1.0" } +zksync_config = { path = "../../core/lib/config", version = "1.0" } +zksync_utils = {path = "../../core/lib/utils", version = "1.0" } +zksync_prover_utils = {path = "../../core/lib/prover_utils", version = "1.0" } +zksync_circuit_breaker = {path = "../../core/lib/circuit_breaker", version = "1.0" } +zksync_eth_client = {path = "../../core/lib/eth_client", version = "1.0" } +zksync_types = { path = "../../core/lib/types", version = "1.0" } +prometheus_exporter = { path = "../../core/lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../core/lib/vlog", version = "1.0" } +zksync_verification_key_generator_and_server = { path = "../../core/bin/verification_key_generator_and_server", version = "1.0" } +zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" } setup_key_generator_and_server = { path = "../setup_key_generator_and_server", version = "1.0" } @@ -36,7 +37,7 @@ ctrlc = { version = "3.1", features = ["termination"] } thiserror = "1.0" chrono = "0.4" serde_json = "1.0" -ethabi = "16.0.0" +ethabi = "18.0.0" metrics = "0.20" hex = "0.4" serde = { version = "1.0", features = ["derive"] } diff --git a/core/bin/prover/README.md b/prover/prover/README.md similarity index 100% rename from core/bin/prover/README.md rename to prover/prover/README.md diff --git a/core/bin/prover/rust-toolchain.toml b/prover/prover/rust-toolchain.toml similarity index 100% rename from core/bin/prover/rust-toolchain.toml rename to prover/prover/rust-toolchain.toml diff --git a/core/bin/prover/src/artifact_provider.rs b/prover/prover/src/artifact_provider.rs similarity index 100% rename from core/bin/prover/src/artifact_provider.rs rename to prover/prover/src/artifact_provider.rs diff --git a/core/bin/prover/src/main.rs b/prover/prover/src/main.rs similarity index 80% rename from core/bin/prover/src/main.rs rename to prover/prover/src/main.rs index 110260630a9b..eb9987191196 100644 --- a/core/bin/prover/src/main.rs +++ b/prover/prover/src/main.rs @@ -1,5 +1,6 @@ use std::env; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; +use tokio::sync::Mutex; use api::gpu_prover; use local_ip_address::local_ip; @@ -7,20 +8,14 @@ use prover_service::run_prover::run_prover_with_remote_synthesizer; use queues::Buffer; use tokio::{sync::oneshot, task::JoinHandle}; -use zksync_circuit_breaker::{vks::VksChecker, CircuitBreakerChecker}; use zksync_config::{ - configs::chain::CircuitBreakerConfig, configs::{api::PrometheusConfig, prover_group::ProverGroupConfig, AlertsConfig}, - ApiConfig, ContractsConfig, ETHClientConfig, ProverConfig, ProverConfigs, + ApiConfig, ProverConfig, ProverConfigs, }; -use zksync_dal::{ - connection::DbVariant, - gpu_prover_queue_dal::{GpuProverInstanceStatus, 
SocketAddress}, - ConnectionPool, -}; -use zksync_eth_client::clients::http::QueryClient; +use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_utils::region_fetcher::{get_region, get_zone}; +use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::artifact_provider::ProverArtifactProvider; @@ -36,7 +31,7 @@ mod socket_listener; mod synthesized_circuit_provider; async fn graceful_shutdown() { - let pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let pool = ConnectionPool::singleton(DbVariant::Prover).build().await; let host = local_ip().expect("Failed obtaining local IP address"); let port = ProverConfigs::from_env().non_gpu.assembly_receiver_port; let region = get_region().await; @@ -119,25 +114,6 @@ async fn main() { &prover_config.key_download_url, ); env::set_var("CRS_FILE", prover_config.initial_setup_key_path.clone()); - vlog::trace!("initial setup keys loaded, preparing eth_client + circuit breaker"); - let eth_client_config = ETHClientConfig::from_env(); - let circuit_breaker_config = CircuitBreakerConfig::from_env(); - let eth_client = QueryClient::new(ð_client_config.web3_url).unwrap(); - let contracts_config = ContractsConfig::from_env(); - let circuit_breaker_checker = CircuitBreakerChecker::new( - vec![Box::new(VksChecker::new( - &circuit_breaker_config, - eth_client, - contracts_config.diamond_proxy_addr, - ))], - &circuit_breaker_config, - ); - circuit_breaker_checker - .check() - .await - .expect("Circuit breaker triggered"); - - let (cb_sender, cb_receiver) = futures::channel::oneshot::channel(); // We don't have a graceful shutdown process for the prover, so `_stop_sender` is unused. // Though we still need to create a channel because circuit breaker expects `stop_receiver`. let (_stop_sender, stop_receiver) = tokio::sync::watch::channel(false); @@ -156,9 +132,6 @@ async fn main() { prometheus_config.listener_port, None, )); - tasks.push(tokio::spawn( - circuit_breaker_checker.run(cb_sender, stop_receiver), - )); let assembly_queue = Buffer::new(prover_config.assembly_queue_capacity); let shared_assembly_queue = Arc::new(Mutex::new(assembly_queue)); @@ -175,7 +148,7 @@ async fn main() { local_ip, prover_config.assembly_receiver_port, producer, - ConnectionPool::new(Some(1), DbVariant::Prover).await, + ConnectionPool::singleton(DbVariant::Prover).build().await, prover_config.specialized_prover_group_id, region.clone(), zone.clone(), @@ -185,7 +158,7 @@ async fn main() { let params = ProverParams::new(&prover_config); let store_factory = ObjectStoreFactory::from_env(); - let circuit_provider_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let circuit_provider_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; tasks.push(tokio::task::spawn_blocking(move || { let rt_handle = tokio::runtime::Handle::current(); let synthesized_circuit_provider = SynthesizedCircuitProvider::new( @@ -218,12 +191,7 @@ async fn main() { // This is necessary because of blocking prover. See end of functions for more details. std::process::exit(0); }, - error = cb_receiver => { - if let Ok(error_msg) = error { - vlog::warn!("Circuit breaker received, shutting down. Reason: {}", error_msg); - } - }, - } + }; // BEWARE, HERE BE DRAGONS. // The process hangs here if we panic outside `run_prover_with_remote_synthesizer`. 
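Both binaries wire shutdown the same way: a `ctrlc` handler owns a oneshot sender (taken out of an `Option`, since the handler may fire more than once while `send` consumes the sender) and `main` races the worker future against the receiver. A minimal standalone sketch of that wiring, with `run_tasks` as a stand-in for the real task set:

```rust
use tokio::sync::oneshot;

async fn run_tasks() {
    // Stand-in for spawning and awaiting the real worker tasks.
}

#[tokio::main]
async fn main() {
    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel::<()>();
    let mut stop_signal_sender = Some(stop_signal_sender);
    ctrlc::set_handler(move || {
        // Only the first Ctrl+C sends; later invocations find the Option empty.
        if let Some(sender) = stop_signal_sender.take() {
            sender.send(()).ok();
        }
    })
    .expect("Error setting Ctrl+C handler");

    tokio::select! {
        _ = run_tasks() => {},
        _ = stop_signal_receiver => println!("Stop signal received, shutting down"),
    }
}
```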
diff --git a/core/bin/prover/src/prover.rs b/prover/prover/src/prover.rs similarity index 67% rename from core/bin/prover/src/prover.rs rename to prover/prover/src/prover.rs index e3d2aaf56e0a..9025c71a92e6 100644 --- a/core/bin/prover/src/prover.rs +++ b/prover/prover/src/prover.rs @@ -7,8 +7,10 @@ use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; use zkevm_test_harness::pairing::bn256::Bn256; use zksync_config::ProverConfig; +use zksync_dal::StorageProcessor; use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory}; +use zksync_types::proofs::ProverJobMetadata; #[derive(Debug)] pub struct ProverReporter { @@ -29,7 +31,7 @@ impl ProverReporter { store_factory: &ObjectStoreFactory, rt_handle: Handle, ) -> Self { - let pool = rt_handle.block_on(ConnectionPool::new(Some(1), DbVariant::Prover)); + let pool = rt_handle.block_on(ConnectionPool::singleton(DbVariant::Prover).build()); Self { pool, config, @@ -66,32 +68,70 @@ impl ProverReporter { let mut connection = self.pool.access_storage().await; let mut transaction = connection.start_transaction().await; - transaction + // BEWARE, HERE BE DRAGONS. + // `send_report` method is called in an operating system thread, + // which is in charge of saving proof output (ok, errored, etc.). + // The code that calls it is in a thread that does not check its status. + // If the thread panics, proofs will be generated, but their status won't be saved. + // So a prover will work like this: + // Pick task, execute task, prepare task to be saved, be restarted as nothing happens. + // The error prevents the "fake" work by killing the prover, which causes it to restart. + // A proper fix would be to have the thread signal it was dead or be watched from outside. + // Given we want to deprecate old prover, this is the quick and dirty hack I'm not proud of. + let result = transaction .prover_dal() .save_proof(job_id, duration, serialized, &self.processed_by) .await; - let _prover_job_metadata = transaction - .prover_dal() - .get_prover_job_by_id(job_id) - .await - .unwrap_or_else(|| panic!("No job with id: {} exist", job_id)); - + if let Err(e) = result { + vlog::warn!("panicked inside heavy-ops thread: {e:?}; exiting..."); + std::process::exit(-1); + } + self.get_prover_job_metadata_by_id_and_exit_if_error(&mut transaction, job_id) + .await; + transaction.commit().await; }); } fn get_circuit_type(&self, job_id: usize) -> String { let prover_job_metadata = self.rt_handle.block_on(async { - self.pool - .access_storage() - .await - .prover_dal() - .get_prover_job_by_id(job_id as u32) + let mut connection = self.pool.access_storage().await; + self.get_prover_job_metadata_by_id_and_exit_if_error(&mut connection, job_id as u32) .await - .unwrap_or_else(|| panic!("No job with id: {} exist", job_id)) }); prover_job_metadata.circuit_type } + + async fn get_prover_job_metadata_by_id_and_exit_if_error( + &self, + connection: &mut StorageProcessor<'_>, + job_id: u32, + ) -> ProverJobMetadata { + // BEWARE, HERE BE DRAGONS. + // `send_report` method is called in an operating system thread, + // which is in charge of saving proof output (ok, errored, etc.). + // The code that calls it is in a thread that does not check its status. + // If the thread panics, proofs will be generated, but their status won't be saved. + // So a prover will work like this: + // Pick task, execute task, prepare task to be saved, be restarted as nothing happens. 
+ // The error prevents the "fake" work by killing the prover, which causes it to restart. + // A proper fix would be to have the thread signal it was dead or be watched from outside. + // Given we want to deprecate old prover, this is the quick and dirty hack I'm not proud of. + let result = connection.prover_dal().get_prover_job_by_id(job_id).await; + let prover_job_metadata = match result { + Ok(option) => option, + Err(e) => { + vlog::warn!("panicked inside heavy-ops thread: {e:?}; exiting..."); + std::process::exit(-1); + } + }; + match prover_job_metadata { + Some(val) => val, + None => { + vlog::error!("No job with id: {} exists; exiting...", job_id); + std::process::exit(-1); + } + } + } } impl JobReporter for ProverReporter { @@ -104,14 +144,30 @@ impl JobReporter for ProverReporter { error ); self.rt_handle.block_on(async { - self.pool + let result = self + .pool .access_storage() .await .prover_dal() .save_proof_error(job_id as u32, error, self.config.max_attempts) .await; + // BEWARE, HERE BE DRAGONS. + // `send_report` method is called in an operating system thread, + // which is in charge of saving proof output (ok, errored, etc.). + // The code that calls it is in a thread that does not check its status. + // If the thread panics, proofs will be generated, but their status won't be saved. + // So a prover will work like this: + // Pick task, execute task, prepare task to be saved, be restarted as nothing happens. + // The error prevents the "fake" work by killing the prover, which causes it to restart. + // A proper fix would be to have the thread signal it was dead or be watched from outside. + // Given we want to deprecate old prover, this is the quick and dirty hack I'm not proud of. + if let Err(e) = result { + vlog::warn!("panicked inside heavy-ops thread: {e:?}; exiting..."); + std::process::exit(-1); + } }); } +
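The three "HERE BE DRAGONS" blocks all reduce to one rule: in a fire-and-forget OS thread whose panics nobody observes, a failed DB write must kill the whole process, so the orchestrator restarts the prover instead of letting it keep doing "fake" work. A distilled sketch of that pattern (the helper name is hypothetical):

```rust
use std::fmt::Debug;

/// Unwrap a storage result inside a thread whose panics would go unnoticed:
/// log the error and exit the process so the prover gets restarted.
fn unwrap_or_die<T, E: Debug>(result: Result<T, E>, context: &str) -> T {
    match result {
        Ok(value) => value,
        Err(err) => {
            eprintln!("{context}: {err:?}; exiting...");
            std::process::exit(-1);
        }
    }
}
```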
error: {}", @@ -209,6 +269,7 @@ impl JobReporter for ProverReporter { .block_on(put_task) .expect("Failed saving debug assembly to GCS"); } + JobResult::AssemblyTransferred(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); vlog::trace!( @@ -223,6 +284,7 @@ impl JobReporter for ProverReporter { "circuit_type" => circuit_type, ); } + JobResult::ProverWaitedIdle(prover_id, duration) => { vlog::trace!( "Prover wait idle time: {:?} for prover-id: {:?}", @@ -231,10 +293,12 @@ impl JobReporter for ProverReporter { ); metrics::histogram!("server.prover.prover_wait_idle_time", duration,); } + JobResult::SetupLoaderWaitedIdle(duration) => { vlog::trace!("Setup load wait idle time: {:?}", duration); metrics::histogram!("server.prover.setup_load_wait_wait_idle_time", duration,); } + JobResult::SchedulerWaitedIdle(duration) => { vlog::trace!("Scheduler wait idle time: {:?}", duration); metrics::histogram!("server.prover.scheduler_wait_idle_time", duration,); diff --git a/core/bin/prover/src/prover_params.rs b/prover/prover/src/prover_params.rs similarity index 100% rename from core/bin/prover/src/prover_params.rs rename to prover/prover/src/prover_params.rs diff --git a/core/bin/prover/src/socket_listener.rs b/prover/prover/src/socket_listener.rs similarity index 83% rename from core/bin/prover/src/socket_listener.rs rename to prover/prover/src/socket_listener.rs index 1a4a1a567c69..69e49a21cb2c 100644 --- a/core/bin/prover/src/socket_listener.rs +++ b/prover/prover/src/socket_listener.rs @@ -2,10 +2,13 @@ use crate::synthesized_circuit_provider::SharedAssemblyQueue; use queues::IsQueue; use std::net::{IpAddr, SocketAddr}; use std::time::Instant; -use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; +use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; -use tokio::{io::copy, net::{TcpListener, TcpStream}}; +use tokio::{ + io::copy, + net::{TcpListener, TcpStream}, +}; #[allow(clippy::too_many_arguments)] pub async fn incoming_socket_listener( @@ -29,7 +32,7 @@ pub async fn incoming_socket_listener( .unwrap_or_else(|_| panic!("Failed binding address: {:?}", listening_address)); let address = SocketAddress { host, port }; - let queue_capacity = queue.lock().unwrap().capacity(); + let queue_capacity = queue.lock().await.capacity(); pool.access_storage() .await .gpu_prover_queue_dal() @@ -49,7 +52,7 @@ pub async fn incoming_socket_listener( let stream = match listener.accept().await { Ok(stream) => stream.0, Err(e) => { - panic!("could not accept connection: {e:?}"); + panic!("could not accept connection: {:?}", e); } }; vlog::trace!( @@ -81,16 +84,19 @@ async fn handle_incoming_file( ) { let mut assembly: Vec = vec![]; let started_at = Instant::now(); - copy(&mut stream, &mut assembly).await.expect("Failed reading from stream"); + copy(&mut stream, &mut assembly) + .await + .expect("Failed reading from stream"); let file_size_in_gb = assembly.len() / (1024 * 1024 * 1024); vlog::trace!( "Read file of size: {}GB from stream took: {} seconds", file_size_in_gb, started_at.elapsed().as_secs() ); + // acquiring lock from queue and updating db must be done atomically otherwise it results in TOCTTOU + // Time-of-Check to Time-of-Use + let mut assembly_queue = queue.lock().await; let (queue_free_slots, status) = { - let mut assembly_queue = queue.lock().unwrap(); - assembly_queue .add(assembly) .expect("Failed saving assembly to queue"); diff --git a/core/bin/prover/src/synthesized_circuit_provider.rs 
diff --git a/core/bin/prover/src/synthesized_circuit_provider.rs b/prover/prover/src/synthesized_circuit_provider.rs similarity index 92% rename from core/bin/prover/src/synthesized_circuit_provider.rs rename to prover/prover/src/synthesized_circuit_provider.rs index 1a9ac811369d..bb20391a3bae 100644 --- a/core/bin/prover/src/synthesized_circuit_provider.rs +++ b/prover/prover/src/synthesized_circuit_provider.rs @@ -1,13 +1,14 @@ use std::io::Cursor; use std::io::Read; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; +use tokio::sync::Mutex; use prover_service::RemoteSynthesizer; use queues::{Buffer, IsQueue}; use tokio::runtime::Handle; -use zksync_dal::gpu_prover_queue_dal::SocketAddress; use zksync_dal::ConnectionPool; +use zksync_types::proofs::SocketAddress; pub type SharedAssemblyQueue = Arc<Mutex<Buffer<Vec<u8>>>>; @@ -42,7 +43,7 @@ impl SynthesizedCircuitProvider { impl RemoteSynthesizer for SynthesizedCircuitProvider { fn try_next(&mut self) -> Option<Vec<u8>> { - let mut assembly_queue = self.queue.lock().unwrap(); + let mut assembly_queue = self.rt_handle.block_on(async { self.queue.lock().await }); let is_full = assembly_queue.capacity() == assembly_queue.size(); return match assembly_queue.remove() { Ok(blob) => {
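`RemoteSynthesizer::try_next` is a synchronous trait method, so after the switch from `std::sync::Mutex` to `tokio::sync::Mutex` it has to block on the runtime handle to take the lock. A minimal sketch of that sync-to-async bridge (valid as long as the caller is not itself a runtime worker thread, which would make `block_on` panic):

```rust
use tokio::runtime::Handle;
use tokio::sync::Mutex;

fn pop_blocking(rt_handle: &Handle, queue: &Mutex<Vec<Vec<u8>>>) -> Option<Vec<u8>> {
    // Drive the async lock acquisition to completion from synchronous code.
    let mut guard = rt_handle.block_on(queue.lock());
    guard.pop()
}
```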
diff --git a/prover/prover_fri/Cargo.toml b/prover/prover_fri/Cargo.toml new file mode 100644 index 000000000000..dd3e64e1295c --- /dev/null +++ b/prover/prover_fri/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "zksync_prover_fri" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +zksync_types = { path = "../../core/lib/types", version = "1.0" } +zksync_dal = { path = "../../core/lib/dal", version = "1.0" } +zksync_config = { path = "../../core/lib/config", version = "1.0" } +prometheus_exporter = { path = "../../core/lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../core/lib/vlog", version = "1.0" } +zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" } +zksync_prover_utils = {path = "../../core/lib/prover_utils", version = "1.0" } +zksync_queued_job_processor = { path = "../../core/lib/queued_job_processor", version = "1.0" } +zksync_prover_fri_utils = { path = "../prover_fri_utils", version = "1.0" } +zksync_prover_fri_types = { path = "../prover_fri_types", version = "1.0" } +zksync_utils = { path = "../../core/lib/utils", version = "1.0" } +vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" } +shivini = {git = "https://github.com/matter-labs/shivini.git", branch ="main", optional = true, features = ["circuit_definitions", "zksync"] } + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } +circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]} + +tokio = { version = "1", features = ["time"] } +futures = { version = "0.3", features = ["compat"] } +ctrlc = { version = "3.1", features = ["termination"] } +metrics = "0.20.0" +serde = { version = "1.0", features = ["derive"] } +async-trait = "0.1" +queues = "1.1.0" +local-ip-address = "0.5.0" + +[features] +default = [] +gpu = ["shivini", "vk_setup_data_generator_server_fri/gpu"] diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md new file mode 100644 index 000000000000..801c30be3ce7 --- /dev/null +++ b/prover/prover_fri/README.md @@ -0,0 +1,9 @@ +# FRI Prover + +## running cpu prover + +`zk f cargo +nightly-2023-05-31 run --release --bin zksync_prover_fri` + +## running gpu prover (requires CUDA 12.0+) + +`zk f cargo +nightly-2023-05-31 run --release --features "gpu" --bin zksync_prover_fri` diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs new file mode 100644 index 000000000000..aa20bf0331b3 --- /dev/null +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -0,0 +1,300 @@ +#[cfg(feature = "gpu")] +pub mod gpu_prover { + use std::collections::HashMap; + use std::{sync::Arc, time::Instant}; + + use queues::IsQueue; + use tokio::task::JoinHandle; + use zksync_prover_fri_types::circuit_definitions::base_layer_proof_config; + use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; + use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GoldilocksPoseidon2Sponge; + use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow; + use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver; + use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::transcript::GoldilocksPoisedon2Transcript; + use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; + use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerProof; + use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof; + use zksync_vk_setup_data_server_fri::GpuProverSetupData; + + use zksync_config::configs::fri_prover_group::{CircuitIdRoundTuple, FriProverGroupConfig}; + use zksync_config::configs::FriProverConfig; + use zksync_dal::ConnectionPool; + use zksync_object_store::ObjectStore; + use zksync_prover_fri_types::{ CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; + use zksync_queued_job_processor::{async_trait, JobProcessor}; + use zksync_vk_setup_data_server_fri::get_setup_data_for_circuit_type; + use { shivini::gpu_prove_from_external_witness_data, shivini::synthesis_utils::init_base_layer_cs_for_repeated_proving, shivini::synthesis_utils::init_recursive_layer_cs_for_repeated_proving, shivini::ProverContext, zksync_vk_setup_data_server_fri::GoldilocksGpuProverSetupData, }; + + use crate::utils::{ save_proof, setup_metadata_to_setup_data_key, verify_proof, ProverArtifacts, SharedWitnessVectorQueue, F, H, }; + + type DefaultTranscript = GoldilocksPoisedon2Transcript; + type DefaultTreeHasher = GoldilocksPoseidon2Sponge; + + pub enum SetupLoadMode { + FromMemory(HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>), + FromDisk, + } + + pub struct Prover { + blob_store: Box<dyn ObjectStore>, + public_blob_store: Box<dyn ObjectStore>, + config: Arc<FriProverConfig>, + prover_connection_pool: ConnectionPool, + setup_load_mode: SetupLoadMode, + // Only pick jobs for the configured circuit id and aggregation rounds. + // Empty means all jobs are picked. 
+ circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, + witness_vector_queue: SharedWitnessVectorQueue, + prover_context: ProverContext, + } + + impl Prover { + pub fn new( + blob_store: Box<dyn ObjectStore>, + public_blob_store: Box<dyn ObjectStore>, + config: FriProverConfig, + prover_connection_pool: ConnectionPool, + setup_load_mode: SetupLoadMode, + circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, + witness_vector_queue: SharedWitnessVectorQueue, + ) -> Self { + Prover { + blob_store, + public_blob_store, + config: Arc::new(config), + prover_connection_pool, + setup_load_mode, + circuit_ids_for_round_to_be_proven, + witness_vector_queue, + prover_context: ProverContext::create() + .expect("failed initializing gpu prover context"), + } + } + + fn get_setup_data(&self, key: ProverServiceDataKey) -> Arc<GoldilocksGpuProverSetupData> { + match &self.setup_load_mode { + SetupLoadMode::FromMemory(cache) => cache + .get(&key) + .expect("Setup data not found in cache") + .clone(), + SetupLoadMode::FromDisk => { + let started_at = Instant::now(); + let artifact: GoldilocksGpuProverSetupData = + get_setup_data_for_circuit_type(key.clone()); + metrics::histogram!( + "prover_fri.prover.gpu_setup_data_load_time", + started_at.elapsed(), + "circuit_type" => key.circuit_id.to_string(), + ); + Arc::new(artifact) + } + } + } + + pub fn prove( + job: WitnessVectorArtifacts, + config: Arc<FriProverConfig>, + setup_data: Arc<GoldilocksGpuProverSetupData>, + ) -> ProverArtifacts { + let worker = Worker::new(); + let started_at = Instant::now(); + let (cs, proof_config, circuit_id) = match job.prover_job.circuit_wrapper.clone() { + CircuitWrapper::Base(base_circuit) => { + let circuit_id = base_circuit.numeric_circuit_type(); + let cs = init_base_layer_cs_for_repeated_proving( + base_circuit, + &setup_data.finalization_hint, + ); + (cs, base_layer_proof_config(), circuit_id) + } + CircuitWrapper::Recursive(recursive_circuit) => { + let circuit_id = recursive_circuit.numeric_circuit_type(); + let cs = init_recursive_layer_cs_for_repeated_proving( + recursive_circuit, + &setup_data.finalization_hint, + ); + (cs, base_layer_proof_config(), circuit_id) + } + }; + vlog::info!( + "Successfully generated assembly without witness vector for job: {}, took: {:?}", + job.prover_job.job_id, + started_at.elapsed() + ); + metrics::histogram!( + "prover_fri.prover.gpu_assembly_generation_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string() + ); + let started_at = Instant::now(); + let proof = gpu_prove_from_external_witness_data::< + _, + DefaultTranscript, + DefaultTreeHasher, + NoPow, + _, + >( + cs, + &job.witness_vector, + proof_config, + &setup_data.setup, + &setup_data.vk, + (), + &worker, + ) + .unwrap_or_else(|_| { + panic!( + "failed generating GPU proof for id: {}", + job.prover_job.job_id + ) + }); + metrics::histogram!( + "prover_fri.prover.gpu_proof_generation_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string() + ); + verify_proof( + &job.prover_job.circuit_wrapper, + &proof, + &setup_data.vk, + job.prover_job.job_id, + ); + let proof_wrapper = match &job.prover_job.circuit_wrapper { + CircuitWrapper::Base(_) => { + FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::Recursive(circuit) => FriProofWrapper::Recursive( + ZkSyncRecursionLayerProof::from_inner(circuit_id, proof), + ), + }; + ProverArtifacts::new(job.prover_job.block_number, proof_wrapper) + } + } + + #[async_trait] + impl JobProcessor for Prover { + type Job = WitnessVectorArtifacts; + type JobId = u32; + type JobArtifacts = ProverArtifacts; + + // we use a smaller number here as the 
polling is done from the in-memory queue, not the DB + const POLLING_INTERVAL_MS: u64 = 200; + const MAX_BACKOFF_MS: u64 = 1_000; + const SERVICE_NAME: &'static str = "FriGpuProver"; + + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut queue = self.witness_vector_queue.lock().await; + match queue.remove() { + Err(_) => None, + Ok(item) => { + vlog::info!("Started GPU proving for job: {:?}", item.prover_job.job_id); + Some((item.prover_job.job_id, item)) + } + } + } + + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { + self.prover_connection_pool + .access_storage() + .await + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + } + + async fn process_job( + &self, + job: Self::Job, + _started_at: Instant, + ) -> JoinHandle<Self::JobArtifacts> { + let config = Arc::clone(&self.config); + let setup_data = self.get_setup_data(job.prover_job.setup_data_key.clone()); + tokio::task::spawn_blocking(move || Self::prove(job, config, setup_data)) + } + + async fn save_result( + &self, + job_id: Self::JobId, + started_at: Instant, + artifacts: Self::JobArtifacts, + ) { + metrics::histogram!( + "prover_fri.prover.gpu_total_proving_time", + started_at.elapsed(), + ); + let mut storage_processor = self.prover_connection_pool.access_storage().await; + save_proof( + job_id, + started_at, + artifacts, + &*self.blob_store, + &*self.public_blob_store, + &mut storage_processor, + ) + .await; + } + } + + pub fn load_setup_data_cache(config: &FriProverConfig) -> SetupLoadMode { + match config.setup_load_mode { + zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, + zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { + let mut cache = HashMap::new(); + vlog::info!( + "Loading setup data cache for group {}", + &config.specialized_group_id + ); + let prover_setup_metadata_list = FriProverGroupConfig::from_env() + .get_circuit_ids_for_group_id(config.specialized_group_id) + .expect( + "At least one circuit should be configured for group when running in FromMemory mode", + ); + vlog::info!( + "for group {} configured setup metadata are {:?}", + &config.specialized_group_id, + prover_setup_metadata_list + ); + for prover_setup_metadata in prover_setup_metadata_list { + let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); + let setup_data = get_setup_data_for_circuit_type(key.clone()); + cache.insert(key, Arc::new(setup_data)); + } + SetupLoadMode::FromMemory(cache) + } + } + } + + pub fn init_finalization_hints_cache( + config: &FriProverConfig, + ) -> HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>> { + let mut cache = HashMap::new(); + vlog::info!( + "Loading finalization hint for group {}", + &config.specialized_group_id + ); + let group_config = FriProverGroupConfig::from_env(); + let prover_setup_metadata_list = match config.setup_load_mode { + zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => group_config.get_all_circuit_ids(), + zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => + group_config.get_circuit_ids_for_group_id(config.specialized_group_id).expect( + "At least one circuit should be configured for group when running in FromMemory mode") + }; + + for prover_setup_metadata in prover_setup_metadata_list { + let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); + let setup_data: GpuProverSetupData = get_setup_data_for_circuit_type(key.clone()); + cache.insert(key, Arc::new(setup_data.finalization_hint)); + } + cache + } +} 
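`SetupLoadMode` trades memory for latency: `FromMemory` pays the full load cost once at startup and serves `Arc` clones afterwards, while `FromDisk` reloads on every job. The shape of that lookup, reduced to a generic sketch (`load` stands in for `get_setup_data_for_circuit_type`):

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Arc;

enum SetupLoadMode<K, D> {
    FromMemory(HashMap<K, Arc<D>>),
    FromDisk,
}

fn get_setup_data<K: Eq + Hash, D>(
    mode: &SetupLoadMode<K, D>,
    key: &K,
    load: impl Fn(&K) -> D,
) -> Arc<D> {
    match mode {
        // A cache hit is mandatory: the cache was built from the group config at startup.
        SetupLoadMode::FromMemory(cache) => cache.get(key).expect("setup data not cached").clone(),
        SetupLoadMode::FromDisk => Arc::new(load(key)),
    }
}
```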
diff --git a/prover/prover_fri/src/lib.rs b/prover/prover_fri/src/lib.rs
new file mode 100644
index 000000000000..5fdb260d40d6
--- /dev/null
+++ b/prover/prover_fri/src/lib.rs
@@ -0,0 +1,3 @@
+#![feature(generic_const_exprs)]
+pub mod prover_job_processor;
+pub mod utils;
diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs
new file mode 100644
index 000000000000..e284dc06f29c
--- /dev/null
+++ b/prover/prover_fri/src/main.rs
@@ -0,0 +1,163 @@
+#![feature(generic_const_exprs)]
+
+use tokio::sync::oneshot;
+use tokio::sync::watch::Receiver;
+use tokio::task::JoinHandle;
+
+use zksync_config::configs::fri_prover_group::{CircuitIdRoundTuple, FriProverGroupConfig};
+use zksync_config::configs::{FriProverConfig, PrometheusConfig};
+use zksync_config::{ApiConfig, ObjectStoreConfig};
+use zksync_dal::connection::DbVariant;
+use zksync_dal::ConnectionPool;
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
+
+use zksync_queued_job_processor::JobProcessor;
+use zksync_utils::wait_for_tasks::wait_for_tasks;
+
+mod gpu_prover_job_processor;
+mod prover_job_processor;
+mod socket_listener;
+mod utils;
+
+#[tokio::main]
+async fn main() {
+    vlog::init();
+    let sentry_guard = vlog::init_sentry();
+    let prover_config = FriProverConfig::from_env();
+    let prometheus_config = PrometheusConfig {
+        listener_port: prover_config.prometheus_port,
+        ..ApiConfig::from_env().prometheus
+    };
+
+    match sentry_guard {
+        Some(_) => vlog::info!(
+            "Starting Sentry with URL: {}",
+            std::env::var("MISC_SENTRY_URL").unwrap(),
+        ),
+        None => vlog::info!("No Sentry URL configured"),
+    }
+
+    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();
+    let mut stop_signal_sender = Some(stop_signal_sender);
+    ctrlc::set_handler(move || {
+        if let Some(sender) = stop_signal_sender.take() {
+            sender.send(()).ok();
+        }
+    })
+    .expect("Error setting Ctrl+C handler");
+
+    let (stop_sender, stop_receiver) = tokio::sync::watch::channel(false);
+    let blob_store = ObjectStoreFactory::from_env();
+    let public_blob_store = ObjectStoreFactory::new(ObjectStoreConfig::public_from_env())
+        .create_store()
+        .await;
+
+    vlog::info!("Starting FRI proof generation");
+    let pool = ConnectionPool::builder(DbVariant::Prover).build().await;
+    let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env()
+        .get_circuit_ids_for_group_id(prover_config.specialized_group_id)
+        .unwrap_or_default();
+
+    let prover_tasks = get_prover_tasks(
+        prover_config,
+        stop_receiver,
+        blob_store,
+        public_blob_store,
+        pool,
+        circuit_ids_for_round_to_be_proven,
+    )
+    .await;
+
+    let mut tasks = vec![prometheus_exporter::run_prometheus_exporter(
+        prometheus_config.listener_port,
+        None,
+    )];
+    tasks.extend(prover_tasks);
+
+    let particular_crypto_alerts = None;
+    let graceful_shutdown = None::<futures::future::Ready<()>>;
+    let tasks_allowed_to_finish = false;
+    tokio::select! {
+        _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {},
+        _ = stop_signal_receiver => {
+            vlog::info!("Stop signal received, shutting down");
+        },
+    }
+
+    stop_sender.send(true).ok();
+}
+
+#[cfg(not(feature = "gpu"))]
+async fn get_prover_tasks(
+    prover_config: FriProverConfig,
+    stop_receiver: Receiver<bool>,
+    store_factory: ObjectStoreFactory,
+    public_blob_store: Box<dyn ObjectStore>,
+    pool: ConnectionPool,
+    circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+) -> Vec<JoinHandle<()>> {
+    use crate::prover_job_processor::{load_setup_data_cache, Prover};
+
+    let setup_load_mode = load_setup_data_cache(&prover_config);
+    let prover = Prover::new(
+        store_factory.create_store().await,
+        public_blob_store,
+        prover_config,
+        pool,
+        setup_load_mode,
+        circuit_ids_for_round_to_be_proven,
+    );
+    vec![tokio::spawn(prover.run(stop_receiver, None))]
+}
+
+#[cfg(feature = "gpu")]
+async fn get_prover_tasks(
+    prover_config: FriProverConfig,
+    stop_receiver: Receiver<bool>,
+    store_factory: ObjectStoreFactory,
+    public_blob_store: Box<dyn ObjectStore>,
+    pool: ConnectionPool,
+    circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+) -> Vec<JoinHandle<()>> {
+    use std::sync::Arc;
+
+    use gpu_prover_job_processor::gpu_prover;
+    use local_ip_address::local_ip;
+    use queues::Buffer;
+    use socket_listener::SocketListener;
+    use tokio::sync::Mutex;
+    use zksync_prover_utils::region_fetcher::get_zone;
+    use zksync_types::proofs::SocketAddress;
+
+    let setup_load_mode = gpu_prover::load_setup_data_cache(&prover_config);
+    let witness_vector_queue = Buffer::new(prover_config.queue_capacity);
+    let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue));
+    let consumer = shared_witness_vector_queue.clone();
+    let prover = gpu_prover::Prover::new(
+        store_factory.create_store().await,
+        public_blob_store,
+        prover_config.clone(),
+        pool.clone(),
+        setup_load_mode,
+        circuit_ids_for_round_to_be_proven.clone(),
+        consumer,
+    );
+    let zone = get_zone().await;
+    let producer = shared_witness_vector_queue.clone();
+    let local_ip = local_ip().expect("Failed obtaining local IP address");
+    let address = SocketAddress {
+        host: local_ip,
+        port: prover_config.witness_vector_receiver_port,
+    };
+    vlog::info!("Local IP address is: {:?}", local_ip);
+    let socket_listener = SocketListener::new(
+        address,
+        producer,
+        pool.clone(),
+        prover_config.specialized_group_id,
+        zone,
+    );
+    vec![
+        tokio::spawn(socket_listener.listen_incoming_connections(stop_receiver.clone())),
+        tokio::spawn(prover.run(stop_receiver, None)),
+    ]
+}
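As a side note on the shutdown wiring in `main` above: a `oneshot` channel carries the Ctrl+C event out of the signal-handler thread, and a `watch` channel fans the stop flag out to every long-running task. A minimal runnable sketch of the same pattern, with a direct `send` standing in for the signal handler firing:

```rust
use std::time::Duration;

use tokio::sync::{oneshot, watch};

#[tokio::main]
async fn main() {
    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel::<()>();
    let (stop_sender, stop_receiver) = watch::channel(false);

    // A long-running task checks the watch flag between units of work,
    // just like `listen_incoming_connections` does.
    let worker = tokio::spawn(async move {
        loop {
            if *stop_receiver.borrow() {
                println!("stop flag observed, worker exiting");
                return;
            }
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
    });

    // Stand-in for the Ctrl+C handler firing.
    stop_signal_sender.send(()).ok();

    // Mirrors the `tokio::select!` in `main`: wake up on the stop signal...
    stop_signal_receiver.await.ok();
    // ...then broadcast the flag so all tasks drain and exit.
    stop_sender.send(true).ok();
    worker.await.unwrap();
}
```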
diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/prover_fri/src/prover_job_processor.rs
new file mode 100644
index 000000000000..b5331640224a
--- /dev/null
+++ b/prover/prover_fri/src/prover_job_processor.rs
@@ -0,0 +1,265 @@
+use std::collections::HashMap;
+use std::{sync::Arc, time::Instant};
+
+use tokio::task::JoinHandle;
+use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle;
+use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow;
+use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField;
+use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker;
+use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{
+    ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof,
+};
+use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{
+    ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit,
+};
+use zksync_prover_fri_types::circuit_definitions::{
+    base_layer_proof_config, recursion_layer_proof_config, ZkSyncDefaultRoundFunction,
+};
+
+use zkevm_test_harness::prover_utils::{prove_base_layer_circuit, prove_recursion_layer_circuit};
+
+use zksync_config::configs::fri_prover_group::{CircuitIdRoundTuple, FriProverGroupConfig};
+use zksync_config::configs::FriProverConfig;
+use zksync_dal::ConnectionPool;
+use zksync_object_store::ObjectStore;
+use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper, ProverJob, ProverServiceDataKey};
+use zksync_prover_fri_utils::fetch_next_circuit;
+use zksync_queued_job_processor::{async_trait, JobProcessor};
+use zksync_vk_setup_data_server_fri::{
+    get_cpu_setup_data_for_circuit_type, GoldilocksProverSetupData,
+};
+
+use crate::utils::{save_proof, setup_metadata_to_setup_data_key, verify_proof, ProverArtifacts};
+
+pub enum SetupLoadMode {
+    FromMemory(HashMap<ProverServiceDataKey, Arc<GoldilocksProverSetupData>>),
+    FromDisk,
+}
+
+pub struct Prover {
+    blob_store: Box<dyn ObjectStore>,
+    public_blob_store: Box<dyn ObjectStore>,
+    config: Arc<FriProverConfig>,
+    prover_connection_pool: ConnectionPool,
+    setup_load_mode: SetupLoadMode,
+    // Only pick jobs for the configured circuit ids and aggregation rounds.
+    // An empty list means all jobs are picked.
+    circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+}
+
+impl Prover {
+    pub fn new(
+        blob_store: Box<dyn ObjectStore>,
+        public_blob_store: Box<dyn ObjectStore>,
+        config: FriProverConfig,
+        prover_connection_pool: ConnectionPool,
+        setup_load_mode: SetupLoadMode,
+        circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>,
+    ) -> Self {
+        Prover {
+            blob_store,
+            public_blob_store,
+            config: Arc::new(config),
+            prover_connection_pool,
+            setup_load_mode,
+            circuit_ids_for_round_to_be_proven,
+        }
+    }
+
+    fn get_setup_data(&self, key: ProverServiceDataKey) -> Arc<GoldilocksProverSetupData> {
+        match &self.setup_load_mode {
+            SetupLoadMode::FromMemory(cache) => cache
+                .get(&key)
+                .expect("Setup data not found in cache")
+                .clone(),
+            SetupLoadMode::FromDisk => {
+                let started_at = Instant::now();
+                let artifact: GoldilocksProverSetupData =
+                    get_cpu_setup_data_for_circuit_type(key.clone());
+                metrics::histogram!(
+                    "prover_fri.prover.setup_data_load_time",
+                    started_at.elapsed(),
+                    "circuit_type" => key.circuit_id.to_string(),
+                );
+                Arc::new(artifact)
+            }
+        }
+    }
+
+    pub fn prove(
+        job: ProverJob,
+        config: Arc<FriProverConfig>,
+        setup_data: Arc<GoldilocksProverSetupData>,
+    ) -> ProverArtifacts {
+        let proof = match job.circuit_wrapper {
+            CircuitWrapper::Base(base_circuit) => {
+                Self::prove_base_layer(job.job_id, base_circuit, config, setup_data)
+            }
+            CircuitWrapper::Recursive(recursive_circuit) => {
+                Self::prove_recursive_layer(job.job_id, recursive_circuit, config, setup_data)
+            }
+        };
+        ProverArtifacts::new(job.block_number, proof)
+    }
+
+    fn prove_recursive_layer(
+        job_id: u32,
+        circuit: ZkSyncRecursiveLayerCircuit,
+        _config: Arc<FriProverConfig>,
+        artifact: Arc<GoldilocksProverSetupData>,
+    ) -> FriProofWrapper {
+        let worker = Worker::new();
+        let circuit_id = circuit.numeric_circuit_type();
+        let started_at = Instant::now();
+        let proof = prove_recursion_layer_circuit::<NoPow>(
+            circuit.clone(),
+            &worker,
+            recursion_layer_proof_config(),
+            &artifact.setup_base,
+            &artifact.setup,
+            &artifact.setup_tree,
+            &artifact.vk,
+            &artifact.vars_hint,
+            &artifact.wits_hint,
+            &artifact.finalization_hint,
+        );
+        metrics::histogram!(
+            "prover_fri.prover.proof_generation_time",
+            started_at.elapsed(),
+            "circuit_type" => circuit_id.to_string(),
+            "layer" => "recursive",
+        );
+        verify_proof(
+            &CircuitWrapper::Recursive(circuit),
+            &proof,
+            &artifact.vk,
+            job_id,
+        );
+        FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof))
+    }
+
+    fn prove_base_layer(
+        job_id: u32,
+        circuit: ZkSyncBaseLayerCircuit<
+            GoldilocksField,
+            VmWitnessOracle<GoldilocksField>,
+            ZkSyncDefaultRoundFunction,
+        >,
+        _config: Arc<FriProverConfig>,
+        artifact: Arc<GoldilocksProverSetupData>,
+    ) -> FriProofWrapper {
+        let worker = Worker::new();
+        let circuit_id = circuit.numeric_circuit_type();
+        let started_at = Instant::now();
+        let proof = prove_base_layer_circuit::<NoPow>(
+            circuit.clone(),
+            &worker,
+            base_layer_proof_config(),
+            &artifact.setup_base,
+            &artifact.setup,
+            &artifact.setup_tree,
+            &artifact.vk,
+            &artifact.vars_hint,
+            &artifact.wits_hint,
+            &artifact.finalization_hint,
+        );
+        metrics::histogram!(
+            "prover_fri.prover.proof_generation_time",
+            started_at.elapsed(),
+            "circuit_type" => circuit_id.to_string(),
+            "layer" => "base",
+        );
+        verify_proof(&CircuitWrapper::Base(circuit), &proof, &artifact.vk, job_id);
+        FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof))
+    }
+}
+
+#[async_trait]
+impl JobProcessor for Prover {
+    type Job = ProverJob;
+    type JobId = u32;
+    type JobArtifacts = ProverArtifacts;
+    const SERVICE_NAME: &'static str = "FriCpuProver";
+
+    async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> {
+        let mut storage = self.prover_connection_pool.access_storage().await;
+        let mut fri_prover_dal = storage.fri_prover_jobs_dal();
+        let prover_job = fetch_next_circuit(
+            &mut fri_prover_dal,
+            &*self.blob_store,
+            &self.circuit_ids_for_round_to_be_proven,
+        )
+        .await?;
+        Some((prover_job.job_id, prover_job))
+    }
+
+    async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) {
+        self.prover_connection_pool
+            .access_storage()
+            .await
+            .fri_prover_jobs_dal()
+            .save_proof_error(job_id, error)
+            .await;
+    }
+
+    async fn process_job(
+        &self,
+        job: Self::Job,
+        _started_at: Instant,
+    ) -> JoinHandle<Self::JobArtifacts> {
+        let config = Arc::clone(&self.config);
+        let setup_data = self.get_setup_data(job.setup_data_key.clone());
+        tokio::task::spawn_blocking(move || Self::prove(job, config, setup_data))
+    }
+
+    async fn save_result(
+        &self,
+        job_id: Self::JobId,
+        started_at: Instant,
+        artifacts: Self::JobArtifacts,
+    ) {
+        metrics::histogram!(
+            "prover_fri.prover.cpu_total_proving_time",
+            started_at.elapsed(),
+        );
+        let mut storage_processor = self.prover_connection_pool.access_storage().await;
+        save_proof(
+            job_id,
+            started_at,
+            artifacts,
+            &*self.blob_store,
+            &*self.public_blob_store,
+            &mut storage_processor,
+        )
+        .await;
+    }
+}
+
+pub fn load_setup_data_cache(config: &FriProverConfig) -> SetupLoadMode {
+    match config.setup_load_mode {
+        zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk,
+        zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => {
+            let mut cache = HashMap::new();
+            vlog::info!(
+                "Loading setup data cache for group {}",
+                &config.specialized_group_id
+            );
+            let prover_setup_metadata_list = FriProverGroupConfig::from_env()
+                .get_circuit_ids_for_group_id(config.specialized_group_id)
+                .expect(
+                    "At least one circuit should be configured for group when running in FromMemory mode",
+                );
+            vlog::info!(
+                "Configured setup metadata for group {}: {:?}",
+                &config.specialized_group_id,
+                prover_setup_metadata_list
+            );
+            for prover_setup_metadata in prover_setup_metadata_list {
+                let key = setup_metadata_to_setup_data_key(&prover_setup_metadata);
+                let setup_data = get_cpu_setup_data_for_circuit_type(key.clone());
+                cache.insert(key, Arc::new(setup_data));
+            }
+            SetupLoadMode::FromMemory(cache)
+        }
+    }
+}
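The `SetupLoadMode` trade-off above (proving latency vs. memory footprint) is the same for the CPU and GPU provers. A toy sketch of the lookup, with `Key` and `SetupData` as hypothetical stand-ins for `ProverServiceDataKey` and the multi-gigabyte setup artifacts:

```rust
use std::collections::HashMap;
use std::sync::Arc;

#[derive(Clone, PartialEq, Eq, Hash)]
struct Key(u8);
struct SetupData(u64); // stands in for setup keys, trees and hints

enum SetupLoadMode {
    FromMemory(HashMap<Key, Arc<SetupData>>),
    FromDisk,
}

// In the real service this deserializes large artifacts, which is why
// the FromMemory mode exists for latency-sensitive, specialized provers.
fn load_from_disk(key: &Key) -> SetupData {
    SetupData(key.0 as u64)
}

fn get_setup_data(mode: &SetupLoadMode, key: Key) -> Arc<SetupData> {
    match mode {
        // Cache hit: a cheap Arc clone, no I/O on the proving path.
        SetupLoadMode::FromMemory(cache) => cache
            .get(&key)
            .expect("setup data not found in cache")
            .clone(),
        // Cache miss by design: pay the load cost on every job.
        SetupLoadMode::FromDisk => Arc::new(load_from_disk(&key)),
    }
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(Key(1), Arc::new(load_from_disk(&Key(1))));
    let mode = SetupLoadMode::FromMemory(cache);
    assert_eq!(get_setup_data(&mode, Key(1)).0, 1);
}
```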
diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs
new file mode 100644
index 000000000000..6acfcdfce118
--- /dev/null
+++ b/prover/prover_fri/src/socket_listener.rs
@@ -0,0 +1,130 @@
+use queues::IsQueue;
+use std::net::SocketAddr;
+use std::time::Instant;
+use zksync_dal::ConnectionPool;
+use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress};
+
+use crate::utils::SharedWitnessVectorQueue;
+use tokio::sync::watch;
+use tokio::{
+    io::copy,
+    net::{TcpListener, TcpStream},
+};
+use zksync_object_store::bincode;
+use zksync_prover_fri_types::WitnessVectorArtifacts;
+
+pub(crate) struct SocketListener {
+    address: SocketAddress,
+    queue: SharedWitnessVectorQueue,
+    pool: ConnectionPool,
+    specialized_prover_group_id: u8,
+    zone: String,
+}
+
+impl SocketListener {
+    pub fn new(
+        address: SocketAddress,
+        queue: SharedWitnessVectorQueue,
+        pool: ConnectionPool,
+        specialized_prover_group_id: u8,
+        zone: String,
+    ) -> Self {
+        Self {
+            address,
+            queue,
+            pool,
+            specialized_prover_group_id,
+            zone,
+        }
+    }
+
+    async fn init(&self) -> TcpListener {
+        let listening_address = SocketAddr::new(self.address.host, self.address.port);
+        vlog::info!(
+            "Starting assembly receiver at host: {}, port: {}",
+            self.address.host,
+            self.address.port
+        );
+        let listener = TcpListener::bind(listening_address)
+            .await
+            .unwrap_or_else(|_| panic!("Failed binding address: {:?}", listening_address));
+
+        let _lock = self.queue.lock().await;
+        self.pool
+            .access_storage()
+            .await
+            .fri_gpu_prover_queue_dal()
+            .insert_prover_instance(
+                self.address.clone(),
+                self.specialized_prover_group_id,
+                self.zone.clone(),
+            )
+            .await;
+        listener
+    }
+
+    pub async fn listen_incoming_connections(self, stop_receiver: watch::Receiver<bool>) {
+        let listener = self.init().await;
+        let mut now = Instant::now();
+        loop {
+            if *stop_receiver.borrow() {
+                vlog::warn!("Stop signal received, shutting down socket listener");
+                return;
+            }
+            let stream = match listener.accept().await {
+                Ok(stream) => stream.0,
+                Err(e) => {
+                    panic!("could not accept connection: {:?}", e);
+                }
+            };
+            vlog::trace!(
+                "Received new assembly send connection, waited for {}ms.",
+                now.elapsed().as_millis()
+            );
+
+            self.handle_incoming_file(stream).await;
+
+            now = Instant::now();
+        }
+    }
+
+    async fn handle_incoming_file(&self, mut stream: TcpStream) {
+        let mut assembly: Vec<u8> = vec![];
+        let started_at = Instant::now();
+        copy(&mut stream, &mut assembly)
+            .await
+            .expect("Failed reading from stream");
+        let file_size_in_gb = assembly.len() / (1024 * 1024 * 1024);
+        vlog::trace!(
+            "Reading file of size {}GB from stream took {} seconds",
+            file_size_in_gb,
+            started_at.elapsed().as_secs()
+        );
+        metrics::histogram!(
+            "prover_fri.prover_fri.witness_vector_blob_time",
+            started_at.elapsed(),
+            "blob_size_in_gb" => file_size_in_gb.to_string(),
+        );
+        let witness_vector = bincode::deserialize::<WitnessVectorArtifacts>(&assembly)
+            .expect("Failed deserializing witness vector");
+        // Acquiring the queue lock and updating the DB must be done atomically;
+        // otherwise this is a TOCTTOU (time-of-check to time-of-use) race.
+        let mut queue = self.queue.lock().await;
+
+        queue
+            .add(witness_vector)
+            .expect("Failed saving witness vector to queue");
+        let status = if queue.capacity() == queue.size() {
+            GpuProverInstanceStatus::Full
+        } else {
+            GpuProverInstanceStatus::Available
+        };
+
+        self.pool
+            .access_storage()
+            .await
+            .fri_gpu_prover_queue_dal()
+            .update_prover_instance_status(self.address.clone(), status, self.zone.clone())
+            .await;
+    }
+}
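The TOCTTOU comment in `handle_incoming_file` is worth spelling out: the queue insertion and the `Full`/`Available` status derived from it must happen under a single lock acquisition, or a concurrent consumer could change the queue size between the two steps. A minimal sketch, with a `Vec` standing in for the bounded buffer and `Db` as a hypothetical stand-in for the gpu-prover-queue DAL:

```rust
use std::sync::Arc;

use tokio::sync::Mutex;

struct Db;
impl Db {
    async fn update_status(&self, full: bool) {
        println!("instance -> {}", if full { "Full" } else { "Available" });
    }
}

async fn push_and_report(queue: &Arc<Mutex<Vec<u8>>>, capacity: usize, item: u8, db: &Db) {
    // One lock acquisition covers the mutation, the status check, and the
    // DB update; releasing the lock between any two of these reintroduces
    // the time-of-check/time-of-use race.
    let mut q = queue.lock().await;
    q.push(item);
    let full = q.len() == capacity;
    db.update_status(full).await;
}

#[tokio::main]
async fn main() {
    let queue = Arc::new(Mutex::new(Vec::new()));
    push_and_report(&queue, 1, 42, &Db).await;
}
```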
diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs
new file mode 100644
index 000000000000..2353bef04a70
--- /dev/null
+++ b/prover/prover_fri/src/utils.rs
@@ -0,0 +1,142 @@
+use std::sync::Arc;
+use std::time::Instant;
+
+use queues::Buffer;
+use tokio::sync::Mutex;
+use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof};
+use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite;
+use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GoldilocksPoseidon2Sponge;
+use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow;
+use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::proof::Proof;
+use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::verifier::VerificationKey;
+use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{
+    GoldilocksExt2, GoldilocksField,
+};
+use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof;
+
+use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple;
+use zksync_dal::StorageProcessor;
+use zksync_object_store::ObjectStore;
+use zksync_prover_fri_types::{
+    CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts,
+};
+use zksync_prover_fri_utils::get_base_layer_circuit_id_for_recursive_layer;
+
+use zksync_types::L1BatchNumber;
+
+pub type F = GoldilocksField;
+pub type H = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+pub type EXT = GoldilocksExt2;
+
+pub type SharedWitnessVectorQueue = Arc<Mutex<Buffer<WitnessVectorArtifacts>>>;
+
+pub struct ProverArtifacts {
+    block_number: L1BatchNumber,
+    pub proof_wrapper: FriProofWrapper,
+}
+
+impl ProverArtifacts {
+    pub fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self {
+        Self {
+            block_number,
+            proof_wrapper,
+        }
+    }
+}
+
+pub async fn save_proof(
+    job_id: u32,
+    started_at: Instant,
+    artifacts: ProverArtifacts,
+    blob_store: &dyn ObjectStore,
+    public_blob_store: &dyn ObjectStore,
+    storage_processor: &mut StorageProcessor<'_>,
+) {
+    vlog::info!(
+        "Successfully proven job: {}, took: {:?}",
+        job_id,
+        started_at.elapsed()
+    );
+    let proof = artifacts.proof_wrapper;
+
+    // We save scheduler proofs in the public bucket,
+    // so that they can be verified independently while we're doing shadow proving.
+    let circuit_type = match &proof {
+        FriProofWrapper::Base(base) => base.numeric_circuit_type(),
+        FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit {
+            ZkSyncRecursionLayerProof::SchedulerCircuit(_) => {
+                public_blob_store
+                    .put(artifacts.block_number.0, &proof)
+                    .await
+                    .unwrap();
+                recursive_circuit.numeric_circuit_type()
+            }
+            _ => recursive_circuit.numeric_circuit_type(),
+        },
+    };
+
+    let blob_save_started_at = Instant::now();
+    let blob_url = blob_store.put(job_id, &proof).await.unwrap();
+    metrics::histogram!(
+        "prover_fri.prover.blob_save_time",
+        blob_save_started_at.elapsed(),
+        "circuit_type" => circuit_type.to_string(),
+    );
+
+    let mut transaction = storage_processor.start_transaction().await;
+    let job_metadata = transaction
+        .fri_prover_jobs_dal()
+        .save_proof(job_id, started_at.elapsed(), &blob_url)
+        .await;
+    if job_metadata.is_node_final_proof {
+        transaction
+            .fri_scheduler_dependency_tracker_dal()
+            .set_final_prover_job_id_for_l1_batch(
+                get_base_layer_circuit_id_for_recursive_layer(job_metadata.circuit_id),
+                job_id,
+                job_metadata.block_number,
+            )
+            .await;
+    }
+    transaction.commit().await;
+}
+
+pub fn verify_proof(
+    circuit_wrapper: &CircuitWrapper,
+    proof: &Proof<F, H, EXT>,
+    vk: &VerificationKey<F, H>,
+    job_id: u32,
+) {
+    let started_at = Instant::now();
+    let (is_valid, circuit_id) = match circuit_wrapper {
+        CircuitWrapper::Base(base_circuit) => (
+            verify_base_layer_proof::<NoPow>(base_circuit, proof, vk),
+            base_circuit.numeric_circuit_type(),
+        ),
+        CircuitWrapper::Recursive(recursive_circuit) => (
+            verify_recursion_layer_proof::<NoPow>(recursive_circuit, proof, vk),
+            recursive_circuit.numeric_circuit_type(),
+        ),
+    };
+    metrics::histogram!(
+        "prover_fri.prover.proof_verification_time",
+        started_at.elapsed(),
+        "circuit_type" => circuit_id.to_string(),
+    );
+    if !is_valid {
+        vlog::error!(
+            "Failed to verify proof for job-id: {} circuit_type: {}",
+            job_id,
+            circuit_id
+        );
+    }
+}
+
+pub fn setup_metadata_to_setup_data_key(
+    setup_metadata: &CircuitIdRoundTuple,
+) -> ProverServiceDataKey {
+    ProverServiceDataKey {
+        circuit_id: setup_metadata.circuit_id,
+        round: setup_metadata.aggregation_round.into(),
+    }
+}
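The bucket routing in `save_proof` is easy to misread, so here is the decision reduced to a sketch; the enums are simplified stand-ins for `FriProofWrapper` and `ZkSyncRecursionLayerProof`:

```rust
enum RecursiveProof {
    Scheduler,
    Other(u8),
}

enum ProofWrapper {
    Base(u8),
    Recursive(RecursiveProof),
}

// Every proof goes to the private bucket; only the scheduler proof is
// additionally published so it can be verified independently.
fn goes_to_public_bucket(proof: &ProofWrapper) -> bool {
    matches!(proof, ProofWrapper::Recursive(RecursiveProof::Scheduler))
}

fn main() {
    assert!(!goes_to_public_bucket(&ProofWrapper::Base(6)));
    assert!(!goes_to_public_bucket(&ProofWrapper::Recursive(RecursiveProof::Other(8))));
    assert!(goes_to_public_bucket(&ProofWrapper::Recursive(RecursiveProof::Scheduler)));
}
```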
diff --git a/prover/prover_fri/tests/basic_test.rs b/prover/prover_fri/tests/basic_test.rs
new file mode 100644
index 000000000000..085b741fb3c4
--- /dev/null
+++ b/prover/prover_fri/tests/basic_test.rs
@@ -0,0 +1,70 @@
+use std::sync::Arc;
+
+use zksync_config::configs::FriProverConfig;
+use zksync_config::ObjectStoreConfig;
+use zksync_object_store::{bincode, FriCircuitKey, ObjectStoreFactory};
+use zksync_types::proofs::AggregationRound;
+use zksync_types::L1BatchNumber;
+
+use serde::Serialize;
+use zksync_prover_fri::prover_job_processor::Prover;
+use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey};
+use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data;
+
+fn compare_serialized<T: Serialize>(expected: &T, actual: &T) {
+    let serialized_expected = bincode::serialize(expected).unwrap();
+    let serialized_actual = bincode::serialize(actual).unwrap();
+    assert_eq!(serialized_expected, serialized_actual);
+}
+
+async fn prover_and_assert_base_layer(
+    expected_proof_id: u32,
+    circuit_id: u8,
+    block_number: L1BatchNumber,
+    sequence_number: usize,
+) {
+    let mut object_store_config = ObjectStoreConfig::from_env();
+    object_store_config.file_backed_base_path = "./tests/data/".to_owned();
+    let object_store = ObjectStoreFactory::new(object_store_config)
+        .create_store()
+        .await;
+    let expected_proof = object_store
+        .get(expected_proof_id)
+        .await
+        .expect("missing expected proof");
+
+    let aggregation_round = AggregationRound::BasicCircuits;
+    let blob_key = FriCircuitKey {
+        block_number,
+        circuit_id,
+        sequence_number,
+        depth: 0,
+        aggregation_round,
+    };
+    let circuit_wrapper = object_store.get(blob_key).await.expect("circuit missing");
+    let circuit = match &circuit_wrapper {
+        CircuitWrapper::Base(base) => base.clone(),
+        CircuitWrapper::Recursive(_) => {
+            panic!("Expected base layer circuit")
+        }
+    };
+    let setup_data = Arc::new(generate_cpu_base_layer_setup_data(circuit));
+    let setup_key = ProverServiceDataKey::new(circuit_id, aggregation_round);
+    let prover_job = ProverJob::new(block_number, expected_proof_id, circuit_wrapper, setup_key);
+    let artifacts = Prover::prove(
+        prover_job,
+        Arc::new(FriProverConfig::from_env()),
+        setup_data,
+    );
+    compare_serialized(&expected_proof, &artifacts.proof_wrapper);
+}
+
+// #[tokio::test]
+// async fn test_base_layer_main_vm_proof_gen() {
+//     prover_and_assert_base_layer(5176866, 1, L1BatchNumber(128623), 1086).await;
+// }
+
+#[tokio::test]
+async fn test_base_layer_sha256_proof_gen() {
+    prover_and_assert_base_layer(1293714, 6, L1BatchNumber(114499), 479).await;
+}
diff --git a/prover/prover_fri/tests/data/proofs_fri/proof_1293714.bin b/prover/prover_fri/tests/data/proofs_fri/proof_1293714.bin
new file mode 100644
index 000000000000..1c74544b6953
Binary files /dev/null and b/prover/prover_fri/tests/data/proofs_fri/proof_1293714.bin differ
diff --git a/prover/prover_fri/tests/data/proofs_fri/proof_5176866.bin b/prover/prover_fri/tests/data/proofs_fri/proof_5176866.bin
new file mode 100644
index 000000000000..25c31b0ab2bb
Binary files /dev/null and b/prover/prover_fri/tests/data/proofs_fri/proof_5176866.bin differ
diff --git a/prover/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin b/prover/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin
new file mode 100644
index 000000000000..08d3efb362d0
Binary files /dev/null and b/prover/prover_fri/tests/data/prover_jobs_fri/114499_479_6_BasicCircuits_0.bin differ
diff --git a/prover/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin b/prover/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin
new file mode 100644
index 000000000000..14d95427c8c5
Binary files /dev/null and b/prover/prover_fri/tests/data/prover_jobs_fri/128623_1086_1_BasicCircuits_0.bin differ
diff --git a/prover/prover_fri_types/Cargo.toml b/prover/prover_fri_types/Cargo.toml
new file mode 100644
index 000000000000..f4734b00a152
--- /dev/null
+++ b/prover/prover_fri_types/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "zksync_prover_fri_types"
+version = "1.0.0"
+edition = "2021"
+
+
+[dependencies]
+zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" }
+zksync_types = { path = "../../core/lib/types", version = "1.0" }
+
+circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]}
+
+serde = { version = "1.0", features = ["derive"] }
diff --git a/prover/prover_fri_types/README.md b/prover/prover_fri_types/README.md
new file mode 100644
index 000000000000..7485656110e7
--- /dev/null
+++ b/prover/prover_fri_types/README.md
@@ -0,0 +1,8 @@
+# FRI prover types
+
+This lib contains types used by the FRI prover and shared among:
+
+- FRI prover
+- witness generator
+- vk and setup data generator
+- witness vector generator
diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs
new file mode 100644
index 000000000000..5ca1735f5d19
--- /dev/null
+++ b/prover/prover_fri_types/src/lib.rs
@@ -0,0 +1,115 @@
+pub use circuit_definitions;
+
+use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle;
+use circuit_definitions::boojum::cs::implementations::witness::WitnessVec;
+use circuit_definitions::boojum::field::goldilocks::GoldilocksField;
+use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit;
+use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerProof;
+use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof;
+use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit;
+use circuit_definitions::ZkSyncDefaultRoundFunction;
+
+use zksync_object_store::serialize_using_bincode;
+use zksync_object_store::Bucket;
+use zksync_object_store::FriCircuitKey;
+use zksync_object_store::StoredObject;
+use zksync_types::proofs::AggregationRound;
+use zksync_types::L1BatchNumber;
+
+#[derive(serde::Serialize, serde::Deserialize, Clone)]
+pub enum CircuitWrapper {
+    Base(
+        ZkSyncBaseLayerCircuit<
+            GoldilocksField,
+            VmWitnessOracle<GoldilocksField>,
+            ZkSyncDefaultRoundFunction,
+        >,
+    ),
+    Recursive(ZkSyncRecursiveLayerCircuit),
+}
+
+impl StoredObject for CircuitWrapper {
+    const BUCKET: Bucket = Bucket::ProverJobsFri;
+    type Key<'a> = FriCircuitKey;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        let FriCircuitKey {
+            block_number,
+            sequence_number,
+            circuit_id,
+            aggregation_round,
+            depth,
+        } = key;
+        format!("{block_number}_{sequence_number}_{circuit_id}_{aggregation_round:?}_{depth}.bin")
+    }
+
+    serialize_using_bincode!();
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+pub enum FriProofWrapper {
+    Base(ZkSyncBaseLayerProof),
+    Recursive(ZkSyncRecursionLayerProof),
+}
+
+impl StoredObject for FriProofWrapper {
+    const BUCKET: Bucket = Bucket::ProofsFri;
+    type Key<'a> = u32;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("proof_{key}.bin")
+    }
+
+    serialize_using_bincode!();
+}
+
+#[derive(Clone, serde::Serialize, serde::Deserialize)]
+pub struct WitnessVectorArtifacts {
+    pub witness_vector: WitnessVec<GoldilocksField>,
+    pub prover_job: ProverJob,
+}
+
+impl WitnessVectorArtifacts {
+    pub fn new(witness_vector: WitnessVec<GoldilocksField>, prover_job: ProverJob) -> Self {
+        Self {
+            witness_vector,
+            prover_job,
+        }
+    }
+}
+
+#[derive(Clone, serde::Serialize, serde::Deserialize)]
+pub struct ProverJob {
+    pub block_number: L1BatchNumber,
+    pub job_id: u32,
+    pub circuit_wrapper: CircuitWrapper,
+    pub setup_data_key: ProverServiceDataKey,
+}
+
+impl ProverJob {
+    pub fn new(
+        block_number: L1BatchNumber,
+        job_id: u32,
+        circuit_wrapper: CircuitWrapper,
+        setup_data_key: ProverServiceDataKey,
+    ) -> Self {
+        Self {
+            block_number,
+            job_id,
+            circuit_wrapper,
+            setup_data_key,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
+pub struct ProverServiceDataKey {
+    pub circuit_id: u8,
+    pub round: AggregationRound,
+}
+
+impl ProverServiceDataKey {
+    pub fn new(circuit_id: u8, round: AggregationRound) -> Self {
+        Self { circuit_id, round }
+    }
+}
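The `encode_key` scheme above is what produces the fixture names under `tests/data/prover_jobs_fri/`. A standalone illustration, with plain local types standing in for `FriCircuitKey` and `AggregationRound`:

```rust
#[derive(Debug)]
enum AggregationRound {
    BasicCircuits,
}

struct FriCircuitKey {
    block_number: u32,
    sequence_number: usize,
    circuit_id: u8,
    aggregation_round: AggregationRound,
    depth: u16,
}

// Same format string as the `StoredObject` impl for `CircuitWrapper`.
fn encode_key(key: &FriCircuitKey) -> String {
    format!(
        "{}_{}_{}_{:?}_{}.bin",
        key.block_number, key.sequence_number, key.circuit_id, key.aggregation_round, key.depth
    )
}

fn main() {
    let key = FriCircuitKey {
        block_number: 114499,
        sequence_number: 479,
        circuit_id: 6,
        aggregation_round: AggregationRound::BasicCircuits,
        depth: 0,
    };
    // Matches the checked-in test fixture `114499_479_6_BasicCircuits_0.bin`.
    assert_eq!(encode_key(&key), "114499_479_6_BasicCircuits_0.bin");
}
```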
diff --git a/prover/prover_fri_utils/Cargo.toml b/prover/prover_fri_utils/Cargo.toml
new file mode 100644
index 000000000000..b44612f3a554
--- /dev/null
+++ b/prover/prover_fri_utils/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "zksync_prover_fri_utils"
+version = "1.0.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" }
+zksync_config = { path = "../../core/lib/config", version = "1.0" }
+zksync_types = { path = "../../core/lib/types", version = "1.0" }
+zksync_prover_fri_types = { path = "../prover_fri_types", version = "1.0" }
+zksync_dal = { path = "../../core/lib/dal", version = "1.0" }
+vlog = { path = "../../core/lib/vlog", version = "1.0" }
+
+serde = { version = "1.0", features = ["derive"] }
+metrics = "0.20.0"
diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs
new file mode 100644
index 000000000000..8f962a28445c
--- /dev/null
+++ b/prover/prover_fri_utils/src/lib.rs
@@ -0,0 +1,79 @@
+pub mod socket_utils;
+
+use std::time::Instant;
+
+use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple;
+use zksync_dal::fri_prover_dal::FriProverDal;
+use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type;
+use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType;
+use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType;
+
+use zksync_object_store::{FriCircuitKey, ObjectStore};
+use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey};
+
+pub async fn fetch_next_circuit(
+    fri_prover_dal: &mut FriProverDal<'_, '_>,
+    blob_store: &dyn ObjectStore,
+    circuit_ids_for_round_to_be_proven: &Vec<CircuitIdRoundTuple>,
+) -> Option<ProverJob> {
+    let prover_job = match circuit_ids_for_round_to_be_proven.is_empty() {
+        false => {
+            // Specialized prover: proving a subset of the configured circuits.
+            fri_prover_dal
+                .get_next_job_for_circuit_id_round(circuit_ids_for_round_to_be_proven)
+                .await
+        }
+        true => {
+            // Generalized prover: proving all circuits.
+            fri_prover_dal.get_next_job().await
+        }
+    }?;
+    vlog::info!("Started processing prover job: {:?}", prover_job);
+
+    let circuit_key = FriCircuitKey {
+        block_number: prover_job.block_number,
+        sequence_number: prover_job.sequence_number,
+        circuit_id: prover_job.circuit_id,
+        aggregation_round: prover_job.aggregation_round,
+        depth: prover_job.depth,
+    };
+    let started_at = Instant::now();
+    let input = blob_store
+        .get(circuit_key)
+        .await
+        .unwrap_or_else(|err| panic!("{err:?}"));
+    metrics::histogram!(
+        "prover_fri.prover.blob_fetch_time",
+        started_at.elapsed(),
+        "circuit_type" => prover_job.circuit_id.to_string(),
+        "aggregation_round" => format!("{:?}", prover_job.aggregation_round),
+    );
+    let setup_data_key = ProverServiceDataKey {
+        circuit_id: prover_job.circuit_id,
+        round: prover_job.aggregation_round,
+    };
+    Some(ProverJob::new(
+        prover_job.block_number,
+        prover_job.id,
+        input,
+        setup_data_key,
+    ))
+}
+
+pub fn get_recursive_layer_circuit_id_for_base_layer(base_layer_circuit_id: u8) -> u8 {
+    let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type(
+        BaseLayerCircuitType::from_numeric_value(base_layer_circuit_id),
+    );
+    recursive_circuit_type as u8
+}
+
+pub fn get_base_layer_circuit_id_for_recursive_layer(recursive_layer_circuit_id: u8) -> u8 {
+    recursive_layer_circuit_id - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8
+}
+
+pub fn get_numeric_circuit_id(circuit_wrapper: &CircuitWrapper) -> u8 {
+    match circuit_wrapper {
+        CircuitWrapper::Base(circuit) => circuit.numeric_circuit_type(),
+        CircuitWrapper::Recursive(circuit) => circuit.numeric_circuit_type(),
+    }
+}
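To make the id arithmetic at the end of this file concrete: leaf-layer recursive circuit ids are base-layer ids shifted by a fixed offset, so the base id is recovered by subtracting `ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8`. A worked example, where the offset value is an assumption for illustration only:

```rust
// Assumed offset for the sake of the example; the real value is
// `ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8`.
const NODE_LAYER_CIRCUIT: u8 = 2;

fn get_base_layer_circuit_id_for_recursive_layer(recursive_layer_circuit_id: u8) -> u8 {
    recursive_layer_circuit_id - NODE_LAYER_CIRCUIT
}

fn main() {
    // Under this offset, the leaf circuit with recursive id 8 wraps base circuit 6.
    assert_eq!(get_base_layer_circuit_id_for_recursive_layer(8), 6);
    // Round trip: base id 6 -> recursive leaf id 8 -> base id 6.
    let recursive = 6 + NODE_LAYER_CIRCUIT;
    assert_eq!(get_base_layer_circuit_id_for_recursive_layer(recursive), 6);
}
```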
diff --git a/prover/prover_fri_utils/src/socket_utils.rs b/prover/prover_fri_utils/src/socket_utils.rs
new file mode 100644
index 000000000000..4637da310812
--- /dev/null
+++ b/prover/prover_fri_utils/src/socket_utils.rs
@@ -0,0 +1,64 @@
+use std::io::copy;
+use std::io::ErrorKind;
+use std::io::Read;
+use std::net::SocketAddr;
+use std::net::TcpStream;
+use std::time::{Duration, Instant};
+use zksync_types::proofs::SocketAddress;
+
+pub fn send_assembly(
+    job_id: u32,
+    serialized: &mut Vec<u8>,
+    address: &SocketAddress,
+) -> Result<(Duration, u64), String> {
+    vlog::trace!(
+        "Sending assembly to {}:{}, job id {{{job_id}}}",
+        address.host,
+        address.port
+    );
+
+    let socket_address = SocketAddr::new(address.host, address.port);
+    let started_at = Instant::now();
+    let mut error_messages = vec![];
+
+    for _ in 0..10 {
+        match TcpStream::connect(socket_address) {
+            Ok(mut stream) => {
+                return send(&mut serialized.as_slice(), &mut stream)
+                    .map(|result| (started_at.elapsed(), result))
+                    .map_err(|err| format!("Could not send assembly to prover: {err:?}"));
+            }
+            Err(err) => {
+                error_messages.push(format!("{err:?}"));
+            }
+        }
+    }
+
+    Err(format!(
+        "Could not establish connection with prover after several attempts: {error_messages:?}"
+    ))
+}
+
+fn send(read: &mut impl Read, tcp: &mut TcpStream) -> std::io::Result<u64> {
+    let mut attempts = 10;
+    let mut last_result = Ok(0);
+
+    while attempts > 0 {
+        match copy(read, tcp) {
+            Ok(copied) => return Ok(copied),
+            Err(err) if can_be_retried(err.kind()) => {
+                attempts -= 1;
+                last_result = Err(err);
+            }
+            Err(err) => return Err(err),
+        }
+
+        std::thread::sleep(Duration::from_millis(50));
+    }
+
+    last_result
+}
+
+fn can_be_retried(err: ErrorKind) -> bool {
+    matches!(err, ErrorKind::TimedOut | ErrorKind::ConnectionRefused)
+}
diff --git a/prover/rust-toolchain b/prover/rust-toolchain
new file mode 100644
index 000000000000..b20e56df4487
--- /dev/null
+++ b/prover/rust-toolchain
@@ -0,0 +1 @@
+nightly-2023-05-31
diff --git a/core/bin/setup_key_generator_and_server/Cargo.toml b/prover/setup_key_generator_and_server/Cargo.toml
similarity index 84%
rename from core/bin/setup_key_generator_and_server/Cargo.toml
rename to prover/setup_key_generator_and_server/Cargo.toml
index 87ce636f8791..f58895693555 100644
--- a/core/bin/setup_key_generator_and_server/Cargo.toml
+++ b/prover/setup_key_generator_and_server/Cargo.toml
@@ -18,9 +18,9 @@ name = "zksync_setup_key_generator"
 path = "src/main.rs"
 
 [dependencies]
-zksync_types = {path = "../../lib/types", version = "1.0" }
-vlog = { path = "../../lib/vlog", version = "1.0" }
-zksync_config = { path = "../../lib/config", version = "1.0" }
+zksync_types = {path = "../../core/lib/types", version = "1.0" }
+vlog = { path = "../../core/lib/vlog", version = "1.0" }
+zksync_config = { path = "../../core/lib/config", version = "1.0" }
 circuit_testing = {git = "https://github.com/matter-labs/era-circuit_testing.git", branch = "main"}
 api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.3", features=["gpu"], default-features=false}
diff --git a/core/bin/setup_key_generator_and_server/data/.gitkeep b/prover/setup_key_generator_and_server/data/.gitkeep
similarity index 100%
rename from core/bin/setup_key_generator_and_server/data/.gitkeep
rename to prover/setup_key_generator_and_server/data/.gitkeep
diff --git a/core/bin/setup_key_generator_and_server/src/lib.rs b/prover/setup_key_generator_and_server/src/lib.rs
similarity index 100%
rename from core/bin/setup_key_generator_and_server/src/lib.rs
rename to prover/setup_key_generator_and_server/src/lib.rs
diff --git a/core/bin/setup_key_generator_and_server/src/main.rs b/prover/setup_key_generator_and_server/src/main.rs
similarity index 100%
rename from core/bin/setup_key_generator_and_server/src/main.rs
rename to prover/setup_key_generator_and_server/src/main.rs
diff --git a/core/bin/vk_setup_data_generator_server_fri/Cargo.toml b/prover/vk_setup_data_generator_server_fri/Cargo.toml
similarity index 54%
rename from core/bin/vk_setup_data_generator_server_fri/Cargo.toml
rename to prover/vk_setup_data_generator_server_fri/Cargo.toml
index 5e0e92abccda..7eb59dff834a 100644
--- a/core/bin/vk_setup_data_generator_server_fri/Cargo.toml
+++ b/prover/vk_setup_data_generator_server_fri/Cargo.toml
@@ -16,13 +16,20 @@ path = "src/setup_data_generator.rs"
 name = "zksync_vk_setup_data_server_fri"
 path = "src/lib.rs"
 
+[[bin]]
+name = "zksync_commitment_generator_fri"
+path = "src/commitment_generator.rs"
+
 [dependencies]
-vlog = { path = "../../lib/vlog", version = "1.0" }
-zksync_types = {path = "../../lib/types", version = "1.0" }
+vlog = { path = "../../core/lib/vlog", version = "1.0" }
+zksync_types = {path = "../../core/lib/types", version = "1.0" }
+zksync_prover_utils = {path = "../../core/lib/prover_utils", version = "1.0" }
+zksync_prover_fri_types = { path = "../prover_fri_types", version = "1.0" }
 
 zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0"}
 circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]}
-zksync_config = { path = "../../lib/config", version = "1.0" }
+shivini = {git = "https://github.com/matter-labs/shivini.git", branch ="main", optional = true }
+zksync_config = { path = "../../core/lib/config", version = "1.0" }
 
 serde_json = "1.0"
 serde = { version = "1.0", features = ["derive"] }
@@ -30,3 +37,10 @@ serde_derive = "1.0"
 itertools = "0.10.5"
 bincode = "1"
 structopt = "0.3.26"
+
+[dev-dependencies]
+proptest = "1.2.0"
+
+[features]
+default = []
+gpu = ["shivini"]
diff --git a/prover/vk_setup_data_generator_server_fri/README.md b/prover/vk_setup_data_generator_server_fri/README.md
new file mode 100644
index 000000000000..2e30263d954b
--- /dev/null
+++ b/prover/vk_setup_data_generator_server_fri/README.md
@@ -0,0 +1,17 @@
+# Setup data and VK generator and server
+
+## Generating setup data for a specific circuit type
+
+`zk f cargo +nightly-2023-05-31 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer`
+
+## Generating GPU setup data for a specific circuit type
+
+`zk f cargo +nightly-2023-05-31 run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer`
+
+## Generating VKs
+
+`cargo +nightly-2023-05-31 run --release --bin zksync_vk_generator_fri`
+
+## Generating the VK commitment for existing VKs
+
+`cargo +nightly-2023-05-31 run --release --bin zksync_commitment_generator_fri`
diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin
new file mode 100644
index 000000000000..473d8afbb503
Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin differ
diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin
new file mode 100644
index 000000000000..b0212ea3a9a6
Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin differ
diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin
new file mode 100644
index 000000000000..de5e238dd912
Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin differ
diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin
new file mode 100644
index 000000000000..de5e238dd912
Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin differ
diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin
new file mode 100644
index 000000000000..4e84fea3f9ab Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin new file mode 100644 index 000000000000..844f50aa0c62 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin new file mode 100644 index 000000000000..f0b996996a44 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin new file mode 100644 index 000000000000..867b4ce0c270 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin new file mode 100644 index 000000000000..efed390e9107 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin new file mode 100644 index 000000000000..b8dbb8c2bda0 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin new file mode 100644 index 000000000000..35d82d21ed62 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin new file mode 100644 index 000000000000..64ca51d1b70d Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin new file mode 100644 index 000000000000..047cadfec080 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin new file mode 100644 index 000000000000..40624612c238 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin new file mode 100644 index 000000000000..048e794e82d1 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin differ diff --git 
a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin new file mode 100644 index 000000000000..0cbd6e670c15 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin new file mode 100644 index 000000000000..c674dafa7243 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin new file mode 100644 index 000000000000..c674dafa7243 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin new file mode 100644 index 000000000000..3e449cec7cf0 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin new file mode 100644 index 000000000000..85792902bf69 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin new file mode 100644 index 000000000000..391b5cfb33cc Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin new file mode 100644 index 000000000000..c72dee2934ee Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin new file mode 100644 index 000000000000..1c7b70f8aac1 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin new file mode 100644 index 000000000000..dca15e362ce3 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin new file mode 100644 index 000000000000..a40c24dd80b0 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin new file mode 100644 
index 000000000000..082fcccf6d4c Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin new file mode 100644 index 000000000000..81d6735e9789 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin differ diff --git a/prover/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin new file mode 100644 index 000000000000..a8f3489ae673 Binary files /dev/null and b/prover/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin differ diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json similarity index 73% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json index 62d4b5159cfd..8b980dde0932 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json @@ -157,100 +157,100 @@ }, "setup_merkle_tree_cap": [ [ - 49678851679356961, - 7782973250089517567, - 5742332748381659122, - 7021959466433668811 + 14828808970297586390, + 10812916566872682190, + 4814232139347019203, + 9176100102005882856 ], [ - 7173510129360491239, - 14879623732006243967, - 17462481942835663634, - 10566534887019251489 + 3967670424646093724, + 15187149346332822036, + 12510674113975494748, + 4510474866283045065 ], [ - 6111404110591510291, - 16968701421170349713, - 3258322406402816010, - 4065733755715147020 + 3090046568261325916, + 2517005306301042120, + 15367389528664824672, + 4112249640174889690 ], [ - 11846482936856554144, - 2147966313304461627, - 728534638266355796, - 95380962898885359 + 18105273888459951541, + 5232822348364097609, + 16713617721022374900, + 190722882016699057 ], [ - 16229166733732585403, - 681879112503008152, - 15066140174745708871, - 2481070927240530192 + 3680596367242563456, + 8277283738818682164, + 770687293026604966, + 964680746586707996 ], [ - 7420166149307347478, - 16882412417463011370, - 2676278458519593834, - 11896039619588737011 + 14252438150460413337, + 10138568641496080571, + 10299531489109681582, + 1029545029340913858 ], [ - 9967046125648558907, - 913185819453214883, - 13915100312430656654, - 3451781706223208121 + 15064118887360123896, + 5094380307043679103, + 14910118547805564561, + 10715877189078928458 ], [ - 11970577891442698507, - 6873264544724217019, - 12143450276430417018, - 10967230584499216609 + 15803708295742972434, + 11361281300374199895, + 17281542834964672336, + 4609037794875108477 ], [ - 9122945027646360633, - 6776986892003671741, - 16638557427754611081, - 9270157208179163878 + 17069406781160283989, + 1486103635977441667, + 5599688364977636665, + 2606216552412168601 ], [ - 864383518976280584, - 12660243649720946801, - 9037458929254917711, - 14557825576434269273 + 11440625988157319556, + 14165489000241104461, + 12815938030387403166, + 18358353209834817866 ], [ - 3250682114219598633, - 6757359290236006418, - 18440828022928773886, - 5144935506772537543 + 17484081080457701823, + 8488503007959107424, + 15436257938093142847, + 4434713360392963026 ], [ - 
2931846103546480575, - 16357131371317801624, - 11786368493872353262, - 16190743419947458980 + 11228941610173378380, + 15586341149405816978, + 6641174723323244420, + 6502235669428985157 ], [ - 13988603584113133261, - 17909593593928730530, - 8426307689101617932, - 17024276855000805045 + 1780813236656786088, + 13705357356856822817, + 13823081051755218384, + 2628439960173921306 ], [ - 5063177493048024705, - 14492565639354808569, - 8414969388915699488, - 8783210495893660069 + 5781733601274220376, + 4396700195519547383, + 4802209023715066280, + 7053779784999063193 ], [ - 3181994260382982581, - 7353211204501032194, - 2789564250321287823, - 14671305422643991999 + 11266624277386388719, + 8947017045799184361, + 15630186476936326904, + 4970655490195943663 ], [ - 15178790861258436355, - 13029080993287351770, - 11765890450974641664, - 17689888130365913676 + 13604491581251560181, + 754251763827647964, + 85019175871498033, + 16264768579713941582 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json similarity index 73% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json index 7d1d36cc5d63..bd030f4a3942 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json @@ -157,100 +157,100 @@ }, "setup_merkle_tree_cap": [ [ - 12441068674128723436, - 11790418381101103423, - 6129861349451795186, - 4710685951407331081 + 769023300008107994, + 15035929950782434877, + 4767632476551610273, + 6003255068455690448 ], [ - 5096638018234605034, - 9297760046210706272, - 11038604827668536380, - 1473851171818850283 + 9026207300516234226, + 12638320530307573615, + 9548682324993884667, + 1669798832247316225 ], [ - 10176456143476370592, - 6347222948505884286, - 14243463427362932109, - 2973760622304770278 + 15771054686436993477, + 14867268062778845716, + 2755571000455103667, + 17012958053718147653 ], [ - 5822232151084468810, - 18342623242512353783, - 9991236151502413973, - 15507511860550425736 + 4796035851498319953, + 15578398003463831188, + 16487202842559859878, + 12241980977059723891 ], [ - 1818744887123817977, - 12543213684249900332, - 17104140515825257324, - 11236907728434185585 + 4905638728558119005, + 7863530509045382726, + 17089556944619165055, + 6881822366736890373 ], [ - 8045609137449889534, - 16202711686628756319, - 8684382310386899038, - 11976015809126046559 + 5605698221760842562, + 15309408538060590842, + 7774687445824112675, + 1029523717262265273 ], [ - 10733288510012516866, - 2076154788656020771, - 18044559881610029743, - 9096073016449196929 + 4602145677202369894, + 10437641120626639391, + 16191157018649573359, + 2145181286557866215 ], [ - 7905144671615160540, - 10783635808086751649, - 5523411776063518007, - 13403645893185907834 + 8347044010387916224, + 7660057627892565262, + 4087655568250966187, + 4920987872151258558 ], [ - 17764077820496881012, - 3635568206179979668, - 6426048422505024806, - 4265189848292747243 + 4652946618899021165, + 10106017231231912813, + 3800120974014235756, + 6675575778477161887 ], [ - 16526539683536908140, - 12588861570327833011, - 3214353082854373768, - 2777956265788849348 + 4980892440155162443, + 6801648544364465294, + 2944365492323162449, + 6942743875951446975 ], [ - 3217854840859818387, - 943498317256413059, - 
1345084765986076822, - 15254121967033229193 + 17666291786065358473, + 11132525791177279380, + 1090211641846788491, + 18206157565187626653 ], [ - 12572651518442615530, - 3025471270830286975, - 1047774746509518234, - 1321385795793706983 + 11955322036584323772, + 9745237745974724322, + 7620783083675382303, + 6501674220304463161 ], [ - 16656078307211525871, - 5888489465261654229, - 3215923756560092884, - 1003958137542788275 + 14154028621322325960, + 12267966522963634693, + 16381614744195346959, + 10938579521199157178 ], [ - 5480821888469691123, - 10857988702136328279, - 1137095326815922962, - 12357465209764215246 + 5661196656360295299, + 16217006627182303897, + 15559803411312667053, + 14580126280029049348 ], [ - 2579074180679487806, - 18319237296113906693, - 1102892466010219312, - 16458320716783577649 + 9186970898669061808, + 692683705561232556, + 14664202853793025315, + 7113265307923171991 ], [ - 92254087489383590, - 7332092919119409047, - 15748242357100618434, - 2667394706391511758 + 256017097329808658, + 1298676672131862834, + 9342013003187223457, + 172944159302847111 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json similarity index 73% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json index 7ee86f8b4c2f..15680c68c303 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json @@ -157,100 +157,100 @@ }, "setup_merkle_tree_cap": [ [ - 12441068674128723436, - 11790418381101103423, - 6129861349451795186, - 4710685951407331081 + 769023300008107994, + 15035929950782434877, + 4767632476551610273, + 6003255068455690448 ], [ - 5096638018234605034, - 9297760046210706272, - 11038604827668536380, - 1473851171818850283 + 9026207300516234226, + 12638320530307573615, + 9548682324993884667, + 1669798832247316225 ], [ - 10176456143476370592, - 6347222948505884286, - 14243463427362932109, - 2973760622304770278 + 15771054686436993477, + 14867268062778845716, + 2755571000455103667, + 17012958053718147653 ], [ - 5822232151084468810, - 18342623242512353783, - 9991236151502413973, - 15507511860550425736 + 4796035851498319953, + 15578398003463831188, + 16487202842559859878, + 12241980977059723891 ], [ - 1818744887123817977, - 12543213684249900332, - 17104140515825257324, - 11236907728434185585 + 4905638728558119005, + 7863530509045382726, + 17089556944619165055, + 6881822366736890373 ], [ - 8045609137449889534, - 16202711686628756319, - 8684382310386899038, - 11976015809126046559 + 5605698221760842562, + 15309408538060590842, + 7774687445824112675, + 1029523717262265273 ], [ - 10733288510012516866, - 2076154788656020771, - 18044559881610029743, - 9096073016449196929 + 4602145677202369894, + 10437641120626639391, + 16191157018649573359, + 2145181286557866215 ], [ - 7905144671615160540, - 10783635808086751649, - 5523411776063518007, - 13403645893185907834 + 8347044010387916224, + 7660057627892565262, + 4087655568250966187, + 4920987872151258558 ], [ - 17764077820496881012, - 3635568206179979668, - 6426048422505024806, - 4265189848292747243 + 4652946618899021165, + 10106017231231912813, + 3800120974014235756, + 6675575778477161887 ], [ - 16526539683536908140, - 12588861570327833011, - 3214353082854373768, - 2777956265788849348 + 
4980892440155162443, + 6801648544364465294, + 2944365492323162449, + 6942743875951446975 ], [ - 3217854840859818387, - 943498317256413059, - 1345084765986076822, - 15254121967033229193 + 17666291786065358473, + 11132525791177279380, + 1090211641846788491, + 18206157565187626653 ], [ - 12572651518442615530, - 3025471270830286975, - 1047774746509518234, - 1321385795793706983 + 11955322036584323772, + 9745237745974724322, + 7620783083675382303, + 6501674220304463161 ], [ - 16656078307211525871, - 5888489465261654229, - 3215923756560092884, - 1003958137542788275 + 14154028621322325960, + 12267966522963634693, + 16381614744195346959, + 10938579521199157178 ], [ - 5480821888469691123, - 10857988702136328279, - 1137095326815922962, - 12357465209764215246 + 5661196656360295299, + 16217006627182303897, + 15559803411312667053, + 14580126280029049348 ], [ - 2579074180679487806, - 18319237296113906693, - 1102892466010219312, - 16458320716783577649 + 9186970898669061808, + 692683705561232556, + 14664202853793025315, + 7113265307923171991 ], [ - 92254087489383590, - 7332092919119409047, - 15748242357100618434, - 2667394706391511758 + 256017097329808658, + 1298676672131862834, + 9342013003187223457, + 172944159302847111 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json similarity index 76% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json index 0d2da6f32cb7..54cda61bfca1 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json @@ -19,19 +19,19 @@ "public_inputs_locations": [ [ 0, - 1041222 + 1041180 ], [ 1, - 1041222 + 1041180 ], [ 2, - 1041222 + 1041180 ], [ 3, - 1041222 + 1041180 ] ], "extra_constant_polys_for_selectors": 3, @@ -183,100 +183,100 @@ }, "setup_merkle_tree_cap": [ [ - 3951964484115607655, - 16829660660304901588, - 14211539760499261401, - 2472527790054023464 + 11230239963091452912, + 13026461504664266360, + 16713169701215574185, + 12615976815403328075 ], [ - 10602340213073929688, - 15269906034841934319, - 1999722205715604105, - 11671865217787642490 + 3804170955286114995, + 17070428581913287652, + 16444458341617335831, + 12310477573463289452 ], [ - 16987932594962878624, - 3285981047520267163, - 4222968438268808943, - 2575664426357529034 + 15717060580753384291, + 5009332941172699339, + 5141249138356870627, + 14525082357218066066 ], [ - 2918713921338525202, - 9601231618190155084, - 4500898479717158082, - 4495729168148349252 + 5451473672302709994, + 14405751484257675144, + 6034423888032264287, + 14528639950002943854 ], [ - 11155257299174723909, - 15132078140316221530, - 2851783895665737890, - 13286182309288546626 + 11233253504932768792, + 9984746197426258635, + 4438596486612781127, + 17198259095221505968 ], [ - 13156126079595134586, - 3129292503659656130, - 13013338048823100513, - 1046712685787755983 + 5681291117413714556, + 18436270377115663121, + 
11989211218541826903, + 2671135999160204746 ], [ - 11100676529077504927, - 16645386371265872013, - 18436391699309598214, - 18316046327508256708 + 963730224051018518, + 1293300388921029500, + 7261069736084660486, + 12414181044622900231 ], [ - 7941963338454253492, - 13992861888733776514, - 286345419062083924, - 7799721824685750420 + 17155210460432560694, + 920670927493907875, + 6658462737460123613, + 8253351903179999964 ], [ - 10685589255052466830, - 14117977365602684661, - 9968846665194915012, - 2894934014962806033 + 3039615529982926935, + 12254392109531368227, + 12274357209453453775, + 16608606384477787215 ], [ - 14717979199076289807, - 11834986281925334505, - 9871430568597302240, - 6077792418098417085 + 11218496188813210888, + 16107046895420213310, + 16285761395335573298, + 8624190103510841482 ], [ - 13903843835636142856, - 13458907111742740634, - 3266506741999179349, - 6451325180775835720 + 14835727297511074005, + 1164596723783439781, + 11276497358832644724, + 9219531475080512501 ], [ - 1714065376768082879, - 267154899590020848, - 7203223479936126047, - 1425749651129432103 + 3715985935119482043, + 12185867206854340138, + 7900628271499451412, + 8891356055003024224 ], [ - 16369853181249865160, - 8023509533469270339, - 1080007288885554015, - 8528063843972547976 + 17763963322580587554, + 218146194744968367, + 16033549148238902530, + 1522529898878047239 ], [ - 13901281976847998439, - 13482886048424426728, - 4714698685260429252, - 8450188099405888087 + 8120794419871565322, + 18267867130143702317, + 17178857528695612575, + 14839022417830798252 ], [ - 17148225032190900757, - 16136144457754963192, - 9054464804385647991, - 7309929357910934174 + 16480189677896973754, + 18441483621256548692, + 3982214183107947832, + 5099760740801601882 ], [ - 15279738581003263809, - 3233028670055847880, - 9675016189592150706, - 882792948810468198 + 10335714458962187072, + 8498294096277334786, + 8574103413352512596, + 9714850528124914412 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json similarity index 70% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json index f7ac8502f3ad..4537187b9b36 100644 --- 
a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json @@ -144,100 +144,100 @@ }, "setup_merkle_tree_cap": [ [ - 9025410449149543081, - 2150978872708352380, - 11929541854331362678, - 10889802961463093007 + 5926546619152935907, + 11291861669768573654, + 11100100891141895430, + 1040099038134319144 ], [ - 14367653170042876779, - 15711919146034996716, - 10192530351300036212, - 17779266089181903535 + 9405378490457870663, + 11971348617109093172, + 7779954465112100917, + 8521139113892942903 ], [ - 16518023242439166042, - 16125234777789979115, - 3731170852541886643, - 6478513303454611533 + 1041442145290466080, + 2626937507866398782, + 4297959424787982903, + 7963254695121664304 ], [ - 18022090193594520460, - 9824039918773778848, - 8099323107578446833, - 14138481655991300874 + 8679424872010178168, + 928230210029079843, + 17862919271344969949, + 9085342720844642067 ], [ - 9646131916658144639, - 14765462438355160604, - 12353948730635165989, - 17374238707731963259 + 2346566700143956389, + 751827788815495159, + 18018129704559687246, + 6344673729449349673 ], [ - 6466098066822358798, - 3802784570329552578, - 11192384635627240892, - 16889566382350703339 + 12798999539756004171, + 2962217720855368908, + 17815764746262544024, + 6141433679632029898 ], [ - 13295229914781218631, - 11477715700480687057, - 1029809241419010036, - 17026448985101402834 + 10612436896218340091, + 5382517797965219051, + 1440771605952502920, + 6120504474919675320 ], [ - 2928603244677043291, - 2590454321011930112, - 16594893027153225789, - 17268049387874967289 + 5639210895028949894, + 17579589483393163114, + 8531068549022389838, + 9055992165271810945 ], [ - 5231897347421383206, - 3542534855630287592, - 15172142009555909931, - 1424027296261247931 + 15625252378325581383, + 11791782086341113568, + 1976318982912441593, + 16561636205817299485 ], [ - 6943787726298694042, - 15335886870449394305, - 14785428951904960648, - 11215936320351406370 + 9291503982934971506, + 5967409911022700010, + 9096839168538146295, + 3004596177933970509 ], [ - 11447524278789270182, - 14266446056724893962, - 10914488308431466718, - 7364502792097837348 + 9243725287341188464, + 6878316427230924845, + 7270708110528992687, + 15417458474646493002 ], [ - 11359545309848431234, - 4980893295986349022, - 11473702556031439650, - 17861564638231497628 + 15577762808206668193, + 10775213926343901301, + 4900917235853777300, + 8940673145641313937 ], [ - 17663843964156179007, - 14833488899297277996, - 5714793925932097698, - 6902306052141283285 + 18157038451252266825, + 13776543473230491269, + 17449669960102455201, + 1902286122568749061 ], [ - 10270525424019036326, - 1923061535861034720, - 16424397298429761441, - 7171630776964282144 + 10247491007925641249, + 5411016508841956578, + 11766519965796614613, + 1073824923129670847 ], [ - 10524076026990794794, - 15223680225877637426, - 396032395140092130, - 7923171480236200520 + 10691592838471536401, + 16863854034452440410, + 16989985027265774429, + 10784858673090746367 ], [ - 6233273405562217643, - 4452358004773676392, - 6128591467452883036, - 3468440652866645203 + 5688638173552292266, + 2543022480770607266, + 1257951713416281965, + 6435312724052439304 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json similarity index 70% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json 
rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json index e46c80f82e2f..48533211ab0c 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json @@ -144,100 +144,100 @@ }, "setup_merkle_tree_cap": [ [ - 8587183264607820083, - 7501401150925504063, - 602581967238805636, - 11668946895593393939 + 7131095197302553268, + 8153577698865827964, + 4503618255465567901, + 14277683095622422701 ], [ - 2157265850227756788, - 11032761412102625645, - 3754093130785871858, - 18006602563614829680 + 17227565432950328258, + 3812763700819798255, + 11013754802988076876, + 6868705666417715458 ], [ - 10655937569249931504, - 126923738105413051, - 7841305508214111486, - 8495811533395543706 + 6323710131523972374, + 2634684603167736396, + 18066615186100844391, + 12905391948029250777 ], [ - 4193341982694972421, - 11921991451443354914, - 6997364257885731873, - 17667686448643761264 + 3492005229273374536, + 6055742838160324534, + 1795486371688618704, + 13052026771106363342 ], [ - 9540865900297042782, - 6139586301331019438, - 6145465934548908233, - 7114335385952641784 + 10281812076992462791, + 7165153365649379245, + 13022274058059396511, + 13989909544832134378 ], [ - 15094840473303346456, - 7747511060131015936, - 12772480149916714485, - 18349138296645060984 + 12027415465257630320, + 3276226892300848010, + 8686471638009106913, + 14892455799109213586 ], [ - 12531028814399847357, - 7203517905344132405, - 3061962363713004033, - 17452361121681943835 + 2589896055307349461, + 2860115159278340436, + 16194627146103061118, + 7076143423549975584 ], [ - 3835698399746542066, - 8245837273981884818, - 11550626417605245298, - 10420313830541187114 + 13667404340259521763, + 6297649363425018745, + 16167424072873520384, + 3830963799067739016 ], [ - 12555999497449784434, - 1714733525282428144, - 17219304496440144341, - 1381616758422006774 + 16665883187665722508, + 314738727176100190, + 4253386482569091860, + 1926299543937236525 ], [ - 9988238398674725191, - 10677391130703045133, - 11464212366701391798, - 273721172960421145 + 7820355437968047327, + 6794680285466534678, + 2978730525228113593, + 3621380956903574094 ], [ - 16336643358125081536, - 9704885119523966469, - 12504901168607854020, - 1365225079498514628 + 4838056840641790939, + 16842388520310131551, + 11612178730147038952, + 2346195292789238934 ], [ - 5689583869880509287, - 884372117462576406, - 12127613936064786875, - 11036164135756420898 + 17810776396991797874, + 12063662987004325494, + 17932676844730723250, + 14283996529720835225 ], [ - 3756438513920537389, - 16008730255465618263, - 5676153503855975547, - 8859399175348528504 + 4982429352434514173, + 14856186579270143608, + 4051922516960412257, + 8367898317160268319 ], [ - 13012737189692792855, - 1923486022173657097, - 13451763503173292053, - 18260610382109438664 + 14584337208407353036, + 15866593405986269360, + 11704298830630250400, + 14576621862375131798 ], [ - 7568548647776403884, - 15888201829935950536, - 14213035939781028448, - 557143869736885619 + 3101118738129024336, + 4028118980088627608, + 9223187088487468736, + 3845581921289713376 ], [ - 32671974711448889, - 2434480108628229517, - 1058613992685145857, - 12709975455363119775 + 1013819453591993424, + 13784105701097110976, + 9114286772222497781, + 10710488663310041007 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json 
b/prover/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json similarity index 73% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json index 59040a91d3e6..2823ffec627f 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json @@ -157,100 +157,100 @@ }, "setup_merkle_tree_cap": [ [ - 18353038824771834181, - 6180039936290620596, - 9566046629535886545, - 1194749282273701730 + 15152415627491818135, + 7374237605577523933, + 4465184016028924212, + 2035234088595773309 ], [ - 25931497575710606, - 4939580180726571178, - 9971692296422121050, - 10321682835888101865 + 5061557444963845839, + 12386849184623218277, + 13052284409578998174, + 2142376146067763313 ], [ - 14476617363912627618, - 18433740955496184028, - 1866402502799305529, - 13630516779228168836 + 14276061640225729061, + 3667833983304997823, + 15567500985315483409, + 10110252348727918570 ], [ - 5297717495027830927, - 10843206047833997321, - 12103660641452100213, - 962276586389403171 + 12534291856385391774, + 4931691717277748218, + 10385184271370113628, + 10333556044049798387 ], [ - 18191621713764018306, - 18034247738724721784, - 17062774240988584015, - 4793091222754364718 + 3902530530481162544, + 11439177580828992768, + 2956353447880097447, + 2078779115341733037 ], [ - 11975292609810709594, - 3410068686607534834, - 7176581702346144340, - 15010624823757225375 + 14361126694241130224, + 4508476390465174580, + 14814630315120762635, + 17198376643024594512 ], [ - 4386781545910081212, - 12096044536926128763, - 5099067130359909936, - 4702254698651040446 + 15399867818681370342, + 17613339377988571860, + 17463021656624906492, + 18043402438787219822 ], [ - 4564824446576585585, - 10282250482097501467, - 3576706676505948505, - 8070857080847133156 + 4721341021989730415, + 941889554702533152, + 4492052956383425703, + 11785343591616806540 ], [ - 4021025381485058227, - 15830498207667536258, - 11765654863279694638, - 8603645468978049764 + 14835583452718692456, + 9747287794601877160, + 13285319018669943605, + 15566660322778346733 ], [ - 12131291478449851192, - 884696930963928594, - 5603953053234603685, - 15160539006841845005 + 634069327924902339, + 7509671875950276664, + 17149763085975395897, + 17558106862399785122 ], [ - 137229235654780143, - 1982984178531442102, - 8969623252708511178, - 2605510294647382796 + 6504570481933973182, + 9863954755773054818, + 4192802816900646319, + 11708054020605968244 ], [ - 117138863215117992, - 11155889783333849588, - 11385131969922449424, - 2463919755275823513 + 5368022000476684675, + 11854447477637281190, + 773008757856055958, + 
7428874382179860306 ], [ - 3448822069412323905, - 6853611764127119403, - 1354074653325845412, - 10520097888465643033 + 820566450151427404, + 14487105988932071384, + 5168970873173217247, + 16840718205559266321 ], [ - 12126792712339142861, - 3208769323001970463, - 8541345094141085129, - 5739333931443919780 + 15018168499898445860, + 15893129254829262789, + 1267456796490088156, + 14049704864991807107 ], [ - 6375726425445642922, - 15388895865216583137, - 7439749375147960286, - 16154657507801467365 + 3678472314386256573, + 4482269767107891177, + 2891258367538769098, + 10249141358181035242 ], [ - 5187536080858526742, - 8938762330808016184, - 441459701363466307, - 11617235719453000530 + 1175499750244297798, + 7441679809319866074, + 15706614384330332074, + 12399917843582101807 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json 
similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json similarity index 74% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json index 1242852599aa..d117b3b0ade3 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json @@ -162,100 +162,100 @@ }, "setup_merkle_tree_cap": [ [ - 2430557199151730472, - 13851025833293931453, - 17414806893516082410, - 5061662678932524341 + 2692117301910616223, + 11862373594405581684, + 12092750955553531256, + 5108545912057877223 ], [ - 14805681278570572304, - 4387660327327875475, - 8111350906698296387, - 14076974072563325480 + 13191057082261271313, + 13461898869001565340, + 3144977546711926190, + 12887787173850649640 ], [ - 10148467155144800386, - 10828200166982881680, - 14976161870128776785, - 455207310829672888 + 8723842528870621436, + 9645915571392535116, + 6025366220407814286, + 4747467989456698429 ], [ - 1258747241285388938, - 14533915542892847175, - 11947483492490443559, - 18136160672641752159 + 7405718287975487752, + 15471426320802247554, + 8018397484818843188, + 1076292840128333912 ], [ - 18399548687624620221, - 1188113040663592329, - 14887047843403680931, - 14373371518617662284 + 15567092204640112640, + 13512378722158897717, + 16839350703341635379, + 6580006701757635256 ], [ - 8697036667032805923, - 6757156065833582242, - 6438944907880440651, - 4699569537038573259 + 478392572686859273, + 1879898158113218624, + 7956515309033309445, + 15667184770862761054 ], [ - 17755443425518132182, - 6748052206085081881, - 12550413684321582429, - 13208184919188659814 + 4738701842169478640, + 14432395387726327998, + 14827473518830139511, + 7026071202596813302 ], [ - 6748673664527653571, - 14319837795061250020, - 8674881656449995647, - 186839425215983320 + 1832914181581899534, + 12309614119776336180, + 1786307750405330285, + 9394109377731731297 ], [ - 4611201077078896801, - 12165300337241989192, - 6834829805650716536, - 7389817613944450096 + 11330017822804908986, + 17965075245400465236, + 178921019209832245, + 9010774195056378656 ], [ - 10116872626825123115, - 6146264092536253625, - 5929884222540147413, - 12657573273477702966 + 10066603459136751242, + 16922354046552351580, + 1488715132336554574, + 2488902959064634539 ], [ - 6925597909836314416, - 6304221625093437329, - 11202013801518338537, - 15296541511521458214 + 12764025501053651238, + 10583029583148326399, + 10919956138611547307, + 193732647159610859 ], [ - 14920901110496128138, - 13336137971580002245, - 2301350809681932102, - 10816850357256930117 + 10812330075474661907, + 11023893070918609227, + 14153054852108697346, + 3310659191720741717 ], [ - 3712128035625350334, - 4798834377815226954, - 9689670095699838466, - 13955528595570927929 + 12566885554555589997, + 5264949142237538963, + 10357889278039077105, + 1693879812388879198 ], [ - 480086305820392172, - 9166809339791846490, - 6250535256378342593, - 18200236880144340041 + 5143074524340781416, + 1340176837904332618, + 12593249647365922721, + 16619880365401544994 ], [ - 17764897482986219512, - 4500604943295237976, - 
3430272853973605048, - 17227997223311301571 + 8116207797925146203, + 2436416957055038167, + 1598938366845903588, + 7153648406343743028 ], [ - 12693631692464428736, - 14060221248394451382, - 6315214478974430097, - 10534028391088917480 + 14400322751382246405, + 4576201222988375875, + 10482138496908129257, + 1696076921104575474 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_node_key.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_node_key.json diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json similarity index 75% rename from core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json rename to prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json index 228a0e9fe926..f73530a2ca1a 100644 --- a/core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json +++ b/prover/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json @@ -170,100 +170,100 @@ }, "setup_merkle_tree_cap": [ [ - 
5188923951567823784, - 1839604164061896861, - 3760614143058722712, - 17614504340708244503 + 15230555879575926816, + 8670948681878777794, + 767116401361787080, + 13808751382541908272 ], [ - 7889899638667026800, - 14244435798393850379, - 15230145556400915502, - 12762495992738055897 + 1220486015450063297, + 9567900108378427313, + 18210974256257044632, + 18338726330920132716 ], [ - 1590798346328722678, - 14143092007536417439, - 10480901561038728792, - 3201431705395147463 + 7568154221767192295, + 11691578057855133612, + 9987210827513697170, + 17019942866370544662 ], [ - 2780378477031897976, - 11901528146276690135, - 1343277030558816196, - 6658753207411088573 + 14102673551475852761, + 3839757807646647049, + 8317169401280108378, + 14477318175428765566 ], [ - 11039463659901501365, - 8235548863391687887, - 1033553352576624721, - 12882010447949399432 + 10669246787368115713, + 11986124114638697341, + 373240888095551057, + 10600874540100090281 ], [ - 18078277235848158043, - 14794319235551634496, - 13982848369540832169, - 11146980369941489422 + 1967433817606548880, + 1252531621216687635, + 14092128528722989126, + 15316007954882781751 ], [ - 5423143341883663864, - 15258729611778297770, - 7733187200367671156, - 11434904591161598775 + 5731133612849813361, + 9439573956187051534, + 15220234372923263193, + 9871593147214385018 ], [ - 10914070908442174902, - 8055525792807466851, - 14391942428843610452, - 11749906933466154458 + 5432497552013782457, + 6217935098775351854, + 10788625265296640732, + 7626134139872594266 ], [ - 14580351359387308464, - 13254290427053014332, - 7257863927775762043, - 11078203905320069045 + 16209439837876908945, + 16958705495955599782, + 2620710932184338631, + 13207816187542048405 ], [ - 6123238811378029441, - 11756658038961859601, - 760000874907607862, - 678236515728235822 + 11540918781414391435, + 13215620469361541671, + 7261198944216226328, + 14101141177393020403 ], [ - 15657816790157674514, - 4104741954972330508, - 4150394799973679527, - 15124992265078810298 + 10951103916546600353, + 16291916249083597787, + 8020395928888095904, + 14831509381332343931 ], [ - 13825567788010925982, - 636544017935987097, - 2260460249587621344, - 10354042489703999934 + 14614496581821229034, + 570029684825245175, + 11368483572681932607, + 17857699424461379920 ], [ - 12710868603685796297, - 91862114057079406, - 5614554900380483346, - 131393259919990755 + 10549396205597068517, + 16251363364669954894, + 5619914240250798106, + 15384760685177493623 ], [ - 13185811107579017595, - 1006028503100864020, - 2087984259170414019, - 6445764843889735797 + 6443594760777705854, + 4350415958090847717, + 7924647710631862693, + 1595589969968983394 ], [ - 10414938568348349467, - 15415934042755645234, - 11692038010863343064, - 2402843492027871760 + 1575322136978699734, + 1714883637605030004, + 1403876268493429570, + 5816075577953274504 ], [ - 17752536940710015241, - 14329244239886245722, - 16349180633511906354, - 2663305413222761702 + 1910730620955478970, + 10199274156501303143, + 8240588740333284151, + 7977626984796160665 ] ] } diff --git a/core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json b/prover/vk_setup_data_generator_server_fri/data/witness_artifacts.json similarity index 100% rename from core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json rename to prover/vk_setup_data_generator_server_fri/data/witness_artifacts.json diff --git a/prover/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/vk_setup_data_generator_server_fri/src/commitment_generator.rs new file 
mode 100644 index 000000000000..71c595f47441 --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/commitment_generator.rs @@ -0,0 +1,60 @@ +use zkevm_test_harness::witness::recursive_aggregation::{ + compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, +}; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use zksync_prover_utils::vk_commitment_helper::{ + get_toml_formatted_value, read_contract_toml, write_contract_toml, +}; +use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; +use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; + +fn main() { + vlog::info!("Starting commitment generation!"); + read_and_update_contract_toml(); +} + +fn read_and_update_contract_toml() { + let mut contract_doc = read_contract_toml(); + let (leaf_aggregation_commitment_hex, node_aggregation_commitment_hex) = generate_commitments(); + contract_doc["contracts"]["FRI_VK_COMMITMENT_LEAF"] = + get_toml_formatted_value(leaf_aggregation_commitment_hex); + contract_doc["contracts"]["FRI_VK_COMMITMENT_NODE"] = + get_toml_formatted_value(node_aggregation_commitment_hex); + vlog::info!("Updated toml content: {:?}", contract_doc.to_string()); + write_contract_toml(contract_doc); +} + +fn generate_commitments() -> (String, String) { + let leaf_vk_params = get_leaf_vk_params(); + let leaf_layer_params = leaf_vk_params + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params); + + let node_vk = get_recursive_layer_vk_for_circuit_type( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ); + let node_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let leaf_aggregation_commitment_hex = format!("{:?}", leaf_vk_commitment); + let node_aggregation_commitment_hex = format!("{:?}", node_vk_commitment); + vlog::info!("leaf aggregation commitment {:?}", leaf_vk_commitment); + vlog::info!("node aggregation commitment {:?}", node_vk_commitment); + ( + leaf_aggregation_commitment_hex, + node_aggregation_commitment_hex, + ) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_read_and_update_contract_toml() { + read_and_update_contract_toml(); + } +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs b/prover/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs similarity index 98% rename from core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs rename to prover/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs index e2db59c79dfe..1ad3e873786c 100644 --- a/core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs +++ b/prover/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs @@ -1,13 +1,13 @@ -use circuit_definitions::circuit_definitions::base_layer::{ +use std::collections::HashMap; +use std::io::{Error, ErrorKind}; +use zkevm_test_harness::data_source::{BlockDataSource, SetupDataSource, SourceResult}; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ ZkSyncBaseLayerFinalizationHint, ZkSyncBaseLayerProof, ZkSyncBaseLayerVerificationKey, }; -use circuit_definitions::circuit_definitions::recursion_layer::{ +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ ZkSyncRecursionLayerFinalizationHint, ZkSyncRecursionLayerProof, 
ZkSyncRecursionLayerVerificationKey, }; -use std::collections::HashMap; -use std::io::{Error, ErrorKind}; -use zkevm_test_harness::data_source::{BlockDataSource, SetupDataSource, SourceResult}; pub struct InMemoryDataSource { ///data structures required for holding [`SetupDataSource`] result diff --git a/prover/vk_setup_data_generator_server_fri/src/lib.rs b/prover/vk_setup_data_generator_server_fri/src/lib.rs new file mode 100644 index 000000000000..8c3abe4a4272 --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/lib.rs @@ -0,0 +1,345 @@ +#![feature(generic_const_exprs)] +#![feature(allocator_api)] + +use std::fs; +use std::fs::File; +use std::io::Read; +use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; +use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GenericAlgebraicSponge; + +use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::hints::{ + DenseVariablesCopyHint, DenseWitnessCopyHint, +}; +use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::polynomial_storage::{ + SetupBaseStorage, SetupStorage, +}; +use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver; +use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::verifier::VerificationKey; +use zksync_prover_fri_types::circuit_definitions::boojum::cs::oracle::merkle_tree::MerkleTreeWithCap; +use zksync_prover_fri_types::circuit_definitions::boojum::cs::oracle::TreeHasher; +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::boojum::field::{PrimeField, SmallField}; + +use zksync_prover_fri_types::circuit_definitions::boojum::field::traits::field_like::PrimeFieldLikeVectorized; +use zksync_prover_fri_types::circuit_definitions::boojum::implementations::poseidon2::Poseidon2Goldilocks; +use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; + +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerCircuit, ZkSyncBaseLayerVerificationKey, +}; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, +}; +use zksync_prover_fri_types::circuit_definitions::{ + ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, +}; + +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use zkevm_test_harness::prover_utils::create_base_layer_setup_data; +use zksync_config::configs::FriProverConfig; +use zksync_types::proofs::AggregationRound; + +pub mod in_memory_setup_data_source; +pub mod utils; +use zksync_prover_fri_types::ProverServiceDataKey; +#[cfg(feature = "gpu")] +use {shivini::cs::GpuSetup, std::alloc::Global}; + +#[derive(Debug, Serialize, Deserialize)] +#[serde( + bound = "F: serde::Serialize + serde::de::DeserializeOwned, P: serde::Serialize + serde::de::DeserializeOwned" +)] +pub struct ProverSetupData< + F: PrimeField + SmallField, + P: PrimeFieldLikeVectorized<Base = F>, + H: TreeHasher<F>, +> { + pub setup_base: SetupBaseStorage<F, P>, + pub setup: SetupStorage<F, P>, + #[serde(bound( + serialize = "H::Output: serde::Serialize", + deserialize = "H::Output: serde::de::DeserializeOwned" + ))] + pub vk: VerificationKey<F, H>, 
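+    // setup_tree below repeats the explicit serde bounds used for vk above: H::Output is an associated type, so serde's derive cannot infer Serialize/Deserialize bounds for it on its own.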
#[serde(bound( + serialize = "H::Output: serde::Serialize", + deserialize = "H::Output: serde::de::DeserializeOwned" + ))] + pub setup_tree: MerkleTreeWithCap<F, H>, + pub vars_hint: DenseVariablesCopyHint, + pub wits_hint: DenseWitnessCopyHint, + pub finalization_hint: FinalizationHintsForProver, +} + +pub type GoldilocksProverSetupData = ProverSetupData< + GoldilocksField, + GoldilocksField, + GenericAlgebraicSponge< + GoldilocksField, + GoldilocksField, + 8, + 12, + 4, + Poseidon2Goldilocks, + AbsorptionModeOverwrite, + >, +>; + +#[cfg(feature = "gpu")] +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "F: serde::Serialize + serde::de::DeserializeOwned")] +pub struct GpuProverSetupData<F: SmallField, H: TreeHasher<F>> { + pub setup: GpuSetup<Global>, + #[serde(bound( + serialize = "H::Output: serde::Serialize", + deserialize = "H::Output: serde::de::DeserializeOwned" + ))] + pub vk: VerificationKey<F, H>, + pub finalization_hint: FinalizationHintsForProver, +} + +#[cfg(feature = "gpu")] +pub type GoldilocksGpuProverSetupData = GpuProverSetupData< + GoldilocksField, + GenericAlgebraicSponge< + GoldilocksField, + GoldilocksField, + 8, + 12, + 4, + Poseidon2Goldilocks, + AbsorptionModeOverwrite, + >, +>; + +pub enum ProverServiceDataType { + VerificationKey, + SetupData, + FinalizationHints, +} + +pub fn get_base_path() -> String { + let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); + format!( + "{}/prover/vk_setup_data_generator_server_fri/data", + zksync_home + ) +} + +pub fn get_file_path( + key: ProverServiceDataKey, + service_data_type: ProverServiceDataType, +) -> String { + let name = match key.round { + AggregationRound::BasicCircuits => { + format!("basic_{}", key.circuit_id) + } + AggregationRound::LeafAggregation => { + format!("leaf_{}", key.circuit_id) + } + AggregationRound::NodeAggregation => "node".to_string(), + AggregationRound::Scheduler => "scheduler".to_string(), + }; + match service_data_type { + ProverServiceDataType::VerificationKey => { + format!("{}/verification_{}_key.json", get_base_path(), name) + } + ProverServiceDataType::SetupData => { + format!( + "{}/setup_{}_data.bin", + FriProverConfig::from_env().setup_data_path, + name + ) + } + ProverServiceDataType::FinalizationHints => { + format!("{}/finalization_hints_{}.bin", get_base_path(), name) + } + } +} + +pub fn get_base_layer_vk_for_circuit_type(circuit_type: u8) -> ZkSyncBaseLayerVerificationKey { + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("Fetching verification key from path: {}", filepath); + let text = std::fs::read_to_string(&filepath) + .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); + serde_json::from_str::<ZkSyncBaseLayerVerificationKey>(&text).unwrap_or_else(|_| { + panic!( + "Failed deserializing verification key from path: {}", + filepath + ) + }) +} + +pub fn get_recursive_layer_vk_for_circuit_type( + circuit_type: u8, +) -> ZkSyncRecursionLayerVerificationKey { + let round = get_round_for_recursive_circuit_type(circuit_type); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, round), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("Fetching verification key from path: {}", filepath); + let text = std::fs::read_to_string(&filepath) + .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); + serde_json::from_str::<ZkSyncRecursionLayerVerificationKey>(&text).unwrap_or_else(|_| { + panic!( + "Failed deserializing verification key from path: {}",
filepath ) }) } + +pub fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound { + match circuit_type { + circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => { + AggregationRound::Scheduler + } + circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8 => { + AggregationRound::NodeAggregation + } + _ => AggregationRound::LeafAggregation, + } +} + +pub fn save_base_layer_vk(vk: ZkSyncBaseLayerVerificationKey) { + let circuit_type = vk.numeric_circuit_type(); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("saving basic verification key to: {}", filepath); + std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); +} + +pub fn save_recursive_layer_vk(vk: ZkSyncRecursionLayerVerificationKey) { + let circuit_type = vk.numeric_circuit_type(); + let round = get_round_for_recursive_circuit_type(circuit_type); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, round), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("saving recursive layer verification key to: {}", filepath); + std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); +} + +pub fn get_cpu_setup_data_for_circuit_type<F, P, H>( + key: ProverServiceDataKey, +) -> ProverSetupData<F, P, H> +where + F: PrimeField + SmallField + Serialize + DeserializeOwned, + P: PrimeFieldLikeVectorized<Base = F> + Serialize + DeserializeOwned, + H: TreeHasher<F>, + <H as TreeHasher<F>>::Output: Serialize + DeserializeOwned, +{ + let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); + let mut file = File::open(filepath.clone()) + .unwrap_or_else(|_| panic!("Failed reading setup-data from path: {:?}", filepath)); + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).unwrap_or_else(|_| { + panic!( + "Failed reading setup-data to buffer from path: {:?}", + filepath + ) + }); + vlog::info!("loading {:?} setup data from path: {}", key, filepath); + bincode::deserialize::<ProverSetupData<F, P, H>>(&buffer).unwrap_or_else(|_| { + panic!( + "Failed deserializing setup-data at path: {:?} for circuit: {:?}", + filepath, key + ) + }) +} + +#[cfg(feature = "gpu")] +pub fn get_setup_data_for_circuit_type<F, H>(key: ProverServiceDataKey) -> GpuProverSetupData<F, H> +where + F: PrimeField + SmallField + Serialize + DeserializeOwned, + H: TreeHasher<F>, + <H as TreeHasher<F>>::Output: Serialize + DeserializeOwned, +{ + let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); + let mut file = File::open(filepath.clone()) + .unwrap_or_else(|_| panic!("Failed reading setup-data from path: {:?}", filepath)); + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).unwrap_or_else(|_| { + panic!( + "Failed reading setup-data to buffer from path: {:?}", + filepath + ) + }); + vlog::info!("loading {:?} setup data from path: {}", key, filepath); + bincode::deserialize::<GpuProverSetupData<F, H>>(&buffer).unwrap_or_else(|_| { + panic!( + "Failed deserializing setup-data at path: {:?} for circuit: {:?}", + filepath, key + ) + }) +} + +pub fn save_setup_data(key: ProverServiceDataKey, serialized_setup_data: &Vec<u8>) { + let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); + vlog::info!("saving {:?} setup data to: {}", key, filepath); + std::fs::write(filepath.clone(), serialized_setup_data) + .unwrap_or_else(|_| panic!("Failed saving setup-data at path: {:?}", filepath)); +} + +pub fn generate_cpu_base_layer_setup_data( + circuit: 
ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle<GoldilocksField>, + ZkSyncDefaultRoundFunction, + >, +) -> GoldilocksProverSetupData { + let circuit_type = circuit.numeric_circuit_type(); + vlog::info!( + "starting setup data generator for base layer circuit: {}.", + circuit_type + ); + let worker = Worker::new(); + let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = + create_base_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + let key = ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits); + let existing_finalization_hint = get_finalization_hints(key); + assert_eq!( + existing_finalization_hint, finalization_hint, + "finalization hint mismatch for circuit: {circuit_type}" + ); + let existing_vk = get_base_layer_vk_for_circuit_type(circuit_type); + assert_eq!( + existing_vk.into_inner(), + vk, + "vk mismatch for circuit: {circuit_type}" + ); + ProverSetupData { + setup_base, + setup, + vk, + setup_tree, + vars_hint, + wits_hint, + finalization_hint, + } +} + +pub fn save_finalization_hints(key: ProverServiceDataKey, hint: &FinalizationHintsForProver) { + let filepath = get_file_path(key.clone(), ProverServiceDataType::FinalizationHints); + vlog::info!("saving finalization hints for {:?} to: {}", key, filepath); + let serialized = bincode::serialize(&hint).expect("Failed to serialize finalization hints"); + fs::write(filepath, serialized).expect("Failed to write finalization hints to file"); +} +pub fn get_finalization_hints(key: ProverServiceDataKey) -> FinalizationHintsForProver { + let filepath = get_file_path(key, ProverServiceDataType::FinalizationHints); + let file = fs::read(filepath).expect("Failed to read finalization hints from file"); + bincode::deserialize::<FinalizationHintsForProver>(&file) + .expect("Finalization hint deserialization failed") +} diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/vk_setup_data_generator_server_fri/src/main.rs new file mode 100644 index 000000000000..ed5834b3073f --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/main.rs @@ -0,0 +1,123 @@ +#![feature(generic_const_exprs)] + +use crate::in_memory_setup_data_source::InMemoryDataSource; +use zkevm_test_harness::compute_setups::{ + generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, +}; +use zkevm_test_harness::data_source::SetupDataSource; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use zksync_prover_fri_types::ProverServiceDataKey; +use zksync_types::proofs::AggregationRound; +use zksync_vk_setup_data_server_fri::{ + get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, + save_recursive_layer_vk, +}; + +mod in_memory_setup_data_source; +mod tests; +mod vk_generator; + +fn save_vks(source: &dyn SetupDataSource) { + for base_circuit_type in + (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) + { + let vk = source + .get_base_layer_vk(base_circuit_type) + .unwrap_or_else(|_| panic!("No vk exist for circuit type: {}", base_circuit_type)); + save_base_layer_vk(vk); + } + for leaf_circuit_type in (ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8) + ..=(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForL1MessagesHasher as u8) + { + let vk = source + .get_recursion_layer_vk(leaf_circuit_type) 
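+            // Key generation must have produced a vk for every leaf circuit in this range; a missing entry is unrecoverable, hence the panic below.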
.unwrap_or_else(|_| panic!("No vk exist for circuit type: {}", leaf_circuit_type)); + save_recursive_layer_vk(vk); + } + save_recursive_layer_vk( + source + .get_recursion_layer_node_vk() + .expect("No vk exist for node layer circuit"), + ); + save_recursive_layer_vk( + source + .get_recursion_layer_vk(ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8) + .expect("No vk exist for scheduler circuit"), + ); +} + +fn save_finalization_hints_using_source(source: &dyn SetupDataSource) { + for base_circuit_type in + (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) + { + let hint = source + .get_base_layer_finalization_hint(base_circuit_type) + .unwrap_or_else(|_| { + panic!( + "No finalization_hint exist for circuit type: {}", + base_circuit_type + ) + }) + .into_inner(); + let key = ProverServiceDataKey::new(base_circuit_type, AggregationRound::BasicCircuits); + save_finalization_hints(key, &hint); + } + for leaf_circuit_type in (ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8) + ..=(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForL1MessagesHasher as u8) + { + let hint = source + .get_recursion_layer_finalization_hint(leaf_circuit_type) + .unwrap_or_else(|_| { + panic!( + "No finalization hint exist for circuit type: {}", + leaf_circuit_type + ) + }) + .into_inner(); + let key = ProverServiceDataKey::new( + leaf_circuit_type, + get_round_for_recursive_circuit_type(leaf_circuit_type), + ); + save_finalization_hints(key, &hint); + } + + let node_hint = source + .get_recursion_layer_node_finalization_hint() + .expect("No finalization hint exist for node layer circuit") + .into_inner(); + save_finalization_hints( + ProverServiceDataKey::new( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + AggregationRound::NodeAggregation, + ), + &node_hint, + ); + + let scheduler_hint = source + .get_recursion_layer_finalization_hint( + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + ) + .expect("No finalization hint exist for scheduler layer circuit") + .into_inner(); + save_finalization_hints( + ProverServiceDataKey::new( + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + AggregationRound::Scheduler, + ), + &scheduler_hint, + ); +} + +fn generate_vks() { + let mut in_memory_source = InMemoryDataSource::new(); + generate_base_layer_vks_and_proofs(&mut in_memory_source).expect("Failed generating base vk's"); + generate_recursive_layer_vks_and_proofs(&mut in_memory_source) + .expect("Failed generating recursive vk's"); + save_finalization_hints_using_source(&in_memory_source); + save_vks(&in_memory_source); +} + +fn main() { + generate_vks(); +} diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs new file mode 100644 index 000000000000..c1c1502a3e9a --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs @@ -0,0 +1,184 @@ +use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; +use zksync_prover_fri_types::circuit_definitions::{ + 
ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, +}; + +use structopt::StructOpt; +use zkevm_test_harness::geometry_config::get_geometry_config; +use zkevm_test_harness::prover_utils::create_recursive_layer_setup_data; +use zksync_types::proofs::AggregationRound; +use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data; +use zksync_vk_setup_data_server_fri::utils::{ + get_basic_circuits, get_leaf_circuits, get_node_circuit, get_scheduler_circuit, CYCLE_LIMIT, +}; +use zksync_vk_setup_data_server_fri::{ + get_finalization_hints, get_recursive_layer_vk_for_circuit_type, + get_round_for_recursive_circuit_type, save_setup_data, GoldilocksProverSetupData, + ProverSetupData, +}; + +use zksync_prover_fri_types::ProverServiceDataKey; +#[cfg(feature = "gpu")] +use { + shivini::cs::setup::GpuSetup, shivini::ProverContext, + zksync_vk_setup_data_server_fri::GpuProverSetupData, +}; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "Generate setup data for individual circuit", + about = "Tool for generating setup data for individual circuit" +)] +struct Opt { + /// Numeric circuit type; valid values are + /// 1. for base layer [1-13]. + /// 2. for recursive layer [1-15]. + #[structopt(long)] + numeric_circuit: u8, + /// Boolean representing whether to generate for base layer or for recursive layer. + #[structopt(short = "b", long = "is_base_layer")] + is_base_layer: bool, +} + +fn main() { + let opt = Opt::from_args(); + + #[cfg(feature = "gpu")] + generate_gpu_setup_data(opt.is_base_layer, opt.numeric_circuit); + + #[cfg(not(feature = "gpu"))] + generate_cpu_setup_data(opt.is_base_layer, opt.numeric_circuit); +} + +fn generate_cpu_setup_data(is_base_layer: bool, numeric_circuit: u8) { + match is_base_layer { + true => { + let circuit = get_base_layer_circuit(numeric_circuit); + let prover_setup_data = generate_cpu_base_layer_setup_data(circuit); + let serialized = + bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); + save_setup_data( + ProverServiceDataKey::new(numeric_circuit, AggregationRound::BasicCircuits), + &serialized, + ); + } + false => { + let circuit = get_recursive_circuit(numeric_circuit); + let prover_setup_data = generate_cpu_recursive_layer_setup_data(circuit); + let serialized = + bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); + let round = get_round_for_recursive_circuit_type(numeric_circuit); + save_setup_data( + ProverServiceDataKey::new(numeric_circuit, round), + &serialized, + ); + } + } +} + +fn get_base_layer_circuit( + id: u8, +) -> ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle<GoldilocksField>, + ZkSyncDefaultRoundFunction, +> { + get_basic_circuits(CYCLE_LIMIT, get_geometry_config()) + .into_iter() + .find(|circuit| id == circuit.numeric_circuit_type()) + .unwrap_or_else(|| panic!("No basic circuit found for id: {}", id)) +} + +fn get_recursive_circuit(id: u8) -> ZkSyncRecursiveLayerCircuit { + let mut recursive_circuits = get_leaf_circuits(); + recursive_circuits.push(get_node_circuit()); + recursive_circuits.push(get_scheduler_circuit()); + recursive_circuits + .into_iter() + .find(|circuit| id == circuit.numeric_circuit_type()) + .unwrap_or_else(|| panic!("No recursive circuit found for id: {}", id)) +} + +fn generate_cpu_recursive_layer_setup_data( + circuit: ZkSyncRecursiveLayerCircuit, +) -> GoldilocksProverSetupData { + let circuit_type = circuit.numeric_circuit_type(); + vlog::info!( + "starting setup data generator for recursive layer circuit: {}.", 
circuit_type + ); + let worker = Worker::new(); + let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = + create_recursive_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + let key = ProverServiceDataKey::new( + circuit_type, + get_round_for_recursive_circuit_type(circuit_type), + ); + let existing_finalization_hint = get_finalization_hints(key); + assert_eq!( + existing_finalization_hint, finalization_hint, + "finalization hint mismatch for circuit: {circuit_type}" + ); + let existing_vk = get_recursive_layer_vk_for_circuit_type(circuit_type); + assert_eq!( + existing_vk.into_inner(), + vk, + "vk mismatch for circuit: {circuit_type}" + ); + ProverSetupData { + setup_base, + setup, + vk: vk.clone(), + setup_tree, + vars_hint, + wits_hint, + finalization_hint, + } +} + +#[cfg(feature = "gpu")] +fn generate_gpu_setup_data(is_base_layer: bool, numeric_circuit: u8) { + let context = ProverContext::create().expect("failed initializing gpu prover context"); + let (cpu_setup_data, round) = match is_base_layer { + true => { + let circuit = get_base_layer_circuit(numeric_circuit); + ( + generate_cpu_base_layer_setup_data(circuit.clone()), + AggregationRound::BasicCircuits, + ) + } + false => { + let circuit = get_recursive_circuit(numeric_circuit); + ( + generate_cpu_recursive_layer_setup_data(circuit.clone()), + get_round_for_recursive_circuit_type(numeric_circuit), + ) + } + }; + let gpu_setup_data = GpuSetup::from_setup_and_hints( + cpu_setup_data.setup_base, + cpu_setup_data.setup_tree, + cpu_setup_data.vars_hint.clone(), + ) + .expect("failed creating GPU base layer setup data"); + let gpu_prover_setup_data = GpuProverSetupData { + setup: gpu_setup_data, + vk: cpu_setup_data.vk, + finalization_hint: cpu_setup_data.finalization_hint, + }; + let serialized = + bincode::serialize(&gpu_prover_setup_data).expect("Failed serializing setup data"); + save_setup_data( + ProverServiceDataKey::new(numeric_circuit, round), + &serialized, + ); +} diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/vk_setup_data_generator_server_fri/src/tests.rs new file mode 100644 index 000000000000..126ab088ea3a --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/tests.rs @@ -0,0 +1,108 @@ +#[cfg(test)] +mod tests { + use proptest::prelude::*; + use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + }; + use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; + use zksync_prover_fri_types::ProverServiceDataKey; + use zksync_types::proofs::AggregationRound; + use zksync_vk_setup_data_server_fri::{ + get_base_layer_vk_for_circuit_type, get_base_path, get_file_path, get_finalization_hints, + get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, + ProverServiceDataType, + }; + + fn all_possible_prover_service_data_key() -> impl Strategy<Value = ProverServiceDataKey> { + let mut keys = Vec::with_capacity(30); + for circuit_type in 1..=13 { + keys.push(ProverServiceDataKey::new( + circuit_type, + AggregationRound::BasicCircuits, + )); + let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(circuit_type), + ) as u8; + keys.push(ProverServiceDataKey::new( + recursive_circuit_type, + AggregationRound::LeafAggregation, + )); + } + keys.push(ProverServiceDataKey::new(1, 
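+        // The fixed ids 1 and 2 below correspond to ZkSyncRecursionLayerStorageType::SchedulerCircuit and ::NodeLayerCircuit, matching get_round_for_recursive_circuit_type.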
diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/vk_setup_data_generator_server_fri/src/tests.rs new file mode 100644 index 000000000000..126ab088ea3a --- /dev/null +++ b/prover/vk_setup_data_generator_server_fri/src/tests.rs @@ -0,0 +1,108 @@ +#[cfg(test)] +mod tests { + use proptest::prelude::*; + use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + }; + use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; + use zksync_prover_fri_types::ProverServiceDataKey; + use zksync_types::proofs::AggregationRound; + use zksync_vk_setup_data_server_fri::{ + get_base_layer_vk_for_circuit_type, get_base_path, get_file_path, get_finalization_hints, + get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, + ProverServiceDataType, + }; + + fn all_possible_prover_service_data_key() -> impl Strategy<Value = ProverServiceDataKey> { + let mut keys = Vec::with_capacity(30); + for circuit_type in 1..=13 { + keys.push(ProverServiceDataKey::new( + circuit_type, + AggregationRound::BasicCircuits, + )); + let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(circuit_type), + ) as u8; + keys.push(ProverServiceDataKey::new( + recursive_circuit_type, + AggregationRound::LeafAggregation, + )); + } + keys.push(ProverServiceDataKey::new(1, AggregationRound::Scheduler)); + keys.push(ProverServiceDataKey::new( + 2, + AggregationRound::NodeAggregation, + )); + + prop::sample::select(keys) + } + + proptest! { + #[test] + fn test_get_base_layer_vk_for_circuit_type(circuit_id in 1u8..=13) { + let vk = get_base_layer_vk_for_circuit_type(circuit_id); + assert_eq!(circuit_id, vk.numeric_circuit_type()); + } + + #[test] + fn test_get_recursive_layer_vk_for_circuit_type(circuit_id in 1u8..=15) { + let vk = get_recursive_layer_vk_for_circuit_type(circuit_id); + assert_eq!(circuit_id, vk.numeric_circuit_type()); + } + + #[test] + fn test_get_finalization_hints(key in all_possible_prover_service_data_key()) { + let result = get_finalization_hints(key); + + assert!(!result.row_finalization_hints.is_empty(), "Row finalization hints should not be empty"); + assert!(!result.public_inputs.is_empty(), "Public inputs should not be empty"); + + assert!(result.nop_gates_to_add > 0, "Nop gates to add should be more than 0"); + assert!(result.final_trace_len > 0, "Final trace length should be more than 0"); + } + + } + + // Test get_base_path method + #[test] + fn test_get_base_path() { + let base_path = get_base_path(); + assert!(!base_path.is_empty(), "Base path should not be empty"); + } + + // Test get_file_path method + #[test] + fn test_get_file_path() { + let key = ProverServiceDataKey::new(1, AggregationRound::BasicCircuits); + let file_path = get_file_path(key, ProverServiceDataType::VerificationKey); + assert!(!file_path.is_empty(), "File path should not be empty"); + } + + // Test ProverServiceDataKey::new method + #[test] + fn test_proverservicedatakey_new() { + let key = ProverServiceDataKey::new(1, AggregationRound::BasicCircuits); + assert_eq!( + key.circuit_id, 1, + "Circuit id should be equal to the given value" + ); + assert_eq!( + key.round, + AggregationRound::BasicCircuits, + "Round should be equal to the given value" + ); + } + + // Test get_round_for_recursive_circuit_type method + #[test] + fn test_get_round_for_recursive_circuit_type() { + let round = get_round_for_recursive_circuit_type( + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + ); + assert_eq!( + round, + AggregationRound::Scheduler, + "Round should be scheduler" + ); + } +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/utils.rs b/prover/vk_setup_data_generator_server_fri/src/utils.rs similarity index 82% rename from core/bin/vk_setup_data_generator_server_fri/src/utils.rs rename to prover/vk_setup_data_generator_server_fri/src/utils.rs index 014aac6b2a40..de09c30b9660 100644 --- a/core/bin/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/vk_setup_data_generator_server_fri/src/utils.rs @@ -1,34 +1,34 @@ use crate::{ get_base_layer_vk_for_circuit_type, get_base_path, get_recursive_layer_vk_for_circuit_type, }; -use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use circuit_definitions::boojum::gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness; -use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use circuit_definitions::boojum::gadgets::traits::allocatable::CSAllocatable; -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; -use circuit_definitions::circuit_definitions::recursion_layer::leaf_layer::ZkSyncLeafLayerRecursiveCircuit; -use
circuit_definitions::circuit_definitions::recursion_layer::node_layer::ZkSyncNodeLayerRecursiveCircuit; -use circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; -use circuit_definitions::circuit_definitions::recursion_layer::{ +use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::traits::allocatable::CSAllocatable; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::leaf_layer::ZkSyncLeafLayerRecursiveCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::node_layer::ZkSyncNodeLayerRecursiveCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof, ZkSyncRecursiveLayerCircuit, RECURSION_ARITY, SCHEDULER_CAPACITY, }; -use circuit_definitions::zk_evm::bytecode_to_code_hash; -use circuit_definitions::zk_evm::testing::storage::InMemoryStorage; -use circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; -use circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::{ +use zksync_prover_fri_types::circuit_definitions::zk_evm::bytecode_to_code_hash; +use zksync_prover_fri_types::circuit_definitions::zk_evm::testing::storage::InMemoryStorage; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::{ RecursionLeafInput, RecursionLeafInstanceWitness, }; -use circuit_definitions::zkevm_circuits::recursion::leaf_layer::LeafLayerRecursionConfig; -use circuit_definitions::zkevm_circuits::recursion::node_layer::input::{ +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::LeafLayerRecursionConfig; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::node_layer::input::{ RecursionNodeInput, RecursionNodeInstanceWitness, }; -use circuit_definitions::zkevm_circuits::recursion::node_layer::NodeLayerRecursionConfig; -use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; -use circuit_definitions::{ +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::node_layer::NodeLayerRecursionConfig; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use 
zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; +use zksync_prover_fri_types::circuit_definitions::{ base_layer_proof_config, recursion_layer_proof_config, zk_evm, ZkSyncDefaultRoundFunction, }; use itertools::Itertools; diff --git a/core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs similarity index 55% rename from core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs rename to prover/vk_setup_data_generator_server_fri/src/vk_generator.rs index bd112dc4211a..6cd09f18a594 100644 --- a/core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs @@ -1,14 +1,21 @@ -use circuit_definitions::boojum::worker::Worker; -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; -use circuit_definitions::{BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR}; use zkevm_test_harness::geometry_config::get_geometry_config; use zkevm_test_harness::prover_utils::{ create_base_layer_setup_data, create_recursive_layer_setup_data, }; +use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; +use zksync_prover_fri_types::circuit_definitions::{ + BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, +}; use zksync_vk_setup_data_server_fri::utils::{get_basic_circuits, get_leaf_circuits, CYCLE_LIMIT}; -use zksync_vk_setup_data_server_fri::{save_base_layer_vk, save_recursive_layer_vk}; +use zksync_vk_setup_data_server_fri::{ + get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, + save_recursive_layer_vk, +}; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerVerificationKey; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerVerificationKey; +use zksync_prover_fri_types::ProverServiceDataKey; +use zksync_types::proofs::AggregationRound; fn main() { vlog::info!("starting vk generator"); @@ -19,7 +26,7 @@ pub fn generate_basic_circuit_vks() { let worker = Worker::new(); for circuit in get_basic_circuits(CYCLE_LIMIT, get_geometry_config()) { let circuit_type = circuit.numeric_circuit_type(); - let (_, _, vk, _, _, _, _) = create_base_layer_setup_data( + let (_, _, vk, _, _, _, finalization_hint) = create_base_layer_setup_data( circuit.clone(), &worker, BASE_LAYER_FRI_LDE_FACTOR, @@ -27,6 +34,8 @@ pub fn generate_basic_circuit_vks() { ); let typed_vk = ZkSyncBaseLayerVerificationKey::from_inner(circuit_type, vk); save_base_layer_vk(typed_vk); + let key = ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits); + save_finalization_hints(key, &finalization_hint); } } @@ -34,7 +43,7 @@ pub fn generate_leaf_layer_vks() { let worker = Worker::new(); for circuit in get_leaf_circuits() { let circuit_type = circuit.numeric_circuit_type(); - let (_setup_base, _setup, vk, _setup_tree, _vars_hint, _wits_hint, _finalization_hint) = + let (_setup_base, _setup, vk, _setup_tree, _vars_hint, _wits_hint, finalization_hint) = create_recursive_layer_setup_data( circuit.clone(), &worker, @@ -44,5 +53,10 @@ pub fn generate_leaf_layer_vks() { let typed_vk = ZkSyncRecursionLayerVerificationKey::from_inner(circuit_type, vk.clone()); save_recursive_layer_vk(typed_vk); + let key = ProverServiceDataKey::new( + circuit_type, + 
get_round_for_recursive_circuit_type(circuit_type), + ); + save_finalization_hints(key, &finalization_hint); } } diff --git a/core/bin/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml similarity index 55% rename from core/bin/witness_generator/Cargo.toml rename to prover/witness_generator/Cargo.toml index 46b0fc88d614..9c3f5ddc092d 100644 --- a/core/bin/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -10,24 +10,21 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] publish = false # We don't want to publish our binaries. -[lib] -name = "zksync_witness_utils" -path = "src/utils.rs" - - [dependencies] -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } -vm = { path = "../../lib/vm", version = "0.1.0" } -zksync_object_store = { path = "../../lib/object_store", version = "1.0" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_state = { path = "../../lib/state", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } +zksync_dal = { path = "../../core/lib/dal", version = "1.0" } +zksync_config = { path = "../../core/lib/config", version = "1.0" } +prometheus_exporter = { path = "../../core/lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../core/lib/vlog", version = "1.0" } +zksync_queued_job_processor = { path = "../../core/lib/queued_job_processor", version = "1.0" } +vm = { path = "../../core/lib/vm", version = "0.1.0" } +zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" } +zksync_types = { path = "../../core/lib/types", version = "1.0" } +zksync_state = { path = "../../core/lib/state", version = "1.0" } +zksync_utils = { path = "../../core/lib/utils", version = "1.0" } vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" } -zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } +zksync_prover_utils = { path = "../../core/lib/prover_utils", version = "1.0" } +zksync_prover_fri_types = { path = "../prover_fri_types", version = "1.0" } +zksync_prover_fri_utils = { path = "../prover_fri_utils", version = "1.0" } zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]} diff --git a/core/bin/witness_generator/README.md b/prover/witness_generator/README.md similarity index 96% rename from core/bin/witness_generator/README.md rename to prover/witness_generator/README.md index 0600487e3d0f..9d35fe7e054a 100644 --- a/core/bin/witness_generator/README.md +++ b/prover/witness_generator/README.md @@ -15,7 +15,7 @@ aggregation. 
That is, every aggregation round needs two sets of input: ## BasicCircuitsWitnessGenerator - generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits): -- input table: `basic_circuit_witness_jobs` +- input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`) - artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and `scheduler_aggregation_jobs`) value in `aggregation_round` field of `prover_jobs` table: 0 diff --git a/core/bin/witness_generator/rust-toolchain.toml b/prover/witness_generator/rust-toolchain.toml similarity index 100% rename from core/bin/witness_generator/rust-toolchain.toml rename to prover/witness_generator/rust-toolchain.toml diff --git a/core/bin/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs similarity index 94% rename from core/bin/witness_generator/src/basic_circuits.rs rename to prover/witness_generator/src/basic_circuits.rs index 8ad7d30c9a3d..569758e34323 100644 --- a/core/bin/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -7,19 +7,19 @@ use std::{ }; use async_trait::async_trait; -use circuit_definitions::ZkSyncDefaultRoundFunction; +use zksync_prover_fri_types::circuit_definitions::ZkSyncDefaultRoundFunction; use rand::Rng; use serde::{Deserialize, Serialize}; -use zkevm_test_harness::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use zkevm_test_harness::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; use zkevm_test_harness::geometry_config::get_geometry_config; use zkevm_test_harness::toolset::GeometryConfig; use zkevm_test_harness::witness::full_block_artifact::{ BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, BlockBasicCircuitsPublicInputs, }; -use zkevm_test_harness::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; -use zkevm_test_harness::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; use vm::{HistoryDisabled, StorageOracle, MAX_CYCLES_FOR_TX}; use zksync_config::configs::FriWitnessGeneratorConfig; @@ -28,6 +28,7 @@ use zksync_dal::ConnectionPool; use zksync_object_store::{ Bucket, ClosedFormInputKey, ObjectStore, ObjectStoreFactory, StoredObject, }; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView}; use zksync_types::proofs::AggregationRound; @@ -39,9 +40,8 @@ use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use crate::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; use crate::utils::{ - expand_bootloader_contents, get_recursive_layer_circuit_id_for_base_layer, - save_base_prover_input_artifacts, AuxOutputWitnessWrapper, ClosedFormInputWrapper, - SchedulerPartialInputWrapper, + expand_bootloader_contents, save_base_prover_input_artifacts, AuxOutputWitnessWrapper, + ClosedFormInputWrapper, 
SchedulerPartialInputWrapper, }; pub struct BasicCircuitArtifacts { @@ -409,31 +409,36 @@ async fn save_leaf_aggregation_artifacts( async fn build_basic_circuits_witness_generator_input( connection_pool: &ConnectionPool, witness_merkle_input: PrepareBasicCircuitsJob, - block_number: L1BatchNumber, + l1_batch_number: L1BatchNumber, ) -> BasicCircuitWitnessGeneratorInput { let mut connection = connection_pool.access_storage().await; let block_header = connection .blocks_dal() - .get_block_header(block_number) + .get_l1_batch_header(l1_batch_number) + .await + .unwrap(); + let initial_heap_content = connection + .blocks_dal() + .get_initial_bootloader_heap(l1_batch_number) .await .unwrap(); - let previous_block_header = connection + let (_, previous_block_timestamp) = connection .blocks_dal() - .get_block_header(block_number - 1) + .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1) .await .unwrap(); let previous_block_hash = connection .blocks_dal() - .get_block_state_root(block_number - 1) + .get_l1_batch_state_root(l1_batch_number - 1) .await .expect("cannot generate witness before the root hash is computed"); BasicCircuitWitnessGeneratorInput { - block_number, - previous_block_timestamp: previous_block_header.timestamp, + block_number: l1_batch_number, + previous_block_timestamp, previous_block_hash, block_timestamp: block_header.timestamp, used_bytecodes_hashes: block_header.used_contract_hashes, - initial_heap_content: block_header.initial_bootloader_contents, + initial_heap_content, merkle_paths_input: witness_merkle_input, } } @@ -457,7 +462,7 @@ async fn generate_witness( let mut connection = connection_pool.access_storage().await; let header = connection .blocks_dal() - .get_block_header(input.block_number) + .get_l1_batch_header(input.block_number) .await .unwrap(); let bootloader_code_bytes = connection diff --git a/core/bin/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs similarity index 88% rename from core/bin/witness_generator/src/leaf_aggregation.rs rename to prover/witness_generator/src/leaf_aggregation.rs index 7818c326dff7..de1665f95213 100644 --- a/core/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -5,25 +5,26 @@ use zkevm_test_harness::witness::recursive_aggregation::{ use std::time::Instant; use async_trait::async_trait; -use circuit_definitions::circuit_definitions::base_layer::{ +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, ZkSyncBaseLayerVerificationKey, }; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; -use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; -use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; +use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; +use zksync_prover_fri_types::FriProofWrapper; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_vk_setup_data_server_fri::{ get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type, }; use crate::utils::{ - get_recursive_layer_circuit_id_for_base_layer, load_proofs_for_job_ids, - 
save_node_aggregations_artifacts, save_recursive_layer_prover_input_artifacts, - ClosedFormInputWrapper, FriProofWrapper, + load_proofs_for_job_ids, save_node_aggregations_artifacts, + save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, }; -use zkevm_test_harness::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{ClosedFormInputKey, ObjectStore, ObjectStoreFactory}; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; use zksync_queued_job_processor::JobProcessor; use zksync_types::proofs::{AggregationRound, LeafAggregationJobMetadata}; use zksync_types::L1BatchNumber; @@ -31,7 +32,7 @@ use zksync_types::L1BatchNumber; pub struct LeafAggregationArtifacts { circuit_id: u8, block_number: L1BatchNumber, - aggregations: Vec<( + pub aggregations: Vec<( u64, RecursionQueueSimulator<GoldilocksField>, ZkSyncRecursiveLayerCircuit, @@ -47,12 +48,12 @@ struct BlobUrls { } pub struct LeafAggregationWitnessGeneratorJob { - circuit_id: u8, - block_number: L1BatchNumber, - closed_form_inputs: ClosedFormInputWrapper, - proofs: Vec<ZkSyncBaseLayerProof>, - base_vk: ZkSyncBaseLayerVerificationKey, - leaf_params: RecursionLeafParametersWitness<GoldilocksField>, + pub(crate) circuit_id: u8, + pub(crate) block_number: L1BatchNumber, + pub(crate) closed_form_inputs: ClosedFormInputWrapper, + pub(crate) proofs: Vec<ZkSyncBaseLayerProof>, + pub(crate) base_vk: ZkSyncBaseLayerVerificationKey, + pub(crate) leaf_params: RecursionLeafParametersWitness<GoldilocksField>, } #[derive(Debug)] @@ -76,7 +77,7 @@ impl LeafAggregationWitnessGenerator { } } - fn process_job_sync( + pub fn process_job_sync( leaf_job: LeafAggregationWitnessGeneratorJob, started_at: Instant, ) -> LeafAggregationArtifacts { @@ -150,7 +151,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { } } -async fn prepare_leaf_aggregation_job( +pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, ) -> LeafAggregationWitnessGeneratorJob { diff --git a/prover/witness_generator/src/lib.rs b/prover/witness_generator/src/lib.rs new file mode 100644 index 000000000000..e74077dd44fc --- /dev/null +++ b/prover/witness_generator/src/lib.rs @@ -0,0 +1,4 @@ +pub mod leaf_aggregation; +pub mod node_aggregation; +pub mod scheduler; +pub mod utils; diff --git a/core/bin/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs similarity index 96% rename from core/bin/witness_generator/src/main.rs rename to prover/witness_generator/src/main.rs index 03eea4251f8a..5ac7890daaca 100644 --- a/core/bin/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -64,8 +64,8 @@ async fn main() { let store_factory = ObjectStoreFactory::from_env(); let config = FriWitnessGeneratorConfig::from_env(); let prometheus_config = PrometheusConfig::from_env(); - let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; - let prover_connection_pool = ConnectionPool::new(None, DbVariant::Prover).await; + let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; + let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover).build().await; let (stop_sender, stop_receiver) = watch::channel(false); let witness_generator_task = match opt.round {
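The `main.rs` hunk above ends right at the dispatch on `opt.round`; the match arms themselves fall outside the diff context. Roughly, each aggregation round is served by its own `JobProcessor` implementation from the modules exported in `lib.rs` above. An illustrative sketch only — the generator values are assumed to be constructed beforehand, and `run` is the `JobProcessor` entry point taking the stop receiver plus an optional iteration cap:

```rust
// Illustrative only: the real match arms are elided from this diff's context.
// Exactly one arm executes, so moving `stop_receiver` in each arm is fine.
let witness_generator_task = match opt.round {
    AggregationRound::BasicCircuits => tokio::spawn(basic_circuits_generator.run(stop_receiver, None)),
    AggregationRound::LeafAggregation => tokio::spawn(leaf_aggregation_generator.run(stop_receiver, None)),
    AggregationRound::NodeAggregation => tokio::spawn(node_aggregation_generator.run(stop_receiver, None)),
    AggregationRound::Scheduler => tokio::spawn(scheduler_generator.run(stop_receiver, None)),
};
```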
diff --git a/core/bin/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs similarity index 95% rename from core/bin/witness_generator/src/node_aggregation.rs rename to prover/witness_generator/src/node_aggregation.rs index c71f70c51f22..26c2e29e0d30 100644 --- a/core/bin/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -1,26 +1,27 @@ use std::time::Instant; use async_trait::async_trait; -use circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use circuit_definitions::circuit_definitions::recursion_layer::{ +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, }; -use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; +use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witnesses, }; -use zkevm_test_harness::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; use crate::utils::{ load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, AggregationWrapper, FriProofWrapper, + save_recursive_layer_prover_input_artifacts, AggregationWrapper, }; use zksync_dal::ConnectionPool; use zksync_object_store::{AggregationsKey, ObjectStore, ObjectStoreFactory}; +use zksync_prover_fri_types::FriProofWrapper; use zksync_queued_job_processor::JobProcessor; use zksync_types::proofs::NodeAggregationJobMetadata; use zksync_types::{proofs::AggregationRound, L1BatchNumber}; @@ -29,7 +30,7 @@ pub struct NodeAggregationArtifacts { circuit_id: u8, block_number: L1BatchNumber, depth: u16, - next_aggregations: Vec<( + pub next_aggregations: Vec<( u64, RecursionQueueSimulator<GoldilocksField>, ZkSyncRecursiveLayerCircuit, @@ -75,7 +76,7 @@ impl NodeAggregationWitnessGenerator { } } - fn process_job_sync( + pub fn process_job_sync( job: NodeAggregationWitnessGeneratorJob, started_at: Instant, ) -> NodeAggregationArtifacts { @@ -185,7 +186,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { } } -async fn prepare_job( +pub async fn prepare_job( metadata: NodeAggregationJobMetadata, object_store: &dyn ObjectStore, ) -> NodeAggregationWitnessGeneratorJob { diff --git a/core/bin/witness_generator/src/precalculated/tests.rs b/prover/witness_generator/src/precalculated/tests.rs similarity index 100% rename from core/bin/witness_generator/src/precalculated/tests.rs rename to prover/witness_generator/src/precalculated/tests.rs diff --git a/core/bin/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs similarity index 100% rename from core/bin/witness_generator/src/precalculated_merkle_paths_provider.rs rename to prover/witness_generator/src/precalculated_merkle_paths_provider.rs diff --git a/core/bin/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs similarity index 79% rename from core/bin/witness_generator/src/scheduler.rs rename to prover/witness_generator/src/scheduler.rs index 152179a6a5d3..b214d46f2f2a
100644 --- a/core/bin/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -3,30 +3,29 @@ use std::convert::TryInto; use std::time::Instant; use async_trait::async_trait; -use circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; -use circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, ZkSyncRecursionProof, +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, }; -use circuit_definitions::recursion_layer_proof_config; -use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; +use zksync_prover_fri_types::circuit_definitions::recursion_layer_proof_config; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; -use crate::utils::{ - load_proofs_for_job_ids, CircuitWrapper, FriProofWrapper, SchedulerPartialInputWrapper, -}; +use crate::utils::{load_proofs_for_job_ids, SchedulerPartialInputWrapper}; use zksync_dal::ConnectionPool; use zksync_object_store::{FriCircuitKey, ObjectStore, ObjectStoreFactory}; +use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper}; use zksync_queued_job_processor::JobProcessor; use zksync_types::proofs::AggregationRound; use zksync_types::L1BatchNumber; pub struct SchedulerArtifacts { - scheduler_circuit: ZkSyncRecursiveLayerCircuit, + pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, } #[derive(Clone)] @@ -57,7 +56,7 @@ impl SchedulerWitnessGenerator { } } - fn process_job_sync( + pub fn process_job_sync( job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> SchedulerArtifacts { @@ -116,28 +115,10 @@ impl JobProcessor for SchedulerWitnessGenerator { .fri_scheduler_dependency_tracker_dal() .get_final_prover_job_ids_for(l1_batch_number) .await; - let started_at = Instant::now(); - let proofs = load_proofs_for_job_ids(&proof_job_ids, &*self.object_store).await; - metrics::histogram!( - "prover_fri.witness_generation.blob_fetch_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), - ); - let recursive_proofs = proofs - .into_iter() - .map(|wrapper| match wrapper { - FriProofWrapper::Base(_) => { - panic!( - "Expected only recursive proofs for scheduler l1 batch {}", - l1_batch_number - ) - } - FriProofWrapper::Recursive(recursive_proof) => recursive_proof.into_inner(), - }) - .collect::<Vec<_>>(); + Some(( l1_batch_number, - prepare_job(l1_batch_number,
recursive_proofs, &*self.object_store).await, + prepare_job(l1_batch_number, proof_job_ids, &*self.object_store).await, )) } @@ -208,11 +189,31 @@ impl JobProcessor for SchedulerWitnessGenerator { } } -async fn prepare_job( +pub async fn prepare_job( l1_batch_number: L1BatchNumber, - proofs: Vec<ZkSyncRecursionProof>, + proof_job_ids: [u32; 13], object_store: &dyn ObjectStore, ) -> SchedulerWitnessGeneratorJob { + let started_at = Instant::now(); + let proofs = load_proofs_for_job_ids(&proof_job_ids, object_store).await; + metrics::histogram!( + "prover_fri.witness_generation.blob_fetch_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), + ); + let recursive_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(_) => { + panic!( + "Expected only recursive proofs for scheduler l1 batch {}", + l1_batch_number + ) + } + FriProofWrapper::Recursive(recursive_proof) => recursive_proof.into_inner(), + }) + .collect::<Vec<_>>(); + let started_at = Instant::now(); let node_vk = get_recursive_layer_vk_for_circuit_type( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, @@ -221,7 +222,7 @@ async fn prepare_job( object_store.get(l1_batch_number).await.unwrap(); scheduler_witness.node_layer_vk_witness = node_vk.clone().into_inner(); - scheduler_witness.proof_witnesses = proofs.into(); + scheduler_witness.proof_witnesses = recursive_proofs.into(); let leaf_vk_commits = get_leaf_vk_params(); let leaf_layer_params = leaf_vk_commits diff --git a/core/bin/witness_generator/src/utils.rs b/prover/witness_generator/src/utils.rs similarity index 69% rename from core/bin/witness_generator/src/utils.rs rename to prover/witness_generator/src/utils.rs index f06e9eca4bb0..7cff834a3258 100644 --- a/core/bin/witness_generator/src/utils.rs +++ b/prover/witness_generator/src/utils.rs @@ -1,20 +1,18 @@ -use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use circuit_definitions::boojum::field::goldilocks::GoldilocksExt2; -use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerCircuit, ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksExt2; +use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerClosedFormInput, }; -use circuit_definitions::circuit_definitions::recursion_layer::{ - base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerProof, - ZkSyncRecursionLayerStorageType, ZkSyncRecursiveLayerCircuit, +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursiveLayerCircuit, }; -use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; -use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use circuit_definitions::ZkSyncDefaultRoundFunction; +use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; + use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; use zkevm_test_harness::witness::full_block_artifact::BlockBasicCircuits; +use
zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use zksync_prover_fri_types::circuit_definitions::ZkSyncDefaultRoundFunction; use zkevm_test_harness::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; use zksync_config::constants::USED_BOOTLOADER_MEMORY_BYTES; @@ -22,6 +20,7 @@ use zksync_object_store::{ serialize_using_bincode, AggregationsKey, Bucket, ClosedFormInputKey, FriCircuitKey, ObjectStore, StoredObject, }; +use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper}; use zksync_types::proofs::AggregationRound; use zksync_types::{L1BatchNumber, U256}; @@ -36,36 +35,6 @@ pub fn expand_bootloader_contents(packed: &[(usize, U256)]) -> Vec<u8> { result.to_vec() } -#[derive(serde::Serialize, serde::Deserialize)] -pub enum CircuitWrapper { - Base( - ZkSyncBaseLayerCircuit< - GoldilocksField, - VmWitnessOracle<GoldilocksField>, - ZkSyncDefaultRoundFunction, - >, - ), - Recursive(ZkSyncRecursiveLayerCircuit), -} - -impl StoredObject for CircuitWrapper { - const BUCKET: Bucket = Bucket::ProverJobsFri; - type Key<'a> = FriCircuitKey; - - fn encode_key(key: Self::Key<'_>) -> String { - let FriCircuitKey { - block_number, - sequence_number, - circuit_id, - aggregation_round, - depth, - } = key; - format!("{block_number}_{sequence_number}_{circuit_id}_{aggregation_round:?}_{depth}.bin") - } - - serialize_using_bincode!(); -} - #[derive(serde::Serialize, serde::Deserialize)] pub struct ClosedFormInputWrapper( pub(crate) Vec<ZkSyncBaseLayerClosedFormInput<GoldilocksField>>, @@ -132,23 +101,6 @@ impl StoredObject for SchedulerPartialInputWrapper { serialize_using_bincode!(); } -#[derive(serde::Serialize, serde::Deserialize)] -pub enum FriProofWrapper { - Base(ZkSyncBaseLayerProof), - Recursive(ZkSyncRecursionLayerProof), -} - -impl StoredObject for FriProofWrapper { - const BUCKET: Bucket = Bucket::ProofsFri; - type Key<'a> = u32; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("proof_{key}.bin") - } - - serialize_using_bincode!(); -} - #[derive(serde::Serialize, serde::Deserialize)] pub struct AuxOutputWitnessWrapper(pub BlockAuxilaryOutputWitness<GoldilocksField>); @@ -242,17 +194,6 @@ pub async fn save_node_aggregations_artifacts( .unwrap() } -pub fn get_recursive_layer_circuit_id_for_base_layer(base_layer_circuit_id: u8) -> u8 { - let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type( - BaseLayerCircuitType::from_numeric_value(base_layer_circuit_id), - ); - recursive_circuit_type as u8 -} - -pub fn get_base_layer_circuit_id_for_recursive_layer(recursive_layer_circuit_id: u8) -> u8 { - recursive_layer_circuit_id - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8 -} - pub async fn load_proofs_for_job_ids( job_ids: &[u32], object_store: &dyn ObjectStore, diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs new file mode 100644 index 000000000000..ad328f5a2cfa --- /dev/null +++ b/prover/witness_generator/tests/basic_test.rs @@ -0,0 +1,125 @@ +use std::time::Instant; + +use serde::Serialize; +use zksync_config::ObjectStoreConfig; +use zksync_object_store::{AggregationsKey, FriCircuitKey, ObjectStoreFactory}; +use zksync_types::proofs::{ + AggregationRound, LeafAggregationJobMetadata, NodeAggregationJobMetadata, +}; +use zksync_types::L1BatchNumber; + +use zksync_prover_fri_types::CircuitWrapper; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_witness_generator::leaf_aggregation::{ + prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator, +}; +use
zksync_witness_generator::node_aggregation::NodeAggregationWitnessGenerator; +use zksync_witness_generator::scheduler::SchedulerWitnessGenerator; +use zksync_witness_generator::utils::AggregationWrapper; +use zksync_witness_generator::{node_aggregation, scheduler}; + +fn compare_serialized<T: Serialize>(expected: &T, actual: &T) { + let serialized_expected = bincode::serialize(expected).unwrap(); + let serialized_actual = bincode::serialize(actual).unwrap(); + assert_eq!(serialized_expected, serialized_actual); +} + +#[tokio::test] +async fn test_leaf_witness_gen() { + let mut object_store_config = ObjectStoreConfig::from_env(); + object_store_config.file_backed_base_path = "./tests/data/leaf/".to_owned(); + let object_store = ObjectStoreFactory::new(object_store_config) + .create_store() + .await; + + let circuit_id = 4; + let block_number = L1BatchNumber(125010); + let key = AggregationsKey { + block_number, + circuit_id: get_recursive_layer_circuit_id_for_base_layer(circuit_id), + depth: 0, + }; + let expected_aggregation = object_store + .get::<AggregationWrapper>(key) + .await + .expect("expected aggregation missing"); + let leaf_aggregation_job_metadata = LeafAggregationJobMetadata { + id: 1, + block_number, + circuit_id, + prover_job_ids_for_proofs: vec![4639043, 4639044, 4639045], + }; + + let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store).await; + + let artifacts = LeafAggregationWitnessGenerator::process_job_sync(job, Instant::now()); + let aggregations = AggregationWrapper(artifacts.aggregations); + compare_serialized(&expected_aggregation, &aggregations); +} + +#[tokio::test] +#[ignore] // re-enable with new artifacts +async fn test_node_witness_gen() { + let mut object_store_config = ObjectStoreConfig::from_env(); + object_store_config.file_backed_base_path = "./tests/data/node/".to_owned(); + let object_store = ObjectStoreFactory::new(object_store_config) + .create_store() + .await; + + let circuit_id = 8; + let block_number = L1BatchNumber(127856); + let key = AggregationsKey { + block_number, + circuit_id, + depth: 1, + }; + let expected_aggregation = object_store + .get::<AggregationWrapper>(key) + .await + .expect("expected aggregation missing"); + let node_aggregation_job_metadata = NodeAggregationJobMetadata { + id: 1, + block_number, + circuit_id, + depth: 0, + prover_job_ids_for_proofs: vec![5211320], + }; + + let job = node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store).await; + + let artifacts = NodeAggregationWitnessGenerator::process_job_sync(job, Instant::now()); + let aggregations = AggregationWrapper(artifacts.next_aggregations); + compare_serialized(&expected_aggregation, &aggregations); +} + +#[tokio::test] +#[ignore] // re-enable with new artifacts +async fn test_scheduler_witness_gen() { + let mut object_store_config = ObjectStoreConfig::from_env(); + object_store_config.file_backed_base_path = "./tests/data/scheduler/".to_owned(); + let object_store = ObjectStoreFactory::new(object_store_config) + .create_store() + .await; + let block_number = L1BatchNumber(128599); + let key = FriCircuitKey { + block_number, + circuit_id: 1, + sequence_number: 0, + depth: 0, + aggregation_round: AggregationRound::Scheduler, + }; + let expected_circuit = object_store + .get(key) + .await + .expect("expected scheduler circuit missing"); + let proof_job_ids = [ + 5639969, 5627082, 5627084, 5627083, 5627086, 5627085, 5631320, 5627090, 5627091, 5627092, + 5627093, 5627094, 5629097, + ]; + + let job = scheduler::prepare_job(block_number, proof_job_ids,
&*object_store).await; + + let artifacts = SchedulerWitnessGenerator::process_job_sync(job, Instant::now()); + let circuit = CircuitWrapper::Recursive(artifacts.scheduler_circuit); + compare_serialized(&expected_circuit, &circuit); +} diff --git a/prover/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin b/prover/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin new file mode 100644 index 000000000000..fbbc2ddf9d60 Binary files /dev/null and b/prover/witness_generator/tests/data/leaf/leaf_aggregation_witness_jobs_fri/closed_form_inputs_125010_4.bin differ diff --git a/prover/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin b/prover/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin new file mode 100644 index 000000000000..88056c4d43b2 Binary files /dev/null and b/prover/witness_generator/tests/data/leaf/node_aggregation_witness_jobs_fri/aggregations_125010_6_0.bin differ diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin new file mode 100644 index 000000000000..33cf91d0a0cf Binary files /dev/null and b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639043.bin differ diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin new file mode 100644 index 000000000000..b7bc18275fa1 Binary files /dev/null and b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639044.bin differ diff --git a/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin new file mode 100644 index 000000000000..cd7ce1b31e7b Binary files /dev/null and b/prover/witness_generator/tests/data/leaf/proofs_fri/proof_4639045.bin differ diff --git a/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin b/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin new file mode 100644 index 000000000000..49efbb500e01 Binary files /dev/null and b/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_0.bin differ diff --git a/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin b/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin new file mode 100644 index 000000000000..2f3ea43b1df8 Binary files /dev/null and b/prover/witness_generator/tests/data/node/node_aggregation_witness_jobs_fri/aggregations_127856_8_1.bin differ diff --git a/prover/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin b/prover/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin new file mode 100644 index 000000000000..db3ba392658b Binary files /dev/null and b/prover/witness_generator/tests/data/node/proofs_fri/proof_5211320.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin new file mode 100644 index 000000000000..633280c683f9 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627082.bin differ diff --git 
a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin new file mode 100644 index 000000000000..32c83fa1738c Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627083.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin new file mode 100644 index 000000000000..56d64de891ba Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627084.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin new file mode 100644 index 000000000000..f158420ae1da Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627085.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin new file mode 100644 index 000000000000..7d06213b71cb Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627086.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin new file mode 100644 index 000000000000..f6c405290c09 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627090.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin new file mode 100644 index 000000000000..6942d2286390 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627091.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin new file mode 100644 index 000000000000..b4bfe15a3d01 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627092.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin new file mode 100644 index 000000000000..6b9234a54e20 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627093.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin new file mode 100644 index 000000000000..6e218471c364 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5627094.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin new file mode 100644 index 000000000000..3e9a2bfa60f7 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5629097.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin new file mode 100644 index 000000000000..f5a449e9e628 Binary files /dev/null and 
b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5631320.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin new file mode 100644 index 000000000000..7f8d408cb45b Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/proofs_fri/proof_5639969.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin b/prover/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin new file mode 100644 index 000000000000..87a7ff4d3752 Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/prover_jobs_fri/128599_0_1_Scheduler_0.bin differ diff --git a/prover/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin b/prover/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin new file mode 100644 index 000000000000..ef71a25737ab Binary files /dev/null and b/prover/witness_generator/tests/data/scheduler/scheduler_witness_jobs_fri/scheduler_witness_128599.bin differ diff --git a/prover/witness_vector_generator/Cargo.toml b/prover/witness_vector_generator/Cargo.toml new file mode 100644 index 000000000000..aa89d8a3e4c6 --- /dev/null +++ b/prover/witness_vector_generator/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "zksync_witness_vector_generator" +version = "1.0.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +zksync_types = { path = "../../core/lib/types", version = "1.0" } +zksync_dal = { path = "../../core/lib/dal", version = "1.0" } +zksync_config = { path = "../../core/lib/config", version = "1.0" } +zksync_object_store = { path = "../../core/lib/object_store", version = "1.0" } +zksync_prover_fri_utils = { path = "../prover_fri_utils", version = "1.0" } +zksync_utils = { path = "../../core/lib/utils", version = "1.0" } +prometheus_exporter = { path = "../../core/lib/prometheus_exporter", version = "1.0" } +zksync_prover_fri_types = { path = "../prover_fri_types", version = "1.0" } +zksync_prover_utils = { path = "../../core/lib/prover_utils", version = "1.0" } +zksync_queued_job_processor = { path = "../../core/lib/queued_job_processor", version = "1.0" } +vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" } +vlog = { path = "../../core/lib/vlog", version = "1.0" } + +structopt = "0.3.26" +tokio = { version = "1", features = ["time"] } +futures = { version = "0.3", features = ["compat"] } +ctrlc = { version = "3.1", features = ["termination"] } +metrics = "0.20.0" +serde = { version = "1.0", features = ["derive"] } +async-trait = "0.1" +queues = "1.1.0" +bincode = "1.0" diff --git a/prover/witness_vector_generator/README.md b/prover/witness_vector_generator/README.md new file mode 100644 index 000000000000..ff4013c0d619 --- /dev/null +++ b/prover/witness_vector_generator/README.md @@ -0,0 +1,7 @@ +# Witness vector generator + +Used to generate witness vectors from circuits and send them to the prover over TCP. + +## Running + +`zk f cargo +nightly-2023-05-31 run --release --bin zksync_witness_vector_generator`
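For orientation, the pipeline that `src/generator.rs` below implements can be condensed to three steps. A simplified sketch — prover reservation, retries, and metrics are omitted; the types and `send_assembly` helper are the ones used in the real code below:

```rust
// Condensed sketch of the flow in src/generator.rs below; illustrative only.
use zksync_prover_fri_types::ProverJob;
use zksync_prover_fri_utils::socket_utils::send_assembly;
use zksync_types::proofs::SocketAddress;
use zksync_witness_vector_generator::generator::WitnessVectorGenerator;

fn synthesize_and_send(job: ProverJob, address: &SocketAddress) -> Result<(), String> {
    let job_id = job.job_id;
    // 1. CPU-side synthesis: turn the circuit into a witness vector.
    let artifacts = WitnessVectorGenerator::generate_witness_vector(job);
    // 2. Serialize the artifacts...
    let mut serialized = bincode::serialize(&artifacts).map_err(|err| err.to_string())?;
    // 3. ...and stream them to a reserved GPU prover over TCP.
    send_assembly(job_id, &mut serialized, address).map(|_elapsed_and_len| ())
}
```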
diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs new file mode 100644 index 000000000000..081614ada8ff --- /dev/null +++ b/prover/witness_vector_generator/src/generator.rs @@ -0,0 +1,207 @@ +use std::time::{Duration, Instant}; + +use async_trait::async_trait; +use tokio::task::JoinHandle; + +use tokio::time::sleep; +use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; +use zksync_config::configs::FriWitnessVectorGeneratorConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStore; +use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use zksync_prover_fri_types::{CircuitWrapper, ProverJob, WitnessVectorArtifacts}; +use zksync_prover_fri_utils::fetch_next_circuit; +use zksync_prover_fri_utils::get_numeric_circuit_id; +use zksync_prover_fri_utils::socket_utils::send_assembly; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::proofs::{AggregationRound, GpuProverInstanceStatus, SocketAddress}; +use zksync_vk_setup_data_server_fri::get_finalization_hints; + +pub struct WitnessVectorGenerator { + blob_store: Box<dyn ObjectStore>, + pool: ConnectionPool, + circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, + zone: String, + config: FriWitnessVectorGeneratorConfig, +} + +impl WitnessVectorGenerator { + pub fn new( + blob_store: Box<dyn ObjectStore>, + prover_connection_pool: ConnectionPool, + circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, + zone: String, + config: FriWitnessVectorGeneratorConfig, + ) -> Self { + Self { + blob_store, + pool: prover_connection_pool, + circuit_ids_for_round_to_be_proven, + zone, + config, + } + } + + pub fn generate_witness_vector(job: ProverJob) -> WitnessVectorArtifacts { + let mut key = job.setup_data_key.clone(); + if key.round == AggregationRound::NodeAggregation { + key.circuit_id = ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8; + } + let finalization_hints = get_finalization_hints(key); + let mut cs = match job.circuit_wrapper.clone() { + CircuitWrapper::Base(base_circuit) => { + base_circuit.synthesis::<GoldilocksField>(&finalization_hints) + } + CircuitWrapper::Recursive(recursive_circuit) => { + recursive_circuit.synthesis::<GoldilocksField>(&finalization_hints) + } + }; + WitnessVectorArtifacts::new(cs.materialize_witness_vec(), job) + } +}
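One subtlety in `generate_witness_vector` above: every node-aggregation job is synthesized against the node-layer circuit's finalization hints, whatever recursive circuit id the job itself carries. Spelled out as a standalone helper — an illustration only, not code from this PR:

```rust
// Illustration of the key remap above, not part of this PR. Node-aggregation
// jobs all share one circuit type (NodeLayerCircuit, id 2 per the tests earlier
// in this patch), so their finalization hints are looked up under that id.
use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType;
use zksync_prover_fri_types::ProverServiceDataKey;
use zksync_types::proofs::AggregationRound;

fn finalization_hints_key(job_key: ProverServiceDataKey) -> ProverServiceDataKey {
    let mut key = job_key;
    if key.round == AggregationRound::NodeAggregation {
        key.circuit_id = ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8;
    }
    key
}
```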
+ +#[async_trait] +impl JobProcessor for WitnessVectorGenerator { + type Job = ProverJob; + type JobId = u32; + type JobArtifacts = WitnessVectorArtifacts; + const SERVICE_NAME: &'static str = "WitnessVectorGenerator"; + + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut storage = self.pool.access_storage().await; + let mut fri_prover_dal = storage.fri_prover_jobs_dal(); + let job = fetch_next_circuit( + &mut fri_prover_dal, + &*self.blob_store, + &self.circuit_ids_for_round_to_be_proven, + ) + .await?; + Some((job.job_id, job)) + } + + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { + self.pool + .access_storage() + .await + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + } + + async fn process_job( + &self, + job: ProverJob, + _started_at: Instant, + ) -> JoinHandle<Self::JobArtifacts> { + tokio::task::spawn_blocking(move || Self::generate_witness_vector(job)) + } + + async fn save_result( + &self, + job_id: Self::JobId, + started_at: Instant, + artifacts: WitnessVectorArtifacts, + ) { + metrics::histogram!( + "prover_fri.witness_vector_generator.gpu_witness_vector_generation_time", + started_at.elapsed(), + "circuit_type" => get_numeric_circuit_id(&artifacts.prover_job.circuit_wrapper).to_string(), + ); + vlog::info!( + "Finished witness vector generation for job: {job_id} in zone: {:?}, took: {:?}", + self.zone, + started_at.elapsed() + ); + + let mut serialized: Vec<u8> = + bincode::serialize(&artifacts).expect("Failed to serialize witness vector artifacts"); + + let now = Instant::now(); + let mut attempts = 0; + + while now.elapsed() < self.config.prover_instance_wait_timeout() { + let prover = self + .pool + .access_storage() + .await + .fri_gpu_prover_queue_dal() + .lock_available_prover( + self.config.max_prover_reservation_duration(), + self.config.specialized_group_id, + self.zone.clone(), + ) + .await; + + if let Some(address) = prover { + let result = send_assembly(job_id, &mut serialized, &address); + handle_send_result(&result, job_id, &address, &self.pool, self.zone.clone()).await; + + if result.is_ok() { + return; + } + + vlog::warn!( + "Could not send witness vector to {address:?}. Prover group {}, zone {}, \ + job {job_id}, send attempt {attempts}.", + self.config.specialized_group_id, + self.zone, + ); + attempts += 1; + } else { + sleep(self.config.prover_instance_poll_time()).await; + } + } + vlog::trace!( + "Not able to get any free prover instance for sending witness vector for job: {job_id}" + ); + } +} + +async fn handle_send_result( + result: &Result<(Duration, u64), String>, + job_id: u32, + address: &SocketAddress, + pool: &ConnectionPool, + zone: String, +) { + match result { + Ok((elapsed, len)) => { + let blob_size_in_gb = len / (1024 * 1024 * 1024); + + vlog::trace!( + "Sent assembly of size: {blob_size_in_gb}GB successfully, took: {elapsed:?} \ + for job: {job_id} to: {address:?}" + ); + metrics::histogram!( + "prover_fri.witness_vector_generator.blob_sending_time", + *elapsed, + "blob_size_in_gb" => blob_size_in_gb.to_string(), + ); + + pool.access_storage() + .await + .fri_prover_jobs_dal() + .update_status(job_id, "in_gpu_proof") + .await; + } + + Err(err) => { + vlog::trace!( + "Failed sending assembly to address: {address:?}, socket not reachable \ + reason: {err}" + ); + + // mark prover instance in gpu_prover_queue dead + pool.access_storage() + .await + .fri_gpu_prover_queue_dal() + .update_prover_instance_status(address.clone(), GpuProverInstanceStatus::Dead, zone) + .await; + + // mark the job as failed + pool.access_storage() + .await + .fri_prover_jobs_dal() + .save_proof_error(job_id, "prover instance unreachable".to_string()) + .await; + } + } +} diff --git a/prover/witness_vector_generator/src/lib.rs b/prover/witness_vector_generator/src/lib.rs new file mode 100644 index 000000000000..365bdf1cc0d3 --- /dev/null +++ b/prover/witness_vector_generator/src/lib.rs @@ -0,0 +1,3 @@ +#![feature(generic_const_exprs)] + +pub mod generator; diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs new file mode 100644 index 000000000000..e642d5b59c0d --- /dev/null +++ b/prover/witness_vector_generator/src/main.rs @@ -0,0 +1,82 @@ +#![feature(generic_const_exprs)] + +use prometheus_exporter::run_prometheus_exporter; +use structopt::StructOpt; +use tokio::{sync::oneshot, sync::watch}; + +use crate::generator::WitnessVectorGenerator; +use zksync_config::configs::fri_prover_group::FriProverGroupConfig; +use zksync_config::configs::{AlertsConfig,
diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs
new file mode 100644
index 000000000000..e642d5b59c0d
--- /dev/null
+++ b/prover/witness_vector_generator/src/main.rs
@@ -0,0 +1,82 @@
+#![feature(generic_const_exprs)]
+
+use prometheus_exporter::run_prometheus_exporter;
+use structopt::StructOpt;
+use tokio::{sync::oneshot, sync::watch};
+
+use crate::generator::WitnessVectorGenerator;
+use zksync_config::configs::fri_prover_group::FriProverGroupConfig;
+use zksync_config::configs::{AlertsConfig, FriWitnessVectorGeneratorConfig, PrometheusConfig};
+use zksync_dal::connection::DbVariant;
+use zksync_dal::ConnectionPool;
+use zksync_object_store::ObjectStoreFactory;
+use zksync_prover_utils::region_fetcher::get_zone;
+use zksync_queued_job_processor::JobProcessor;
+use zksync_utils::wait_for_tasks::wait_for_tasks;
+
+mod generator;
+
+#[derive(Debug, StructOpt)]
+#[structopt(
+    name = "zksync_witness_vector_generator",
+    about = "Tool for generating witness vectors for circuits"
+)]
+struct Opt {
+    /// Number of times witness_vector_generator should be run.
+    #[structopt(short = "n", long = "n_iterations")]
+    number_of_iterations: Option<usize>,
+}
+
+#[tokio::main]
+async fn main() {
+    vlog::init();
+    let opt = Opt::from_args();
+    let config = FriWitnessVectorGeneratorConfig::from_env();
+    let prometheus_config = PrometheusConfig {
+        listener_port: config.prometheus_listener_port,
+        pushgateway_url: config.prometheus_pushgateway_url.clone(),
+        push_interval_ms: config.prometheus_push_interval_ms,
+    };
+    let pool = ConnectionPool::builder(DbVariant::Prover).build().await;
+    let blob_store = ObjectStoreFactory::from_env().create_store().await;
+    let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env()
+        .get_circuit_ids_for_group_id(config.specialized_group_id)
+        .unwrap_or(vec![]);
+    let zone = get_zone().await;
+    let witness_vector_generator = WitnessVectorGenerator::new(
+        blob_store,
+        pool,
+        circuit_ids_for_round_to_be_proven,
+        zone,
+        config,
+    );
+
+    let (stop_sender, stop_receiver) = watch::channel(false);
+
+    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();
+    let mut stop_signal_sender = Some(stop_signal_sender);
+    ctrlc::set_handler(move || {
+        if let Some(stop_signal_sender) = stop_signal_sender.take() {
+            stop_signal_sender.send(()).ok();
+        }
+    })
+    .expect("Error setting Ctrl+C handler");
+
+    vlog::info!("Starting witness vector generation");
+
+    let tasks = vec![
+        run_prometheus_exporter(prometheus_config.listener_port, None),
+        tokio::spawn(witness_vector_generator.run(stop_receiver, opt.number_of_iterations)),
+    ];
+
+    let particular_crypto_alerts = Some(AlertsConfig::from_env().sporadic_crypto_errors_substrs);
+    let graceful_shutdown = None::<futures::future::Ready<()>>;
+    let tasks_allowed_to_finish = false;
+    tokio::select! {
+        _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {},
+        _ = stop_signal_receiver => {
+            vlog::info!("Stop signal received, shutting down");
+        }
+    };
+    stop_sender.send(true).ok();
+}
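The binary's shutdown path is easy to miss in the noise above: a Ctrl+C handler fires a `oneshot`, the `select!` unblocks, and a `watch` channel broadcasts the stop flag to the job-processor task. A distilled, self-contained version of that wiring (placeholder worker; `tokio` and `ctrlc` crates assumed):

```rust
use std::time::Duration;
use tokio::sync::{oneshot, watch};

#[tokio::main]
async fn main() {
    let (stop_sender, stop_receiver) = watch::channel(false);
    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();

    // The handler may fire more than once; only the first Ctrl+C sends.
    let mut stop_signal_sender = Some(stop_signal_sender);
    ctrlc::set_handler(move || {
        if let Some(sender) = stop_signal_sender.take() {
            sender.send(()).ok();
        }
    })
    .expect("Error setting Ctrl+C handler");

    // Stand-in for `witness_vector_generator.run(stop_receiver, ...)`:
    // polls the watch channel and exits once the flag flips to `true`.
    let worker = tokio::spawn(async move {
        while !*stop_receiver.borrow() {
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    });

    // Block until Ctrl+C, then broadcast the stop flag to workers.
    let _ = stop_signal_receiver.await;
    stop_sender.send(true).ok();
    worker.await.ok();
}
```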
diff --git a/prover/witness_vector_generator/tests/basic_test.rs b/prover/witness_vector_generator/tests/basic_test.rs
new file mode 100644
index 000000000000..3361a44b028a
--- /dev/null
+++ b/prover/witness_vector_generator/tests/basic_test.rs
@@ -0,0 +1,32 @@
+use std::fs;
+use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey};
+use zksync_types::proofs::AggregationRound;
+use zksync_types::L1BatchNumber;
+use zksync_witness_vector_generator::generator::WitnessVectorGenerator;
+
+#[test]
+fn test_generate_witness_vector() {
+    let filename = "./tests/data/base_layer_main_vm.bin";
+    let file = fs::read(filename).expect("failed reading circuit");
+    let circuit_wrapper = bincode::deserialize::<CircuitWrapper>(&file)
+        .expect("circuit wrapper deserialization failed");
+    let key = ProverServiceDataKey {
+        circuit_id: 1,
+        round: AggregationRound::BasicCircuits,
+    };
+    let job = ProverJob {
+        block_number: L1BatchNumber(1),
+        job_id: 1,
+        circuit_wrapper,
+        setup_data_key: key,
+    };
+    let vector = WitnessVectorGenerator::generate_witness_vector(job);
+    assert!(!vector.witness_vector.all_values.is_empty());
+    assert!(!vector.witness_vector.multiplicities.is_empty());
+    assert!(!vector.witness_vector.public_inputs_locations.is_empty());
+    let serialized = bincode::serialize(&vector).expect("failed to serialize witness vector");
+    assert!(
+        serialized.len() < 1_000_000_000,
+        "The size of the serialized vector shall be less than 1GB"
+    );
+}
diff --git a/prover/witness_vector_generator/tests/data/base_layer_main_vm.bin b/prover/witness_vector_generator/tests/data/base_layer_main_vm.bin
new file mode 100644
index 000000000000..64fa6fe6cd46
Binary files /dev/null and b/prover/witness_vector_generator/tests/data/base_layer_main_vm.bin differ
diff --git a/sdk/zksync-rs/src/ethereum/mod.rs b/sdk/zksync-rs/src/ethereum/mod.rs
index 55f277e09dfa..3371c7201499 100644
--- a/sdk/zksync-rs/src/ethereum/mod.rs
+++ b/sdk/zksync-rs/src/ethereum/mod.rs
@@ -67,6 +67,8 @@ pub struct EthereumProvider<S: EthereumSigner> {
     polling_interval: Duration,
 }
 
+// TODO (SMA-1623): create a way to pass `Options` (e.g. nonce, gas_limit, priority_fee_per_gas)
+// into methods that perform L1 transactions. The unit is wei.
 pub const DEFAULT_PRIORITY_FEE: u64 = 2_000_000_000;
 
 impl<S: EthereumSigner> EthereumProvider<S> {
@@ -497,6 +499,7 @@ impl<S: EthereumSigner> EthereumProvider<S> {
             gas_price
         };
 
+        // TODO (PLA-85): Add gas estimations for deposits in Rust SDK
        let l2_gas_limit = U256::from(3_000_000u32);
 
        let base_cost: U256 = self
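On the `DEFAULT_PRIORITY_FEE` constant above: the TODO's remark that "the unit is wei" means the default priority fee is 2 gwei. A one-line sanity check of that arithmetic:

```rust
fn main() {
    const WEI_PER_GWEI: u64 = 1_000_000_000;
    const DEFAULT_PRIORITY_FEE: u64 = 2_000_000_000; // value from the SDK above, in wei
    assert_eq!(DEFAULT_PRIORITY_FEE / WEI_PER_GWEI, 2); // i.e. 2 gwei
}
```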
diff --git a/sdk/zksync-rs/src/operations/withdraw.rs b/sdk/zksync-rs/src/operations/withdraw.rs
index 8823102fed21..0037deacd630 100644
--- a/sdk/zksync-rs/src/operations/withdraw.rs
+++ b/sdk/zksync-rs/src/operations/withdraw.rs
@@ -25,9 +25,9 @@ pub struct WithdrawBuilder<'a, S: EthereumSigner, P> {
 }
 
 impl<'a, S, P> WithdrawBuilder<'a, S, P>
-    where
-        S: EthereumSigner,
-        P: ZksNamespaceClient + EthNamespaceClient + Sync,
+where
+    S: EthereumSigner,
+    P: ZksNamespaceClient + EthNamespaceClient + Sync,
 {
     /// Initializes a withdraw transaction building process.
     pub fn new(wallet: &'a Wallet<S, P>) -> Self {
@@ -55,6 +55,8 @@ impl<'a, S, P> WithdrawBuilder<'a, S, P>
             .ok_or_else(|| ClientError::MissingRequiredField("amount".into()))?;
 
         let (contract_address, calldata, value) = if token == ETHEREUM_ADDRESS {
+            // TODO (SMA-1608): Do not implement the ABI manually, introduce ABI files with an update script similarly to
+            // how it's done for L1 part of SDK.
             let calldata_params = vec![ethabi::ParamType::Address];
             let mut calldata = ethabi::short_signature("withdraw", &calldata_params).to_vec();
             calldata.append(&mut ethabi::encode(&[ethabi::Token::Address(to)]));
@@ -73,6 +75,8 @@ impl<'a, S, P> WithdrawBuilder<'a, S, P>
             default_bridges.l2_erc20_default_bridge
         };
 
+        // TODO (SMA-1608): Do not implement the ABI manually, introduce ABI files with an update script similarly to
+        // how it's done for L1 part of SDK.
         let calldata_params = vec![
             ethabi::ParamType::Address,
             ethabi::ParamType::Address,
diff --git a/sdk/zksync-web3.js/CHANGELOG.md b/sdk/zksync-web3.js/CHANGELOG.md
index dfa39d6ee171..90300c6c4ad7 100644
--- a/sdk/zksync-web3.js/CHANGELOG.md
+++ b/sdk/zksync-web3.js/CHANGELOG.md
@@ -1,5 +1,20 @@
 # Changelog
 
+## [0.15.4](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.15.3...zksync-web3-v0.15.4) (2023-07-25)
+
+
+### Bug Fixes
+
+* **sdk:** allow null for txIndexInL1Batch in formatter ([#2232](https://github.com/matter-labs/zksync-2-dev/issues/2232)) ([474740a](https://github.com/matter-labs/zksync-2-dev/commit/474740a7f9ca648869fd8f82cc4da0fcefd9cbf7))
+
+## [0.15.3](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.15.2...zksync-web3-v0.15.3) (2023-07-25)
+
+
+### Bug Fixes
+
+* **sdk:** Fix getting receipt for transactions rejected in statekeeper ([#2071](https://github.com/matter-labs/zksync-2-dev/issues/2071)) ([c97e494](https://github.com/matter-labs/zksync-2-dev/commit/c97e494c1ef7f58fe8632a3ebf943d775b1703cb))
+* **sdk:** make new fields optional in SDK ([#2226](https://github.com/matter-labs/zksync-2-dev/issues/2226)) ([9a3b530](https://github.com/matter-labs/zksync-2-dev/commit/9a3b5307a5593664cfaa510f3511751125edb96e))
+
 ## [0.15.2](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.15.1...zksync-web3-v0.15.2) (2023-07-06)
diff --git a/sdk/zksync-web3.js/abi/IZkSync.json b/sdk/zksync-web3.js/abi/IZkSync.json index b553291d6c29..0ba0ad7e3930 100644 --- a/sdk/zksync-web3.js/abi/IZkSync.json +++ b/sdk/zksync-web3.js/abi/IZkSync.json @@ -176,25 +176,6 @@ "name": "IsPorterAvailableStatusUpdate", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "oldAllowList", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "newAllowList", - "type": "address" - } - ], - "name": "NewAllowList", - "type": "event" - }, { "anonymous": false, "inputs": [ @@ -214,44 +195,6 @@ "name": "NewGovernor", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "previousBytecodeHash", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "newBytecodeHash", - "type": "bytes32" - } - ], - "name": "NewL2BootloaderBytecodeHash", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "previousBytecodeHash", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "newBytecodeHash", - "type": "bytes32" - } - ], - "name":
"NewL2DefaultAccountBytecodeHash", - "type": "event" - }, { "anonymous": false, "inputs": [ @@ -409,78 +352,6 @@ "name": "NewPriorityTxMaxGasLimit", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "oldVerifier", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "newVerifier", - "type": "address" - } - ], - "name": "NewVerifier", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "components": [ - { - "internalType": "bytes32", - "name": "recursionNodeLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionLeafLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionCircuitsSetVksHash", - "type": "bytes32" - } - ], - "indexed": false, - "internalType": "struct VerifierParams", - "name": "oldVerifierParams", - "type": "tuple" - }, - { - "components": [ - { - "internalType": "bytes32", - "name": "recursionNodeLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionLeafLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionCircuitsSetVksHash", - "type": "bytes32" - } - ], - "indexed": false, - "internalType": "struct VerifierParams", - "name": "newVerifierParams", - "type": "tuple" - } - ], - "name": "NewVerifierParams", - "type": "event" - }, { "anonymous": false, "inputs": [ @@ -1121,6 +992,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "getProtocolVersion", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "getSecurityCouncil", @@ -1857,45 +1741,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract IAllowList", - "name": "_newAllowList", - "type": "address" - } - ], - "name": "setAllowList", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_l2BootloaderBytecodeHash", - "type": "bytes32" - } - ], - "name": "setL2BootloaderBytecodeHash", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_l2DefaultAccountBytecodeHash", - "type": "bytes32" - } - ], - "name": "setL2DefaultAccountBytecodeHash", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [ { @@ -1953,49 +1798,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract Verifier", - "name": "_newVerifier", - "type": "address" - } - ], - "name": "setVerifier", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "bytes32", - "name": "recursionNodeLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionLeafLevelVkHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "recursionCircuitsSetVksHash", - "type": "bytes32" - } - ], - "internalType": "struct VerifierParams", - "name": "_newVerifierParams", - "type": "tuple" - } - ], - "name": "setVerifierParams", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [ { diff --git a/sdk/zksync-web3.js/package.json b/sdk/zksync-web3.js/package.json index 6079cbc33cff..a72fe11ff2bb 
100644
--- a/sdk/zksync-web3.js/package.json
+++ b/sdk/zksync-web3.js/package.json
@@ -1,6 +1,6 @@
 {
     "name": "zksync-web3",
-    "version": "0.15.2",
+    "version": "0.15.4",
     "main": "build/src/index.js",
     "types": "build/src/index.d.ts",
     "files": [
diff --git a/sdk/zksync-web3.js/src/contract.ts b/sdk/zksync-web3.js/src/contract.ts
index 2d25274dc9fa..dfd4a0c4ba77 100644
--- a/sdk/zksync-web3.js/src/contract.ts
+++ b/sdk/zksync-web3.js/src/contract.ts
@@ -42,6 +42,7 @@ export class ContractFactory extends ethers.ContractFactory {
     }
 
     override getDeployTransaction(...args: any[]): ethers.providers.TransactionRequest {
+        // TODO (SMA-1585): Users should be able to provide the salt.
         let salt = '0x0000000000000000000000000000000000000000000000000000000000000000';
 
         // The overrides will be popped out in this call:
diff --git a/sdk/zksync-web3.js/src/provider.ts b/sdk/zksync-web3.js/src/provider.ts
index c48c761eaa03..e5ab4976b05f 100644
--- a/sdk/zksync-web3.js/src/provider.ts
+++ b/sdk/zksync-web3.js/src/provider.ts
@@ -334,7 +334,7 @@ export class Provider extends ethers.providers.JsonRpcProvider {
             key: hash,
             value: hash,
             transactionHash: hash,
-            txIndexInL1Batch: number,
+            txIndexInL1Batch: Formatter.allowNull(number),
             logIndex: number
         };
 
@@ -428,6 +428,8 @@ export class Provider extends ethers.providers.JsonRpcProvider {
         if (transaction.customData.factoryDeps) {
             // @ts-ignore
             result.eip712Meta.factoryDeps = transaction.customData.factoryDeps.map((dep: ethers.BytesLike) =>
+                // TODO (SMA-1605): we arraify instead of hexlifying because server expects Vec<u8>.
+                // We should change deserialization there.
                 Array.from(utils.arrayify(dep))
             );
         }
@@ -687,6 +689,7 @@ export class Provider extends ethers.providers.JsonRpcProvider {
     }
 
     static getDefaultProvider() {
+        // TODO (SMA-1606): Add different urls for different networks.
         return new Provider(process.env.ZKSYNC_WEB3_API_URL || 'http://localhost:3050');
     }
 
@@ -814,6 +817,7 @@ export class Provider extends ethers.providers.JsonRpcProvider {
         };
     }
 
+    // TODO (EVM-3): support refundRecipient for fee estimation
     async estimateL1ToL2Execute(transaction: {
         contractAddress: Address;
         calldata: BytesLike;
diff --git a/sdk/zksync-web3.js/src/types.ts b/sdk/zksync-web3.js/src/types.ts
index ccb60fba9872..8f0323a8d9b9 100644
--- a/sdk/zksync-web3.js/src/types.ts
+++ b/sdk/zksync-web3.js/src/types.ts
@@ -55,6 +55,7 @@ export type BlockTag =
     | 'earliest'
     | 'pending';
 
+// TODO (SMA-1585): Support create2 variants.
 export type DeploymentType = 'create' | 'createAccount';
 
 export interface Token {
@@ -114,7 +115,7 @@ export interface L2ToL1Log {
     blockHash: string;
     l1BatchNumber: number;
     transactionIndex: number;
-    txIndexInL1Batch: number;
+    txIndexInL1Batch?: number;
     shardId: number;
     isService: boolean;
     sender: string;
@@ -190,6 +191,10 @@ export interface BatchDetails {
     executedAt?: Date;
     l1GasPrice: number;
     l2FairGasPrice: number;
+    baseSystemContractsHashes: {
+        bootloader: string;
+        defaultAa: string;
+    };
 }
 
 export interface BlockDetails {
@@ -206,13 +211,17 @@ export interface BlockDetails {
     provenAt?: Date;
     executeTxHash?: string;
     executedAt?: Date;
+    baseSystemContractsHashes: {
+        bootloader: string;
+        defaultAa: string;
+    };
 }
 
 export interface TransactionDetails {
     isL1Originated: boolean;
     status: string;
     fee: BigNumberish;
-    gasPerPubdata: BigNumberish;
+    gasPerPubdata?: BigNumberish;
     initiatorAddress: Address;
     receivedAt: Date;
     ethCommitTxHash?: string;
diff --git a/yarn.lock b/yarn.lock index d5991cf89cb0..338905333b71 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1384,6 +1384,15 @@ dependencies: chalk "4.1.2" +"@matterlabs/hardhat-zksync-solc@0.3.17": + version "0.3.17" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.3.17.tgz#72f199544dc89b268d7bfc06d022a311042752fd" + integrity sha512-aZgQ0yfXW5xPkfuEH1d44ncWV4T2LzKZd0VVPo4PL5cUrYs2/II1FaEDp5zsf3FxOR1xT3mBsjuSrtJkk4AL8Q== + dependencies: + "@nomiclabs/hardhat-docker" "^2.0.0" + chalk "4.1.2" + dockerode "^3.3.4" + "@matterlabs/hardhat-zksync-solc@^0.3.15": version "0.3.16" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.3.16.tgz#dd8ed44f1a580f282794a15fee995f418b040158" @@ -1393,6 +1402,16 @@ chalk "4.1.2" dockerode "^3.3.4" +"@matterlabs/hardhat-zksync-verify@^0.2.0": + version "0.2.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.2.0.tgz#a0c6b897202057873355b680244f72f573d86a97" + integrity sha512-iUwxhPlNk+HWe+UadLqQzdDb2fammbKYoz8wqVuyr9jygFUf8JNPLWDZOS0KCQgRn/dmT22+i9nSREOg66bAHA== + dependencies: + "@matterlabs/hardhat-zksync-solc" "0.3.17" + axios "^1.4.0" + chalk "4.1.2" + dockerode "^3.3.4" + "@matterlabs/hardhat-zksync-vyper@^0.2.0": version "0.2.0" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-0.2.0.tgz#4cefe09cdceb9251faaa434f04cda018713c4545" @@ -1583,6 +1602,21 @@ mcl-wasm "^0.7.1" rustbn.js "~0.2.0" +"@nomicfoundation/hardhat-verify@^1.1.0": + version "1.1.1" + resolved "https://registry.yarnpkg.com/@nomicfoundation/hardhat-verify/-/hardhat-verify-1.1.1.tgz#6a433d777ce0172d1f0edf7f2d3e1df14b3ecfc1" + integrity sha512-9QsTYD7pcZaQFEA3tBb/D/oCStYDiEVDN7Dxeo/4SCyHRSm86APypxxdOMEPlGmXsAvd+p1j/dTODcpxb8aztA== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@ethersproject/address" "^5.0.2" + cbor "^8.1.0" + chalk "^2.4.2" + debug "^4.1.1" + lodash.clonedeep "^4.5.0" + semver "^6.3.0" + table "^6.8.0" + undici "^5.14.0" + "@nomicfoundation/solidity-analyzer-darwin-arm64@0.1.0": version "0.1.0" resolved "https://registry.yarnpkg.com/@nomicfoundation/solidity-analyzer-darwin-arm64/-/solidity-analyzer-darwin-arm64-0.1.0.tgz#83a7367342bd053a76d04bbcf4f373fef07cf760" @@ -1663,7 +1697,7 @@ resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-ethers/-/hardhat-ethers-2.2.2.tgz#812d48929c3bf8fe840ec29eab4b613693467679" integrity sha512-NLDlDFL2us07C0jB/9wzvR0kuLivChJWCXTKcj3yqjZqMoYp7g7wwS157F70VHx/+9gHIBGzak5pKDwG8gEefA==
-"@nomiclabs/hardhat-etherscan@^3.1.0": +"@nomiclabs/hardhat-etherscan@^3.1.0", "@nomiclabs/hardhat-etherscan@^3.1.7": version "3.1.7" resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-etherscan/-/hardhat-etherscan-3.1.7.tgz#72e3d5bd5d0ceb695e097a7f6f5ff6fcbf062b9a" integrity sha512-tZ3TvSgpvsQ6B6OGmo1/Au6u8BrAkvs1mIC/eURA3xgIfznUZBhmpne8hv7BXUzw9xNL3fXdpOYgOQlVMTcoHQ== @@ -2812,13 +2846,14 @@ axios@^0.21.1: dependencies: follow-redirects "^1.14.0" -axios@^0.27.2: - version "0.27.2" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.27.2.tgz#207658cc8621606e586c85db4b41a750e756d972" - integrity sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ== +axios@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.4.0.tgz#38a7bf1224cd308de271146038b551d725f0be1f" + integrity sha512-S4XCWMEmzvo64T9GfvQDOXgYRDJ/wsSZc7Jvdgx5u1sd0JwsuPLqb3SYmusag+edF6ziyMensPVqLTSc1PiSEA== dependencies: - follow-redirects "^1.14.9" + follow-redirects "^1.15.0" form-data "^4.0.0" + proxy-from-env "^1.1.0" babel-code-frame@^6.26.0: version "6.26.0" @@ -6370,7 +6405,7 @@ flow-stoplight@^1.0.0: resolved "https://registry.yarnpkg.com/flow-stoplight/-/flow-stoplight-1.0.0.tgz#4a292c5bcff8b39fa6cc0cb1a853d86f27eeff7b" integrity sha512-rDjbZUKpN8OYhB0IE/vY/I8UWO/602IIJEU/76Tv4LvYnwHCk0BCsvz4eRr9n+FQcri7L5cyaXOo0+/Kh4HisA== -follow-redirects@^1.12.1, follow-redirects@^1.14.0, follow-redirects@^1.14.9: +follow-redirects@^1.12.1, follow-redirects@^1.14.0, follow-redirects@^1.15.0: version "1.15.2" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13" integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA== @@ -8483,6 +8518,14 @@ kleur@^3.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== +"l1-zksync-contracts@link:contacts/ethereum": + version "0.0.0" + uid "" + +"l2-zksync-contracts@link:contacts/zksync": + version "0.0.0" + uid "" + lcid@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" @@ -8740,6 +8783,11 @@ lodash.assign@^4.0.3, lodash.assign@^4.0.6: resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7" integrity sha512-hFuH8TY+Yji7Eja3mGiuAxBqLagejScbG8GbG0j6o9vzn0YL14My+ktnqtZgFTosKymC9/44wP6s7xyuLfnClw== +lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + integrity sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ== + lodash.differencewith@~4.5.0: version "4.5.0" resolved "https://registry.yarnpkg.com/lodash.differencewith/-/lodash.differencewith-4.5.0.tgz#bafafbc918b55154e179176a00bb0aefaac854b7" @@ -10316,6 +10364,11 @@ proxy-addr@~2.0.7: forwarded "0.2.0" ipaddr.js "1.9.1" +proxy-from-env@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== + prr@~1.0.1: version "1.0.1" resolved 
"https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" @@ -13344,4 +13397,4 @@ zksync-web3@^0.14.3: integrity sha512-hT72th4AnqyLW1d5Jlv8N2B/qhEnl2NePK2A3org7tAa24niem/UAaHMkEvmWI3SF9waYUPtqAtjpf+yvQ9zvQ== "zksync-web3@link:sdk/zksync-web3.js": - version "0.15.2" + version "0.15.4"