diff --git a/Cargo.lock b/Cargo.lock index 11ba8a461d78..d8919ee451ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,13 +2,19 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" + [[package]] name = "actix-codec" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a7559404a7f3573127aab53c08ce37a6c6a315c374a31070f3c91cd1b4a7fe" dependencies = [ - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "futures-core", "futures-sink", @@ -46,7 +52,7 @@ dependencies = [ "actix-utils", "ahash 0.8.3", "base64 0.21.0", - "bitflags", + "bitflags 1.3.2", "brotli", "bytes 1.4.0", "bytestring", @@ -231,7 +237,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "clap 4.1.8", + "clap 4.2.4", "dotenvy", "tokio", "zksync_dal", @@ -313,6 +319,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "aliasable" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -337,6 +349,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -346,6 +364,55 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" + +[[package]] +name = "anstyle-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "anyhow" version = "1.0.69" @@ -612,6 +679,18 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backon" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294" +dependencies = [ + "futures 0.3.27", + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.67" @@ -663,7 +742,7 @@ dependencies = [ [[package]] name = "bellman_ce" version = "0.3.2" 
-source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayvec 0.7.2", "bit-vec", @@ -706,21 +785,23 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", + "prettyplease", "proc-macro2 1.0.52", "quote 1.0.26", "regex", "rustc-hash", "shlex", + "syn 2.0.12", ] [[package]] @@ -738,6 +819,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" + [[package]] name = "bitvec" version = "0.20.4" @@ -784,7 +871,7 @@ dependencies = [ [[package]] name = "blake2s_const" version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -1016,6 +1103,33 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ciborium" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" + +[[package]] +name = "ciborium-ll" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.2.5" @@ -1052,57 +1166,83 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", - "textwrap", + "textwrap 0.11.0", "unicode-width", "vec_map", ] [[package]] name = "clap" -version = "4.1.8" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ - "bitflags", + "bitflags 1.3.2", + "clap_lex 0.2.4", + "indexmap", + "textwrap 0.16.0", +] + +[[package]] +name = "clap" +version = "4.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ac1f6381d8d82ab4684768f89c0ea3afe66925ceadb4eeb3fc452ffc55d62" +dependencies = [ + "clap_builder", "clap_derive", - "clap_lex", - "is-terminal", "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"84080e799e54cff944f4b4a4b0e71630b0e0443b25b985175c7dddc1a859b749" +dependencies = [ + "anstream", + "anstyle", + "bitflags 1.3.2", + "clap_lex 0.4.1", "strsim 0.10.0", - "termcolor", ] [[package]] name = "clap_derive" -version = "4.1.8" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" +checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" dependencies = [ "heck 0.4.1", - "proc-macro-error", "proc-macro2 1.0.52", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.12", ] [[package]] name = "clap_lex" -version = "0.3.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" + [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -1140,6 +1280,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "combine" version = "4.6.6" @@ -1159,6 +1305,12 @@ dependencies = [ "crossbeam-utils 0.8.15", ] +[[package]] +name = "const-decoder" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5241cd7938b1b415942e943ea96f615953d500b50347b505b0b507080bad5a6f" + [[package]] name = "const-oid" version = "0.7.1" @@ -1254,15 +1406,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" dependencies = [ + "anes", "atty", "cast", - "clap 2.34.0", + "ciborium", + "clap 3.2.25", "criterion-plot", - "csv", "itertools", "lazy_static", "num-traits", @@ -1271,7 +1424,6 @@ dependencies = [ "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -1280,14 +1432,31 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", "itertools", ] +[[package]] +name = "cross_external_nodes_checker" +version = "1.0.0" +dependencies = [ + "anyhow", + "ctrlc", + "envy", + "futures 0.3.27", + "prometheus_exporter", + "serde", + "serde_json", + "tokio", + "vlog", + "zksync_types", + "zksync_web3_decl", +] + [[package]] name = "crossbeam" version = "0.7.3" @@ -1498,7 +1667,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +source = 
"git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "proc-macro-error", "proc-macro2 1.0.52", @@ -1507,27 +1676,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "csv" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" -dependencies = [ - "csv-core", - "itoa 1.0.6", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" -dependencies = [ - "memchr", -] - [[package]] name = "ctor" version = "0.1.26" @@ -1786,16 +1934,6 @@ dependencies = [ "signature", ] -[[package]] -name = "eip712-signature" -version = "0.1.0" -source = "git+https://github.com/vladbochok/eip712-signature#30b11455e7d613313e8c12d2aad961fd4bf902fe" -dependencies = [ - "ethereum-types", - "parity-crypto", - "thiserror", -] - [[package]] name = "either" version = "1.8.1" @@ -1847,19 +1985,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "env_logger" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" -dependencies = [ - "humantime", - "is-terminal", - "log", - "regex", - "termcolor", -] - [[package]] name = "envy" version = "0.4.2" @@ -2103,7 +2228,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" dependencies = [ - "bitflags", + "bitflags 1.3.2", "fuchsia-zircon-sys", ] @@ -2365,9 +2490,8 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +version = "0.9.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "base64 0.21.0", @@ -2387,9 +2511,8 @@ dependencies = [ [[package]] name = "google-cloud-default" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +version = "0.1.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "google-cloud-auth", @@ -2400,8 +2523,7 @@ dependencies = [ [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "reqwest", "thiserror", @@ -2410,9 +2532,8 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +version = "0.10.0" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-stream", "base64 0.21.0", @@ -2432,7 +2553,6 @@ dependencies = [ "thiserror", "time 0.3.20", "tokio", - "tokio-util 0.7.7", "tracing", "url", ] 
@@ -2440,8 +2560,7 @@ dependencies = [ [[package]] name = "google-cloud-token" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", ] @@ -2547,7 +2666,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "headers-core", "http", @@ -2764,6 +2883,12 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iai" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" + [[package]] name = "iana-time-zone" version = "0.1.53" @@ -3314,6 +3439,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "leb128" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" + [[package]] name = "libc" version = "0.2.140" @@ -3338,9 +3469,9 @@ checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "librocksdb-sys" -version = "0.6.1+6.28.2" +version = "0.6.3+6.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" +checksum = "184ce2a189a817be2731070775ad053b6804a340fee05c6686d711db27455917" dependencies = [ "bindgen", "bzip2-sys", @@ -3385,8 +3516,10 @@ dependencies = [ "envy", "futures 0.3.27", "hex", + "metrics", "num 0.3.1", "once_cell", + "prometheus_exporter", "rand 0.8.5", "rand_distr", "regex", @@ -3700,7 +3833,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "libc", "static_assertions", @@ -3961,7 +4094,7 @@ version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -4075,9 +4208,32 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" + +[[package]] +name = "ouroboros" +version = "0.15.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1358bd1558bd2a083fed428ffeda486fbfb323e698cdda7794259d592ca72db" +dependencies = [ + "aliasable", + "ouroboros_macro", +] + +[[package]] +name = "ouroboros_macro" +version = "0.15.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" +dependencies = [ + "Inflector", + "proc-macro-error", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", +] [[package]] name = "overload" @@ -4447,7 +4603,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "concurrent-queue", "libc", @@ -4468,6 +4624,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +dependencies = [ + "proc-macro2 1.0.52", + "syn 2.0.12", +] + [[package]] name = "primitive-types" version = "0.10.1" @@ -4874,7 +5040,7 @@ version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4914,7 +5080,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4930,9 +5096,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -4950,9 +5116,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "remove_dir_all" @@ -5144,7 +5310,7 @@ version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -5310,7 +5476,7 @@ version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -5443,16 +5609,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.156" @@ -5759,7 +5915,7 @@ dependencies = [ "atoi", "base64 0.13.1", "bigdecimal", - "bitflags", + "bitflags 1.3.2", "byteorder", "bytes 1.4.0", "chrono", @@ -5936,15 +6092,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +dependencies = [ + "proc-macro2 1.0.52", + "quote 1.0.26", + "unicode-ident", +] + [[package]] name = "sync_vm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" 
+version = "1.3.2" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "arrayvec 0.7.2", "cs_derive", "derivative", - "eip712-signature", "franklin-crypto", "hex", "itertools", @@ -6040,6 +6206,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "textwrap" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" + [[package]] name = "thiserror" version = "1.0.39" @@ -6152,14 +6324,13 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", "libc", - "memchr", "mio 0.8.6", "num_cpus", "parking_lot 0.12.1", @@ -6182,13 +6353,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2 1.0.52", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.12", ] [[package]] @@ -6595,6 +6766,12 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "1.3.0" @@ -6679,6 +6856,33 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "vm-benchmark" +version = "0.1.0" +dependencies = [ + "criterion", + "iai", + "metrics", + "metrics-exporter-prometheus", + "tokio", + "vm-benchmark-harness", +] + +[[package]] +name = "vm-benchmark-harness" +version = "0.1.0" +dependencies = [ + "once_cell", + "ouroboros", + "vm", + "zk_evm", + "zksync_config", + "zksync_contracts", + "zksync_state", + "zksync_types", + "zksync_utils", +] + [[package]] name = "waker-fn" version = "1.1.0" @@ -6933,13 +7137,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -6948,7 +7152,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + 
"windows-targets 0.48.0", ] [[package]] @@ -6957,13 +7170,28 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -6972,42 +7200,84 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winnow" version = "0.3.5" @@ -7050,27 +7320,23 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" dependencies = [ - "blake2 0.10.6", - "k256", "lazy_static", "num 0.4.0", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" dependencies = [ - "env_logger 0.9.3", + "env_logger", "hex", "lazy_static", "log", @@ -7087,27 +7353,29 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags", + "bitflags 2.2.1", + "blake2 0.10.6", "ethereum-types", + "k256", "lazy_static", "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] name = "zkevm_test_harness" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" dependencies = [ "bincode", - "blake2 0.10.6", "circuit_testing", "codegen 0.2.0", "crossbeam 0.8.2", "derivative", - "env_logger 0.10.0", + "env_logger", "hex", "num-bigint 0.4.3", "num-integer", @@ -7115,8 +7383,6 @@ dependencies = [ "rayon", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "smallvec", "structopt", "sync_vm", @@ -7156,7 +7422,9 @@ dependencies = [ name = "zksync_circuit_breaker" version = "1.0.0" dependencies = [ + "assert_matches", "async-trait", + "backon", "convert_case 0.6.0", "futures 0.3.27", "hex", @@ -7185,6 +7453,7 @@ dependencies = [ "serde_json", "url", "zksync_basic_types", + "zksync_contracts", "zksync_utils", ] @@ -7200,9 +7469,11 @@ dependencies = [ "lazy_static", "metrics", "prometheus_exporter", + "regex", "serde", "serde_json", "structopt", + "tempfile", "thiserror", "tokio", "vlog", @@ -7234,11 +7505,12 @@ dependencies = [ "actix-web", "anyhow", "assert_matches", - "async-std", "async-trait", "bigdecimal", "bincode", + "bitflags 1.3.2", "chrono", + "clap 4.2.4", "ctrlc", "db_test_macro", "futures 0.3.27", @@ -7259,7 +7531,6 @@ dependencies = [ "reqwest", "serde", "serde_json", - "structopt", "tempfile", "thiserror", "tokio", @@ -7270,10 +7541,13 @@ 
dependencies = [ "zksync_config", "zksync_contracts", "zksync_dal", + "zksync_db_storage_provider", "zksync_eth_client", "zksync_eth_signer", + "zksync_health_check", "zksync_mempool", "zksync_merkle_tree", + "zksync_merkle_tree2", "zksync_mini_merkle_tree", "zksync_object_store", "zksync_prover_utils", @@ -7324,7 +7598,7 @@ dependencies = [ "vm", "zksync_config", "zksync_contracts", - "zksync_object_store", + "zksync_health_check", "zksync_state", "zksync_storage", "zksync_types", @@ -7332,6 +7606,14 @@ dependencies = [ "zksync_web3_decl", ] +[[package]] +name = "zksync_db_storage_provider" +version = "1.0.0" +dependencies = [ + "zksync_dal", + "zksync_types", +] + [[package]] name = "zksync_eth_client" version = "1.0.0" @@ -7374,6 +7656,27 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_external_node" +version = "1.0.0" +dependencies = [ + "anyhow", + "prometheus_exporter", + "tokio", + "vlog", + "zksync_basic_types", + "zksync_config", + "zksync_core", + "zksync_dal", + "zksync_eth_client", + "zksync_health_check", + "zksync_storage", +] + +[[package]] +name = "zksync_health_check" +version = "0.1.0" + [[package]] name = "zksync_mempool" version = "1.0.0" @@ -7387,23 +7690,15 @@ dependencies = [ name = "zksync_merkle_tree" version = "1.0.0" dependencies = [ - "anyhow", - "async-trait", "bincode", "byteorder", - "criterion", - "fnv", - "futures 0.3.27", "itertools", "metrics", "once_cell", - "rand 0.4.6", "rayon", "serde", - "serde_json", "tempfile", "thiserror", - "tokio", "vlog", "zksync_config", "zksync_crypto", @@ -7412,6 +7707,29 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_merkle_tree2" +version = "1.0.0" +dependencies = [ + "assert_matches", + "clap 4.2.4", + "leb128", + "metrics", + "once_cell", + "rand 0.8.5", + "rayon", + "serde", + "serde_json", + "serde_with", + "tempfile", + "thiserror", + "zksync_config", + "zksync_crypto", + "zksync_storage", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" @@ -7426,6 +7744,7 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "bincode", "google-cloud-auth", "google-cloud-default", "google-cloud-storage", @@ -7442,8 +7761,12 @@ dependencies = [ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ + "ctrlc", + "futures 0.3.27", "metrics", + "regex", "reqwest", + "tokio", "vlog", "zksync_config", "zksync_utils", @@ -7464,6 +7787,7 @@ dependencies = [ name = "zksync_state" version = "1.0.0" dependencies = [ + "metrics", "tempfile", "vlog", "zksync_storage", @@ -7579,6 +7903,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", + "vlog", "zk_evm", "zksync_basic_types", ] @@ -7614,6 +7939,34 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_witness_generator" +version = "1.0.0" +dependencies = [ + "async-trait", + "bincode", + "const-decoder", + "futures 0.3.27", + "metrics", + "prometheus_exporter", + "rand 0.8.5", + "serde", + "structopt", + "tokio", + "vlog", + "vm", + "zksync_config", + "zksync_dal", + "zksync_db_storage_provider", + "zksync_object_store", + "zksync_prover_utils", + "zksync_queued_job_processor", + "zksync_state", + "zksync_types", + "zksync_utils", + "zksync_verification_key_generator_and_server", +] + [[package]] name = "zstd" version = "0.12.3+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index cf6bd9c1f6b9..5bb5ea268596 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,14 +6,17 @@ members = [ # "core/bin/prover", # Server "core/bin/zksync_core", + 
"core/bin/external_node", "core/bin/admin-tools", "core/bin/system-constants-generator", + "core/tests/cross_external_nodes_checker", # Contract verifier "core/bin/contract-verifier", # Setup key generator and server: its commented as it cannot be built with stable rust. # "core/bin/setup_key_generator_and_server", # Verification key generator and server "core/bin/verification_key_generator_and_server", + "core/bin/witness_generator", # circuit synthesizer: its commented as it cannot be built with stable rust. # "core/bin/circuit_synthesizer", # Libraries @@ -28,6 +31,7 @@ members = [ "core/lib/eth_signer", "core/lib/mempool", "core/lib/merkle_tree", + "core/lib/merkle_tree2", "core/lib/object_store", "core/lib/mini_merkle_tree", "core/lib/prometheus_exporter", @@ -40,10 +44,13 @@ members = [ "core/lib/vlog", "core/lib/vm", "core/lib/web3_decl", + "core/lib/db_storage_provider", # Test infrastructure "core/tests/loadnext", "core/tests/testkit", + "core/tests/vm-benchmark", + "core/tests/vm-benchmark/harness", # SDK section "sdk/zksync-rs", @@ -55,3 +62,7 @@ exclude = [ "core/bin/prover", "core/bin/circuit_synthesizer", "core/bin/setup_k [profile.test.package.zksync_merkle_tree] opt-level = 3 +# for `perf` profiling +[profile.perf] +inherits = "release" +debug = true diff --git a/bors.toml b/bors.toml index 339bbc08f476..683bf268844c 100644 --- a/bors.toml +++ b/bors.toml @@ -4,14 +4,12 @@ timeout-sec = 14400 # If expected statuses are not specified explicitly, bors tries to "guess" and apperently does it wrong sometimes status = [ - "codecov/patch", - "codecov/project", - "generate", "integration", "loadtest", "lint", "testkit", - "Build images / Build and Push Docker Images" + "unit-tests", + "Build images / Build and Push Docker Images", ] use_squash_merge = true diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 0ce92e5d12cd..1ff1c3e455f4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,381 @@ # Changelog +## [5.0.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.0.0...v5.0.1) (2023-05-30) + + +### Bug Fixes + +* **explorer-api:** remove IFs for zero address ([#1880](https://github.com/matter-labs/zksync-2-dev/issues/1880)) ([2590a69](https://github.com/matter-labs/zksync-2-dev/commit/2590a696caa3a2a3800d97aa2af5b3b355c777a2)) +* **vm:** Revert "fix: Improve event spam performance ([#1882](https://github.com/matter-labs/zksync-2-dev/issues/1882))" ([#1896](https://github.com/matter-labs/zksync-2-dev/issues/1896)) ([8a07cdd](https://github.com/matter-labs/zksync-2-dev/commit/8a07cdd3e13b9add6066bafc51a5c33d5b81111d)) + +## [5.0.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.5.0...v5.0.0) (2023-05-29) + + +### ⚠ BREAKING CHANGES + +* Upgrade to VM1.3.2 ([#1802](https://github.com/matter-labs/zksync-2-dev/issues/1802)) + +### Features + +* **contract-verifier:** binary for loading verified sources ([#1839](https://github.com/matter-labs/zksync-2-dev/issues/1839)) ([44fcacd](https://github.com/matter-labs/zksync-2-dev/commit/44fcacd6e4285fde0e73c90795c416ab9dd6d3c8)) +* **explorer-api:** Rework `get_account_transactions_hashes_page` ([#1876](https://github.com/matter-labs/zksync-2-dev/issues/1876)) ([7bbdd0f](https://github.com/matter-labs/zksync-2-dev/commit/7bbdd0f4f814085bb1ca6559736fdb6ca32add30)) +* **external node:** Concurrent data fetching ([#1855](https://github.com/matter-labs/zksync-2-dev/issues/1855)) ([fa294aa](https://github.com/matter-labs/zksync-2-dev/commit/fa294aaa929f10b6a72d29407b1f4e9071e57b5e)) +* **external node:** Expose 
'external_node.synced' metric ([#1843](https://github.com/matter-labs/zksync-2-dev/issues/1843)) ([1c0a5ef](https://github.com/matter-labs/zksync-2-dev/commit/1c0a5ef02317e7316d70d2929199d85b873e12de)) +* **external node:** Expose sync lag metric ([#1848](https://github.com/matter-labs/zksync-2-dev/issues/1848)) ([2331175](https://github.com/matter-labs/zksync-2-dev/commit/2331175133057dbb1c35d76ced89c0061b9730d1)) +* **merkle tree:** Implement full mode for the new tree ([#1825](https://github.com/matter-labs/zksync-2-dev/issues/1825)) ([438a54e](https://github.com/matter-labs/zksync-2-dev/commit/438a54e994b8f6c62d8718f67388e56dbd5eba8a)) +* **merkle tree:** Integrate full mode in new tree in `MetadataCalculator` ([#1858](https://github.com/matter-labs/zksync-2-dev/issues/1858)) ([aee6fc9](https://github.com/matter-labs/zksync-2-dev/commit/aee6fc9bdcc6680a46a1d37814d1bda99343a513)) +* **merkle tree:** Parallelize full mode in new tree ([#1844](https://github.com/matter-labs/zksync-2-dev/issues/1844)) ([7b835ef](https://github.com/matter-labs/zksync-2-dev/commit/7b835ef01642a9fdcae607c3ac306211f2df5ca9)) +* Upgrade to VM1.3.2 ([#1802](https://github.com/matter-labs/zksync-2-dev/issues/1802)) ([e46da3d](https://github.com/matter-labs/zksync-2-dev/commit/e46da3dc67c19631690dd5c265411c47e8a0716c)) + + +### Bug Fixes + +* Add visibility to get number of GPUs ([#1830](https://github.com/matter-labs/zksync-2-dev/issues/1830)) ([8245420](https://github.com/matter-labs/zksync-2-dev/commit/8245420f2bad1c51f1f8856c8be35c6cb65485b8)) +* **api:** Don't require ZkSyncConfig to instantiate API ([#1816](https://github.com/matter-labs/zksync-2-dev/issues/1816)) ([263e546](https://github.com/matter-labs/zksync-2-dev/commit/263e546a122982954cb5c37de939f851390308ae)) +* **api:** set real nonce during fee estimation ([#1817](https://github.com/matter-labs/zksync-2-dev/issues/1817)) ([a3916ea](https://github.com/matter-labs/zksync-2-dev/commit/a3916eac038f6e2bb7b961e26a713cc176bd1b26)) +* Don't require ZkSyncConfig to perform genesis ([#1865](https://github.com/matter-labs/zksync-2-dev/issues/1865)) ([f7e7424](https://github.com/matter-labs/zksync-2-dev/commit/f7e7424c7bd00482e3562869779e3ad344529f62)) +* **external node:** Allow reorg detector to be 1 block ahead of the main node ([#1853](https://github.com/matter-labs/zksync-2-dev/issues/1853)) ([3c5a1f6](https://github.com/matter-labs/zksync-2-dev/commit/3c5a1f698af86a3739aa6e311a968e920718e368)) +* **external node:** Allow reorg detector to work on executed batches too ([#1869](https://github.com/matter-labs/zksync-2-dev/issues/1869)) ([b1d991c](https://github.com/matter-labs/zksync-2-dev/commit/b1d991ccb604766da0bf7686cedb1d36ab01ad05)) +* **external node:** Fix batch status gaps in batch status updater ([#1836](https://github.com/matter-labs/zksync-2-dev/issues/1836)) ([354876e](https://github.com/matter-labs/zksync-2-dev/commit/354876eb22eb2dacd237ab700b37df6ae03269e8)) +* **external node:** Shutdown components on the reorg detector failure ([#1842](https://github.com/matter-labs/zksync-2-dev/issues/1842)) ([ac8395c](https://github.com/matter-labs/zksync-2-dev/commit/ac8395c90303b66bd827aa973f4308ca8cfb30d2)) +* Improve event spam performance ([#1882](https://github.com/matter-labs/zksync-2-dev/issues/1882)) ([f37f858](https://github.com/matter-labs/zksync-2-dev/commit/f37f85813f2aefee792378b73fba8a64047ab371)) +* make iai comparison work even when benchmark sets differ ([#1888](https://github.com/matter-labs/zksync-2-dev/issues/1888)) 
([acd4054](https://github.com/matter-labs/zksync-2-dev/commit/acd405411380d75684342b3f54e2ff616aa1db43)) +* **merkle tree:** Do not require object store config for external node ([#1875](https://github.com/matter-labs/zksync-2-dev/issues/1875)) ([ca5cf7a](https://github.com/matter-labs/zksync-2-dev/commit/ca5cf7a4a1d6b3778ad4085a43bd08a343efe72d)) +* **object store:** Fix `block_on()` in `GoogleCloudStorage` ([#1841](https://github.com/matter-labs/zksync-2-dev/issues/1841)) ([bd60f6b](https://github.com/matter-labs/zksync-2-dev/commit/bd60f6be5f72b363fb1ca9b194f52917ca75153e)) +* **setup-key-generator:** update vm version in setup-key generator ([#1867](https://github.com/matter-labs/zksync-2-dev/issues/1867)) ([3d45b1f](https://github.com/matter-labs/zksync-2-dev/commit/3d45b1fadf152c8f22ae1e980f7f45a0a7ffe1df)) +* update zk_evm ([#1861](https://github.com/matter-labs/zksync-2-dev/issues/1861)) ([04121d7](https://github.com/matter-labs/zksync-2-dev/commit/04121d7cbbc6776be0aaf1aba235360b283ca794)) +* **vm1.3.2:** update crypto dep to fix main vm circuit synthesis ([#1889](https://github.com/matter-labs/zksync-2-dev/issues/1889)) ([855aead](https://github.com/matter-labs/zksync-2-dev/commit/855aeadc15ef2aeca024fe1616bc438ce9910e2a)) +* **vm:** include zero hash related recent fixes from 1.3.1 to 1.3.2 ([#1874](https://github.com/matter-labs/zksync-2-dev/issues/1874)) ([7e622be](https://github.com/matter-labs/zksync-2-dev/commit/7e622be4669e359be7bb6d0858e701ae34b2b963)) + + +### Performance Improvements + +* **merkle tree:** Garbage collection for tree revert artifacts ([#1866](https://github.com/matter-labs/zksync-2-dev/issues/1866)) ([8e23486](https://github.com/matter-labs/zksync-2-dev/commit/8e23486b03133e269feb412c7d2f129109a21a6a)) + +## [4.5.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.4.0...v4.5.0) (2023-05-16) + + +### Features + +* **merkle tree:** Parallelize tree traversal ([#1814](https://github.com/matter-labs/zksync-2-dev/issues/1814)) ([4f7bede](https://github.com/matter-labs/zksync-2-dev/commit/4f7bede980cb3e20bea26261d86cf59a78e4a8f6)) +* **merkle tree:** Throttle new tree implementation ([#1835](https://github.com/matter-labs/zksync-2-dev/issues/1835)) ([1767b70](https://github.com/matter-labs/zksync-2-dev/commit/1767b70edd862e4a68d39c9c932ab997e4f81a6d)) +* **state-keeper:** Implement bounded gas adjuster ([#1811](https://github.com/matter-labs/zksync-2-dev/issues/1811)) ([65e33ad](https://github.com/matter-labs/zksync-2-dev/commit/65e33addd3aadac2a9eefb041ee3678168bfbb01)) +* support sepolia network ([#1822](https://github.com/matter-labs/zksync-2-dev/issues/1822)) ([79a2a0c](https://github.com/matter-labs/zksync-2-dev/commit/79a2a0ce009e841ecae1484270dafa61beee905b)) + + +### Bug Fixes + +* Add tree readiness check to healtcheck endpoint ([#1789](https://github.com/matter-labs/zksync-2-dev/issues/1789)) ([3010900](https://github.com/matter-labs/zksync-2-dev/commit/30109004986e8a19603db7f31af7a06bea3344bb)) +* update zkevm-test-harness (exluding transitive dependencies) ([#1827](https://github.com/matter-labs/zksync-2-dev/issues/1827)) ([faa2900](https://github.com/matter-labs/zksync-2-dev/commit/faa29000a841ba2949bb9769dd9b9d0b01493384)) + + +### Performance Improvements + +* make pop_frame correct and use it instead of drain_frame ([#1808](https://github.com/matter-labs/zksync-2-dev/issues/1808)) ([bb58fa1](https://github.com/matter-labs/zksync-2-dev/commit/bb58fa1559985c0663fa2daa44b4ea75f2c98883)) + +## 
[4.4.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.3.0...v4.4.0) (2023-05-08) + + +### Features + +* **api:** Expose metrics about open ws ([#1805](https://github.com/matter-labs/zksync-2-dev/issues/1805)) ([5888047](https://github.com/matter-labs/zksync-2-dev/commit/5888047732f61f2916bc03f4516512467fc2d9e9)) +* **api:** revert correct errors to api ([#1806](https://github.com/matter-labs/zksync-2-dev/issues/1806)) ([f3b1a6b](https://github.com/matter-labs/zksync-2-dev/commit/f3b1a6bc8fd977a6be0b5ad01d7e0dfcd71e05ba)) +* **external node:** Fetch L1 gas price from the main node ([#1796](https://github.com/matter-labs/zksync-2-dev/issues/1796)) ([9b0b771](https://github.com/matter-labs/zksync-2-dev/commit/9b0b771095c78d4b3a3572d75abc1e93d0334ee3)) +* **external node:** Reorg detector ([#1747](https://github.com/matter-labs/zksync-2-dev/issues/1747)) ([c3f9b71](https://github.com/matter-labs/zksync-2-dev/commit/c3f9b71d0ed85c2a45ca225de1887e10695b01a1)) +* **merkle tree:** Allow using old / new tree based on config ([#1776](https://github.com/matter-labs/zksync-2-dev/issues/1776)) ([78117b8](https://github.com/matter-labs/zksync-2-dev/commit/78117b8b3c1fadcd9ba9d6d4a017fa6d3ba5517d)) +* **merkle tree:** Verify tree consistency ([#1795](https://github.com/matter-labs/zksync-2-dev/issues/1795)) ([d590b3f](https://github.com/matter-labs/zksync-2-dev/commit/d590b3f0965a23eb0011779aab829d86d4fdc1d1)) +* **wintess-generator:** create dedicated witness-generator binary for new prover ([#1781](https://github.com/matter-labs/zksync-2-dev/issues/1781)) ([83d45b8](https://github.com/matter-labs/zksync-2-dev/commit/83d45b8d29618c9f96e34ba139c45f5cd18f6585)) + + +### Bug Fixes + +* **api:** waffle incompatibilities ([#1730](https://github.com/matter-labs/zksync-2-dev/issues/1730)) ([910bb9b](https://github.com/matter-labs/zksync-2-dev/commit/910bb9b3fd2936e2f7fc7a6c7369eaec32a968c5)) +* **db:** Error returned from database: syntax error at or near ([#1794](https://github.com/matter-labs/zksync-2-dev/issues/1794)) ([611a05d](https://github.com/matter-labs/zksync-2-dev/commit/611a05de8e5633e13afce31bcce4e3940928f1ad)) +* enable/disable history at compile time ([#1803](https://github.com/matter-labs/zksync-2-dev/issues/1803)) ([0720021](https://github.com/matter-labs/zksync-2-dev/commit/0720021b1c1e30c966f06532de21bde3f01fc647)) +* **external node:** Reduce amount of configuration variables required for the state keeper ([#1798](https://github.com/matter-labs/zksync-2-dev/issues/1798)) ([b2e63a9](https://github.com/matter-labs/zksync-2-dev/commit/b2e63a977583a02d09f68753e3f34ed2eb375cf9)) +* **merkle tree:** Remove double-tree mode from `MetadataCalculator` ([#1801](https://github.com/matter-labs/zksync-2-dev/issues/1801)) ([fca05b9](https://github.com/matter-labs/zksync-2-dev/commit/fca05b91de56ebe992a112b907b5782f77f32d16)) +* Optimize vm memory ([#1797](https://github.com/matter-labs/zksync-2-dev/issues/1797)) ([4d78e54](https://github.com/matter-labs/zksync-2-dev/commit/4d78e5404227c61d52e963bf68dd54682b4e5190)) + +## [4.3.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.2.0...v4.3.0) (2023-05-01) + + +### Features + +* **contract-verifier:** support metadata.bytecodeHash=none ([#1785](https://github.com/matter-labs/zksync-2-dev/issues/1785)) ([c11b7f1](https://github.com/matter-labs/zksync-2-dev/commit/c11b7f10abe105ba7c7698a422a07300df74b079)) +* **db-storage-provider:** abstract db storage provide into a sharable lib 
([#1775](https://github.com/matter-labs/zksync-2-dev/issues/1775)) ([2b76b66](https://github.com/matter-labs/zksync-2-dev/commit/2b76b66580d02d70e512eeb74e89102fc07a81eb)) + + +### Bug Fixes + +* **circuit:** update zkevm to prevent circuit-synthesis failures ([#1786](https://github.com/matter-labs/zksync-2-dev/issues/1786)) ([056e1c9](https://github.com/matter-labs/zksync-2-dev/commit/056e1c9ef449fb48a595895cf99ad92a43b87a47)) +* Sync DAL and SQLX ([#1777](https://github.com/matter-labs/zksync-2-dev/issues/1777)) ([06d2903](https://github.com/matter-labs/zksync-2-dev/commit/06d2903af9453d6eb1250100f7de76344416d50b)) +* **vm:** get_used_contracts vm method ([#1783](https://github.com/matter-labs/zksync-2-dev/issues/1783)) ([d2911de](https://github.com/matter-labs/zksync-2-dev/commit/d2911de0038a9bbae72ffd4507a1202d9c17b7ab)) + +## [4.2.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.1.0...v4.2.0) (2023-04-27) + + +### Features + +* **contract-verifier:** add zksolc v1.3.10 ([#1754](https://github.com/matter-labs/zksync-2-dev/issues/1754)) ([f6dd7fe](https://github.com/matter-labs/zksync-2-dev/commit/f6dd7fe31b42b6304478c45481e40bbf9f59fdbb)) +* **external node:** Implement the eth_syncing method ([#1761](https://github.com/matter-labs/zksync-2-dev/issues/1761)) ([4432611](https://github.com/matter-labs/zksync-2-dev/commit/44326111c5edea227114fa723285004896cef4ac)) +* **merkle tree:** Initial tree implementation ([#1735](https://github.com/matter-labs/zksync-2-dev/issues/1735)) ([edd48fc](https://github.com/matter-labs/zksync-2-dev/commit/edd48fc37bdd58f9f9d85e27d684c01ef2cac8ae)) +* **object-store:** Add retires in object-store ([#1734](https://github.com/matter-labs/zksync-2-dev/issues/1734)) ([2306300](https://github.com/matter-labs/zksync-2-dev/commit/2306300249506d5a9995dfe8acf8b9951907ee3b)) + + +### Bug Fixes + +* **external node:** Fetch base system contracts from the main node ([#1675](https://github.com/matter-labs/zksync-2-dev/issues/1675)) ([eaa8637](https://github.com/matter-labs/zksync-2-dev/commit/eaa86378bd3b6a6cd2b64dcdb4e1a6c585244d0c)) +* **integration-tests:** Fix bugs in our integration tests ([#1758](https://github.com/matter-labs/zksync-2-dev/issues/1758)) ([6914170](https://github.com/matter-labs/zksync-2-dev/commit/691417004f768462b874c20a79f4605e4e327eab)) +* Make the DAL interface fully blocking ([#1755](https://github.com/matter-labs/zksync-2-dev/issues/1755)) ([7403c7c](https://github.com/matter-labs/zksync-2-dev/commit/7403c7cf278b71f3720967c509cb197f11b68e05)) +* **state-keeper:** remove storage_logs_dedup table ([#1741](https://github.com/matter-labs/zksync-2-dev/issues/1741)) ([0d85310](https://github.com/matter-labs/zksync-2-dev/commit/0d85310adf70f35d1ccb999ff6ffe46c2a2ae0ce)) +* Track `wait_for_prev_hash_time` metric in mempool (state-keeper) ([#1757](https://github.com/matter-labs/zksync-2-dev/issues/1757)) ([107ebbe](https://github.com/matter-labs/zksync-2-dev/commit/107ebbe7e6a2fa527be94da0c83404d75f3df356)) +* **vm:** fix overflows originating from ceil_div ([#1743](https://github.com/matter-labs/zksync-2-dev/issues/1743)) ([a39a1c9](https://github.com/matter-labs/zksync-2-dev/commit/a39a1c94d256d42cd4d1e8ee37665772d993b9f7)) + +## [4.1.0](https://github.com/matter-labs/zksync-2-dev/compare/v4.0.0...v4.1.0) (2023-04-25) + + +### Features + +* **api:** store cache between binary search iterations ([#1742](https://github.com/matter-labs/zksync-2-dev/issues/1742)) 
([c0d2afa](https://github.com/matter-labs/zksync-2-dev/commit/c0d2afad7d2e33e559e4474cce947ae3ad4cd2d7)) +* **contract-verifier:** add zksolc v1.3.9 ([#1732](https://github.com/matter-labs/zksync-2-dev/issues/1732)) ([880d19a](https://github.com/matter-labs/zksync-2-dev/commit/880d19a88f9edd4b1293a65fe83026b64c4a1a5f)) +* **external node:** Spawn healthcheck server ([#1728](https://github.com/matter-labs/zksync-2-dev/issues/1728)) ([c092590](https://github.com/matter-labs/zksync-2-dev/commit/c0925908bfe0b116c115659467077010972f9c8e)) +* **vm:** Correctly count storage invocations ([#1725](https://github.com/matter-labs/zksync-2-dev/issues/1725)) ([108a8f5](https://github.com/matter-labs/zksync-2-dev/commit/108a8f57d17f55012c7afd2dd02eb25bbd72eef2)) +* **vm:** make vm history optional ([#1717](https://github.com/matter-labs/zksync-2-dev/issues/1717)) ([b61452e](https://github.com/matter-labs/zksync-2-dev/commit/b61452e51689ae5e6809817a45685ad1bcc31064)) +* **vm:** Trace transaction calls ([#1556](https://github.com/matter-labs/zksync-2-dev/issues/1556)) ([e520e46](https://github.com/matter-labs/zksync-2-dev/commit/e520e4610277ba838c2ed3cbb21f8e890b44c5d7)) + + +### Bug Fixes + +* add coeficient to gas limit + method for full fee estimation ([#1622](https://github.com/matter-labs/zksync-2-dev/issues/1622)) ([229cda9](https://github.com/matter-labs/zksync-2-dev/commit/229cda977daa11a98a97515a2f75d709e2e8ed9a)) +* **db:** Add index on events (address, miniblock_number, event_index_in_block) ([#1727](https://github.com/matter-labs/zksync-2-dev/issues/1727)) ([6f15141](https://github.com/matter-labs/zksync-2-dev/commit/6f15141c67e20f764c3f84dc17152df7b2e7887a)) +* **explorer-api:** filter out fictive transactions and fix mint/burn events deduplication ([#1724](https://github.com/matter-labs/zksync-2-dev/issues/1724)) ([cd2376b](https://github.com/matter-labs/zksync-2-dev/commit/cd2376b0c37cde5eb8c0ee7db8ae9981052b88ed)) +* **external node:** Use unique connection pools for critical components ([#1736](https://github.com/matter-labs/zksync-2-dev/issues/1736)) ([9e1b817](https://github.com/matter-labs/zksync-2-dev/commit/9e1b817da59c7201602fc463f3cfa1dc50a3c304)) +* **tree:** do not decrease leaf index for non existing leaf ([#1731](https://github.com/matter-labs/zksync-2-dev/issues/1731)) ([3c8918e](https://github.com/matter-labs/zksync-2-dev/commit/3c8918eecb8151e94c810582101e99d8929a6e7a)) + +## [4.0.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.9.1...v4.0.0) (2023-04-20) + + +### ⚠ BREAKING CHANGES + +* Implement WETH bridge, support custom bridge in sdk, bootloader gas calculation fix ([#1633](https://github.com/matter-labs/zksync-2-dev/issues/1633)) + +### Features + +* Implement WETH bridge, support custom bridge in sdk, bootloader gas calculation fix ([#1633](https://github.com/matter-labs/zksync-2-dev/issues/1633)) ([eb67ec5](https://github.com/matter-labs/zksync-2-dev/commit/eb67ec555bc027137d80122873cd12a93f9234c6)) + + +### Bug Fixes + +* **external node:** Get timestamp after applying pending miniblocks from IO ([#1722](https://github.com/matter-labs/zksync-2-dev/issues/1722)) ([875921a](https://github.com/matter-labs/zksync-2-dev/commit/875921a3462807aae53ef4cb8e15564d7015e7fa)) +* Use stronger server kill for fee projection test ([#1701](https://github.com/matter-labs/zksync-2-dev/issues/1701)) ([d5e65b2](https://github.com/matter-labs/zksync-2-dev/commit/d5e65b234bd904f34c74f959aee10d2f4ad4156e)) + +## 
[3.9.1](https://github.com/matter-labs/zksync-2-dev/compare/v3.9.0...v3.9.1) (2023-04-18) + + +### Bug Fixes + +* **vm:** small import refactor ([cfca479](https://github.com/matter-labs/zksync-2-dev/commit/cfca4794620f19911773ccc5276bcb07170a5aab)) + +## [3.9.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.8.0...v3.9.0) (2023-04-18) + + +### Features + +* **api servers:** panic when a transaction execution results in too many storage accesses ([#1718](https://github.com/matter-labs/zksync-2-dev/issues/1718)) ([fb910fe](https://github.com/matter-labs/zksync-2-dev/commit/fb910fe5ba07fcd02bec1a7a9379806e07d7b3d3)) +* **house-keeper:** move polling interval to config ([#1684](https://github.com/matter-labs/zksync-2-dev/issues/1684)) ([49c7ff3](https://github.com/matter-labs/zksync-2-dev/commit/49c7ff360a7b70054f88a48f11776e25bd1980ff)) +* **prover:** allow region+zone to be overridden for non-gcp env ([#1715](https://github.com/matter-labs/zksync-2-dev/issues/1715)) ([f1df9b0](https://github.com/matter-labs/zksync-2-dev/commit/f1df9b072eb7ef1d5d55748b9baca11bb361ef04)) + + +### Bug Fixes + +* add custom buckets for db/vm ratio ([#1707](https://github.com/matter-labs/zksync-2-dev/issues/1707)) ([811d3ad](https://github.com/matter-labs/zksync-2-dev/commit/811d3adbe834edb745e75bbb196074fc72303f5f)) +* **api:** override `max_priority_fee` when estimating ([#1708](https://github.com/matter-labs/zksync-2-dev/issues/1708)) ([14830f2](https://github.com/matter-labs/zksync-2-dev/commit/14830f2198f9b81e5465f2d695f37ef0dfd78679)) +* **eth-sender:** resend all txs ([#1710](https://github.com/matter-labs/zksync-2-dev/issues/1710)) ([cb20109](https://github.com/matter-labs/zksync-2-dev/commit/cb20109ea5bebd2bbd7142c3f87890a08ff9ae59)) +* update @matterlabs/hardhat-zksync-solc to 3.15 ([#1713](https://github.com/matter-labs/zksync-2-dev/issues/1713)) ([e3fa879](https://github.com/matter-labs/zksync-2-dev/commit/e3fa879ed0dbbd9b9d515c9c413993d6e94106f5)) +* **vm:** fix deduplicating factory deps ([#1709](https://github.com/matter-labs/zksync-2-dev/issues/1709)) ([a05cf7e](https://github.com/matter-labs/zksync-2-dev/commit/a05cf7ea2732899bfe3734004b502850a9137a00)) +* **vm:** underflow in tests ([#1685](https://github.com/matter-labs/zksync-2-dev/issues/1685)) ([1bac564](https://github.com/matter-labs/zksync-2-dev/commit/1bac56427ebc6473a7dc40bae3e05d3fd56b1dac)) + +## [3.8.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.7.2...v3.8.0) (2023-04-17) + + +### Features + +* **object-store:** support loading credentials from file ([#1674](https://github.com/matter-labs/zksync-2-dev/issues/1674)) ([4f82574](https://github.com/matter-labs/zksync-2-dev/commit/4f825746a70423b935b79ef6227683cb2afdb63f)) + + +### Bug Fixes + +* **contract-verifier:** fix input deserialization ([#1704](https://github.com/matter-labs/zksync-2-dev/issues/1704)) ([c390e5f](https://github.com/matter-labs/zksync-2-dev/commit/c390e5f0e99fd54b21f762f609aa81451598a219)) +* **contract-verifier:** parse isSystem setting ([#1686](https://github.com/matter-labs/zksync-2-dev/issues/1686)) ([a8d0e99](https://github.com/matter-labs/zksync-2-dev/commit/a8d0e990e0651a647bcde28051f80552ec662613)) +* **tracking:** remove unused import ([adf4e4b](https://github.com/matter-labs/zksync-2-dev/commit/adf4e4b36f4831c69664dd4902a47b7e7c3bc1e5)) + +## [3.7.2](https://github.com/matter-labs/zksync-2-dev/compare/v3.7.1...v3.7.2) (2023-04-16) + + +### Bug Fixes + +* **logging:** add more logging when saving events in the DB 
([85212e6](https://github.com/matter-labs/zksync-2-dev/commit/85212e6210b80a3b1d4e25528dd7c15d03a5e652)) +* **logging:** add more logging when saving events in the DB ([b9cb0fa](https://github.com/matter-labs/zksync-2-dev/commit/b9cb0fa8fa1b1e71625d2754211b16a5f012ba3e)) +* **logging:** add more logging when saving events in the DB ([0deac3d](https://github.com/matter-labs/zksync-2-dev/commit/0deac3d84d8de085f1fd3d7886ab137a5e9004a2)) +* **logging:** add more logging when saving events in the DB ([d330096](https://github.com/matter-labs/zksync-2-dev/commit/d330096f2f35f3b173eb59981cb496d2f654d8e5)) + +## [3.7.1](https://github.com/matter-labs/zksync-2-dev/compare/v3.7.0...v3.7.1) (2023-04-15) + + +### Bug Fixes + +* **metrics:** item count tracking in state keeper ([#1696](https://github.com/matter-labs/zksync-2-dev/issues/1696)) ([8d7c8d8](https://github.com/matter-labs/zksync-2-dev/commit/8d7c8d889bfc7b4469699f7fb17be65baaf407c4)) + +## [3.7.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.6.0...v3.7.0) (2023-04-14) + + +### Features + +* add getL1BatchDetails method to js SDK ([#1666](https://github.com/matter-labs/zksync-2-dev/issues/1666)) ([babb8a9](https://github.com/matter-labs/zksync-2-dev/commit/babb8a94466a8f8c81a19391d61aa9ea66f9cfa8)) +* **external node:** consistency checker ([#1658](https://github.com/matter-labs/zksync-2-dev/issues/1658)) ([e0d65ef](https://github.com/matter-labs/zksync-2-dev/commit/e0d65ef6604685c8a6213d466a575bc41f8bfe45)) +* **healtcheck:** Add new server with healthcheck for all components ([#1667](https://github.com/matter-labs/zksync-2-dev/issues/1667)) ([5f00e5c](https://github.com/matter-labs/zksync-2-dev/commit/5f00e5c4d55f7783480350138d79c7275ecf531c)) +* **sdk:** extend BlockDetails type to include l1BatchNumber ([#1677](https://github.com/matter-labs/zksync-2-dev/issues/1677)) ([67acf90](https://github.com/matter-labs/zksync-2-dev/commit/67acf90301e401004d41361b43f2d3336a48676e)) +* **state-keeper:** add metrics for how long we wait for a tx ([#1680](https://github.com/matter-labs/zksync-2-dev/issues/1680)) ([c8b4447](https://github.com/matter-labs/zksync-2-dev/commit/c8b4447cc67e426ca184391d3da11e3d648910ce)) +* **state-keeper:** track number of rows when saving blocks to the DB ([#1682](https://github.com/matter-labs/zksync-2-dev/issues/1682)) ([b6f306b](https://github.com/matter-labs/zksync-2-dev/commit/b6f306b97e8e13ef4f80eb657a09ed4389efdb7e)) +* **VM:** track time spent on VM storage access ([#1687](https://github.com/matter-labs/zksync-2-dev/issues/1687)) ([9b645be](https://github.com/matter-labs/zksync-2-dev/commit/9b645beacfabc6478c67581fcf4f00d0c2a08516)) +* **witness-generator:** split witness-generator into individual components ([#1623](https://github.com/matter-labs/zksync-2-dev/issues/1623)) ([82724e1](https://github.com/matter-labs/zksync-2-dev/commit/82724e1d6db16725684351c184e24f7b767a69f4)) + + +### Bug Fixes + +* (logging) total time spent accessing storage = get + set ([#1689](https://github.com/matter-labs/zksync-2-dev/issues/1689)) ([49a3a9b](https://github.com/matter-labs/zksync-2-dev/commit/49a3a9bd3aa25317cfa745f35802f235045864d9)) +* **api:** fix `max_fee_per_gas` estimation ([#1671](https://github.com/matter-labs/zksync-2-dev/issues/1671)) ([aed3112](https://github.com/matter-labs/zksync-2-dev/commit/aed3112d63ec4306f98ccfe20841e9cf298bccd1)) +* **circuit breaker:** add retries for http-call functions ([#1541](https://github.com/matter-labs/zksync-2-dev/issues/1541)) 
([a316446](https://github.com/matter-labs/zksync-2-dev/commit/a316446d6f959198a5ccee8698a549a597e4e716)) +* **external node:** Misc external node fixes ([#1673](https://github.com/matter-labs/zksync-2-dev/issues/1673)) ([da9ea17](https://github.com/matter-labs/zksync-2-dev/commit/da9ea172c0813e19c3be6c78166ff012f087ea97)) +* **loadtest:** override EIP1559 fields ([#1683](https://github.com/matter-labs/zksync-2-dev/issues/1683)) ([6c3eeb3](https://github.com/matter-labs/zksync-2-dev/commit/6c3eeb38ef9485473f1eb1fa428cf163a07c8e62)) +* **loadtest:** update max nonce ahead ([#1668](https://github.com/matter-labs/zksync-2-dev/issues/1668)) ([c5eac45](https://github.com/matter-labs/zksync-2-dev/commit/c5eac45791fba65613c903f06c36d83ce9c1b8b7)) +* **metrics:** minor changes to metrics collection ([#1664](https://github.com/matter-labs/zksync-2-dev/issues/1664)) ([5ba5f3b](https://github.com/matter-labs/zksync-2-dev/commit/5ba5f3b180c1373f2c3274e997496ed0d3125394)) +* **prover-query:** added waiting_to_queued_witness_job_mover ([#1640](https://github.com/matter-labs/zksync-2-dev/issues/1640)) ([dbacac1](https://github.com/matter-labs/zksync-2-dev/commit/dbacac194a1c5961b372b7e316f7ca9e2cc17495)) + +## [3.6.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.5.0...v3.6.0) (2023-04-10) + + +### Features + +* **contract-verifier:** support optimization mode ([#1661](https://github.com/matter-labs/zksync-2-dev/issues/1661)) ([3bb85b9](https://github.com/matter-labs/zksync-2-dev/commit/3bb85b95ec2125bc0bad584d5f89612013aba955)) + +## [3.5.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.4.2...v3.5.0) (2023-04-10) + + +### Features + +* **eth-sender:** abstract max_acceptable_priority_fee in config ([#1651](https://github.com/matter-labs/zksync-2-dev/issues/1651)) ([17c75b2](https://github.com/matter-labs/zksync-2-dev/commit/17c75b291d696545718fe896cbd74276e0a2c148)) +* **witness-generator:** emit metrics for each witness-generator type ([#1650](https://github.com/matter-labs/zksync-2-dev/issues/1650)) ([6d72e67](https://github.com/matter-labs/zksync-2-dev/commit/6d72e67994ae90979fc58c9406cd318bb4e75348)) + + +### Bug Fixes + +* **external node:** docker workflow & foreign key constraint bug ([#1656](https://github.com/matter-labs/zksync-2-dev/issues/1656)) ([2944a00](https://github.com/matter-labs/zksync-2-dev/commit/2944a004a38b71f44d2f5617c9a9945853659d46)) +* **logging:** downgrade non-essential logs to trace level ([#1654](https://github.com/matter-labs/zksync-2-dev/issues/1654)) ([f325995](https://github.com/matter-labs/zksync-2-dev/commit/f3259953d0d5366d75bbdeb840e660861d6eb86a)) +* **prover:** make prover-related jobs run less frequently ([#1647](https://github.com/matter-labs/zksync-2-dev/issues/1647)) ([cb47511](https://github.com/matter-labs/zksync-2-dev/commit/cb475116f5f729798e1dbdb95a99872f5867403b)) +* **state-keeper:** Do not reject tx if bootloader has not enough gas ([#1657](https://github.com/matter-labs/zksync-2-dev/issues/1657)) ([6bce00d](https://github.com/matter-labs/zksync-2-dev/commit/6bce00d44009323114a4d9d7030a2a318e49f82c)) + +## [3.4.2](https://github.com/matter-labs/zksync-2-dev/compare/v3.4.1...v3.4.2) (2023-04-07) + + +### Bug Fixes + +* **api:** use verify-execute mode in `submit_tx` ([#1653](https://github.com/matter-labs/zksync-2-dev/issues/1653)) ([3ed98e2](https://github.com/matter-labs/zksync-2-dev/commit/3ed98e2ca65685aa6087304d57cd2c8eae3a8745)) +* **external node:** Read base system contracts from DB instead of disk 
([#1642](https://github.com/matter-labs/zksync-2-dev/issues/1642)) ([865c9c6](https://github.com/matter-labs/zksync-2-dev/commit/865c9c64767d10661d769ffeeddda83e60bf3273)) +* **object_store:** handle other 404 from crate other than HttpClient … ([#1643](https://github.com/matter-labs/zksync-2-dev/issues/1643)) ([a01f0b2](https://github.com/matter-labs/zksync-2-dev/commit/a01f0b2ec8426d6d009ab40f45ceff5f9f0346ef)) + +## [3.4.1](https://github.com/matter-labs/zksync-2-dev/compare/v3.4.0...v3.4.1) (2023-04-06) + + +### Bug Fixes + +* **prover-queries:** add prover_job_retry_manager component ([#1637](https://github.com/matter-labs/zksync-2-dev/issues/1637)) ([9c0258a](https://github.com/matter-labs/zksync-2-dev/commit/9c0258a3ae178f10a99ccceb5c984079ab055139)) + +## [3.4.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.3.1...v3.4.0) (2023-04-05) + + +### Features + +* **contract_verifier:** add zksolc v1.3.8 ([#1630](https://github.com/matter-labs/zksync-2-dev/issues/1630)) ([1575d12](https://github.com/matter-labs/zksync-2-dev/commit/1575d1280f9160ba21acba30ba985c6b643e12c7)) +* **external node:** External Node Alpha ([#1614](https://github.com/matter-labs/zksync-2-dev/issues/1614)) ([6304567](https://github.com/matter-labs/zksync-2-dev/commit/6304567285c64dcf129fd7ee0630d219564d969a)) +* **state keeper:** computational gas criterion ([#1542](https://github.com/matter-labs/zksync-2-dev/issues/1542)) ([e96a424](https://github.com/matter-labs/zksync-2-dev/commit/e96a424fa594e45b59744b6b74f7f7737bf1ef00)) + + +### Bug Fixes + +* **api:** dont bind block number in get_logs ([#1632](https://github.com/matter-labs/zksync-2-dev/issues/1632)) ([7adbbab](https://github.com/matter-labs/zksync-2-dev/commit/7adbbabd582925cf6e0a21f9d5064641ae95d7d6)) +* **api:** remove explicit number cast in DB query ([#1621](https://github.com/matter-labs/zksync-2-dev/issues/1621)) ([e4ec312](https://github.com/matter-labs/zksync-2-dev/commit/e4ec31261f75265bfb3d954258bcd602917a5a8d)) +* **prover:** fix backoff calculation ([#1629](https://github.com/matter-labs/zksync-2-dev/issues/1629)) ([1b89646](https://github.com/matter-labs/zksync-2-dev/commit/1b89646ae324e69e415eaf38d41ace57dc76551c)) +* **state_keeper:** deduplicate factory deps before compressing ([#1620](https://github.com/matter-labs/zksync-2-dev/issues/1620)) ([35719d1](https://github.com/matter-labs/zksync-2-dev/commit/35719d1fef150321a30c9e94d65f938f551a5850)) + +## [3.3.1](https://github.com/matter-labs/zksync-2-dev/compare/v3.3.0...v3.3.1) (2023-04-04) + + +### Bug Fixes + +* **queued-job-processor:** add exponential back-offs while polling jobs ([#1625](https://github.com/matter-labs/zksync-2-dev/issues/1625)) ([80c6096](https://github.com/matter-labs/zksync-2-dev/commit/80c60960b9901f7427bb002699a3aabc341f2664)) + +## [3.3.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.2.2...v3.3.0) (2023-04-04) + + +### Features + +* **contract-verifier:** support verification of force deployed contracts ([#1611](https://github.com/matter-labs/zksync-2-dev/issues/1611)) ([be37e09](https://github.com/matter-labs/zksync-2-dev/commit/be37e0951a8eb9e37ea4aba3c4bfaa0ba90ac208)) + +## [3.2.2](https://github.com/matter-labs/zksync-2-dev/compare/v3.2.1...v3.2.2) (2023-04-02) + + +### Bug Fixes + +* **explorer-api:** Improve finalized block query ([#1618](https://github.com/matter-labs/zksync-2-dev/issues/1618)) ([c9e0fbc](https://github.com/matter-labs/zksync-2-dev/commit/c9e0fbca2191a4b0886e42f779a3e1d629071633)) + +## 
[3.2.1](https://github.com/matter-labs/zksync-2-dev/compare/v3.2.0...v3.2.1) (2023-04-01) + + +### Bug Fixes + +* **prover:** increase polling interval in job processors ([2f00e64](https://github.com/matter-labs/zksync-2-dev/commit/2f00e64198f2e728933bac810e29cf8545815e6c)) + +## [3.2.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.1.0...v3.2.0) (2023-04-01) + + +### Features + +* **external node:** Prepare the execution layer ([#1594](https://github.com/matter-labs/zksync-2-dev/issues/1594)) ([143a112](https://github.com/matter-labs/zksync-2-dev/commit/143a1122d86592601e24a3b2f71cdc4ab3f85d2b)) +* **tracking:** track individual circuit block height ([#1613](https://github.com/matter-labs/zksync-2-dev/issues/1613)) ([71a302e](https://github.com/matter-labs/zksync-2-dev/commit/71a302e34319ccadb008a04aaa243ce96ac97eb4)) + + +### Bug Fixes + +* **prover:** get rid of exclusive lock ([#1616](https://github.com/matter-labs/zksync-2-dev/issues/1616)) ([3e7443d](https://github.com/matter-labs/zksync-2-dev/commit/3e7443d88415444e424f8cea8bd929c4b4f0c2e5)) + +## [3.1.0](https://github.com/matter-labs/zksync-2-dev/compare/v3.0.8...v3.1.0) (2023-03-29) + + +### Features + +* **api:** implement health check for jsonrpc ([#1605](https://github.com/matter-labs/zksync-2-dev/issues/1605)) ([267c497](https://github.com/matter-labs/zksync-2-dev/commit/267c49708df9f708a93bc69a8a9f0094b6f97a67)) +* **prover-multizone:** Added support for running prover in multi-zone ([#1577](https://github.com/matter-labs/zksync-2-dev/issues/1577)) ([629f63b](https://github.com/matter-labs/zksync-2-dev/commit/629f63b07118c8a17a653c62b5ef3cd4bdfcaaa4)) +* **VM:** Update zk evm ([#1609](https://github.com/matter-labs/zksync-2-dev/issues/1609)) ([643187a](https://github.com/matter-labs/zksync-2-dev/commit/643187ab3e03ca540ce7a01eaddddf459a79dd40)) + + +### Bug Fixes + +* **api-error:** handle empty CannotEstimateGas ([#1606](https://github.com/matter-labs/zksync-2-dev/issues/1606)) ([135e420](https://github.com/matter-labs/zksync-2-dev/commit/135e420e1d1956a11999f465428f9349f73e5581)) +* **api-error:** rename submit tx error from can't estimate tx to gas ([#1548](https://github.com/matter-labs/zksync-2-dev/issues/1548)) ([9a4cbc1](https://github.com/matter-labs/zksync-2-dev/commit/9a4cbc16032a1739820187ff07ca0d1dedef02a0)) +* **eth_sender:** do not save identical eth_txs_history rows ([#1603](https://github.com/matter-labs/zksync-2-dev/issues/1603)) ([13f01de](https://github.com/matter-labs/zksync-2-dev/commit/13f01de846a08f35aa2144bc130f0a84c1626d40)) +* **eth-sender:** Use transaction in confirm_tx method ([#1604](https://github.com/matter-labs/zksync-2-dev/issues/1604)) ([05cffbe](https://github.com/matter-labs/zksync-2-dev/commit/05cffbedd87042707620c86dddecd98eb2337925)) +* **metrics:** fix server.prover.jobs metrics ([#1608](https://github.com/matter-labs/zksync-2-dev/issues/1608)) ([9f351e8](https://github.com/matter-labs/zksync-2-dev/commit/9f351e842ec6178be8d0b1c0b40797ca565319c8)) +* **synthesizer:** update filtering to include region zone ([#1607](https://github.com/matter-labs/zksync-2-dev/issues/1607)) ([12d40b9](https://github.com/matter-labs/zksync-2-dev/commit/12d40b91f5f99b44270aec3e00ed0c0f5fe9adb9)) + +## [3.0.8](https://github.com/matter-labs/zksync-2-dev/compare/v3.0.7...v3.0.8) (2023-03-27) + + +### Bug Fixes + +* **explorer_api:** total_transactions stats ([#1595](https://github.com/matter-labs/zksync-2-dev/issues/1595)) 
([824e4f7](https://github.com/matter-labs/zksync-2-dev/commit/824e4f74beedd1b86bf5134f27ab22c2309ef2f0)) +* **witness-generator:** update test-harness to fix circuit-synthesis failure ([#1596](https://github.com/matter-labs/zksync-2-dev/issues/1596)) ([7453822](https://github.com/matter-labs/zksync-2-dev/commit/74538225ca45dea134acdd8f8f2540dc5a1d64c4)) + ## [3.0.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.11.1...v3.0.0) (2023-03-22) diff --git a/core/bin/admin-tools/src/application.rs b/core/bin/admin-tools/src/application.rs index 34b49eef71ea..1dcb1a1ae741 100644 --- a/core/bin/admin-tools/src/application.rs +++ b/core/bin/admin-tools/src/application.rs @@ -24,8 +24,7 @@ pub fn create_app<'a>(profile: &Option) -> Result, AppError> { let tokio = tokio::runtime::Runtime::new().map_err(|x| AppError::Init(InitError::IO(x)))?; - let db = - tokio.block_on(async { zksync_dal::StorageProcessor::establish_connection(true).await }); + let db = zksync_dal::StorageProcessor::establish_connection_blocking(true); let invocation = std::process::Command::new("stty") .arg("-f") diff --git a/core/bin/circuit_synthesizer/Cargo.lock b/core/bin/circuit_synthesizer/Cargo.lock index 8dedc1c8dd00..91f957301957 100644 --- a/core/bin/circuit_synthesizer/Cargo.lock +++ b/core/bin/circuit_synthesizer/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -431,7 +431,7 @@ dependencies = [ [[package]] name = "bellman_ce" version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayvec 0.7.2", "bit-vec", @@ -478,7 +478,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -506,6 +506,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" + [[package]] name = "bitvec" version = "0.20.4" @@ -552,7 +558,7 @@ dependencies = [ [[package]] name = "blake2s_const" version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -747,7 +753,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -760,7 +766,7 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" 
dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -1081,7 +1087,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "proc-macro-error", "proc-macro2 1.0.52", @@ -1311,16 +1317,6 @@ dependencies = [ "signature", ] -[[package]] -name = "eip712-signature" -version = "0.1.0" -source = "git+https://github.com/vladbochok/eip712-signature#30b11455e7d613313e8c12d2aad961fd4bf902fe" -dependencies = [ - "ethereum-types", - "parity-crypto", - "thiserror", -] - [[package]] name = "either" version = "1.8.1" @@ -1849,9 +1845,8 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +version = "0.9.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "base64 0.21.0", @@ -1871,9 +1866,8 @@ dependencies = [ [[package]] name = "google-cloud-default" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +version = "0.1.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "google-cloud-auth", @@ -1884,8 +1878,7 @@ dependencies = [ [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "reqwest", "thiserror", @@ -1894,9 +1887,8 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +version = "0.10.0" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-stream", "base64 0.21.0", @@ -1916,7 +1908,6 @@ dependencies = [ "thiserror", "time 0.3.20", "tokio", - "tokio-util 0.7.7", "tracing", "url", ] @@ -1924,8 +1915,7 @@ dependencies = [ [[package]] name = "google-cloud-token" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", ] @@ -2008,7 +1998,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes", "headers-core", "http", @@ -2948,7 +2938,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "libc", "static_assertions", @@ -3183,7 +3173,7 @@ version 
= "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3617,7 +3607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "concurrent-queue", "libc", @@ -3776,7 +3766,7 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "api", "bincode", @@ -3805,12 +3795,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "queues" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1475abae4f8ad4998590fe3acfe20104f0a5d48fc420c817cd2c09c3f56151f0" - [[package]] name = "quote" version = "0.6.13" @@ -4029,7 +4013,7 @@ version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4069,7 +4053,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4085,9 +4069,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "cce168fea28d3e05f158bda4576cf0c844d5045bc2cc3620fa0292ed5bb5814c" dependencies = [ "aho-corasick", "memchr", @@ -4105,9 +4089,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" @@ -4290,7 +4274,7 @@ version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -4447,7 +4431,7 @@ version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -4873,7 +4857,7 @@ dependencies = [ "atoi", "base64 0.13.1", "bigdecimal", - "bitflags", + "bitflags 1.3.2", "byteorder", "bytes", "chrono", @@ -5050,15 +5034,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +dependencies = [ + "proc-macro2 1.0.52", + "quote 1.0.26", + "unicode-ident", +] + [[package]] name = "sync_vm" -version = "1.3.1" -source = 
"git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "arrayvec 0.7.2", "cs_derive", "derivative", - "eip712-signature", "franklin-crypto", "hex", "itertools", @@ -5227,14 +5221,13 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", @@ -5242,7 +5235,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -5257,13 +5250,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.52", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.12", ] [[package]] @@ -5962,13 +5955,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -5977,7 +5970,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -5986,13 +5988,28 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + 
"windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -6001,42 +6018,84 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winnow" version = "0.3.5" @@ -6069,25 +6128,21 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" dependencies = [ - "blake2 0.10.6", - "k256", "lazy_static", "num 0.4.0", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", 
"static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" dependencies = [ "env_logger 0.9.3", "hex", @@ -6106,22 +6161,24 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags", + "bitflags 2.2.1", + "blake2 0.10.6", "ethereum-types", + "k256", "lazy_static", "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] name = "zkevm_test_harness" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" dependencies = [ "bincode", - "blake2 0.10.6", "circuit_testing", "codegen 0.2.0", "crossbeam 0.8.2", @@ -6134,8 +6191,6 @@ dependencies = [ "rayon", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "smallvec", "structopt", "sync_vm", @@ -6157,14 +6212,12 @@ dependencies = [ name = "zksync_circuit_synthesizer" version = "0.1.0" dependencies = [ - "bincode", "ctrlc", "futures 0.3.27", "local-ip-address", "metrics", "prometheus_exporter", "prover-service", - "queues", "structopt", "tokio", "vlog", @@ -6189,6 +6242,7 @@ dependencies = [ "serde_json", "url", "zksync_basic_types", + "zksync_contracts", "zksync_utils", ] @@ -6239,7 +6293,7 @@ dependencies = [ "vm", "zksync_config", "zksync_contracts", - "zksync_object_store", + "zksync_health_check", "zksync_state", "zksync_storage", "zksync_types", @@ -6247,6 +6301,10 @@ dependencies = [ "zksync_web3_decl", ] +[[package]] +name = "zksync_health_check" +version = "0.1.0" + [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" @@ -6261,6 +6319,7 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "bincode", "google-cloud-auth", "google-cloud-default", "google-cloud-storage", @@ -6276,8 +6335,12 @@ dependencies = [ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ + "ctrlc", + "futures 0.3.27", "metrics", + "regex", "reqwest", + "tokio", "vlog", "zksync_config", "zksync_utils", @@ -6298,6 +6361,7 @@ dependencies = [ name = "zksync_state" version = "1.0.0" dependencies = [ + "metrics", "vlog", "zksync_storage", "zksync_types", @@ -6360,11 +6424,13 @@ dependencies = [ "envy", "futures 0.3.27", "hex", + "itertools", "num 0.3.1", "reqwest", "serde", "thiserror", "tokio", + "vlog", "zk_evm", "zksync_basic_types", ] diff --git a/core/bin/circuit_synthesizer/Cargo.toml b/core/bin/circuit_synthesizer/Cargo.toml index 843ecd10f6d9..384f01bec1c1 100644 --- a/core/bin/circuit_synthesizer/Cargo.toml +++ b/core/bin/circuit_synthesizer/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" name = "zksync_circuit_synthesizer" path = "src/main.rs" - [dependencies] zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } @@ -18,14 +17,13 @@ vlog = { path = "../../lib/vlog", version = "1.0" } 
prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "main"} -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "main", features=["legacy"], default-features=false} +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} + +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["legacy"], default-features=false} structopt = "0.3.26" -queues = "1.1.0" tokio = { version = "1.23.0", features = ["full"] } futures = { version = "0.3", features = ["compat"] } ctrlc = { version = "3.1", features = ["termination"] } local-ip-address = "0.5.0" -bincode = "1.3.2" metrics = "0.20" diff --git a/core/bin/circuit_synthesizer/rust-toolchain.toml b/core/bin/circuit_synthesizer/rust-toolchain.toml new file mode 100644 index 000000000000..5d56faf9ae08 --- /dev/null +++ b/core/bin/circuit_synthesizer/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly" diff --git a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs index 9604f65149b2..9cfa9ec81829 100644 --- a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs @@ -1,6 +1,10 @@ use std::io::copy; +use std::io::ErrorKind; +use std::io::Read; use std::net::SocketAddr; use std::net::TcpStream; +use std::option::Option; +use std::time::Duration; use std::time::Instant; use local_ip_address::local_ip; @@ -13,44 +17,90 @@ use zkevm_test_harness::bellman::plonk::better_better_cs::cs::Circuit; use zkevm_test_harness::pairing::bn256::Bn256; use zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zksync_config::configs::CircuitSynthesizerConfig; use zksync_config::configs::prover_group::ProverGroupConfig; +use zksync_config::configs::CircuitSynthesizerConfig; use zksync_config::ProverConfigs; -use zksync_dal::ConnectionPool; use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; -use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; -use zksync_object_store::object_store::{create_object_store_from_env, PROVER_JOBS_BUCKET_PATH}; +use zksync_dal::ConnectionPool; +use zksync_object_store::{CircuitKey, ObjectStore, ObjectStoreError, ObjectStoreFactory}; use zksync_prover_utils::numeric_index_to_circuit_name; -use zksync_prover_utils::region_fetcher::get_region; +use zksync_prover_utils::region_fetcher::{get_region, get_zone}; use zksync_queued_job_processor::{async_trait, JobProcessor}; -use zksync_types::proofs::ProverJobMetadata; + +#[derive(Debug)] +pub enum CircuitSynthesizerError { + InvalidGroupCircuits(u8), + InvalidCircuitId(u8), + InputLoadFailed(ObjectStoreError), +} pub struct CircuitSynthesizer { config: CircuitSynthesizerConfig, + blob_store: Box, + allowed_circuit_types: Option>, + region: String, + zone: String, } impl CircuitSynthesizer { - pub fn new(config: CircuitSynthesizerConfig) -> Self { - Self { config } + pub async fn new( + config: CircuitSynthesizerConfig, + prover_groups: ProverGroupConfig, + store_factory: &ObjectStoreFactory, + ) -> Result { + let is_specialized = prover_groups.is_specialized_group_id(config.prover_group_id); + let allowed_circuit_types = if is_specialized { + let types = prover_groups 
+ .get_circuit_ids_for_group_id(config.prover_group_id) + .ok_or(CircuitSynthesizerError::InvalidGroupCircuits( + config.prover_group_id, + ))? + .into_iter() + .map(|id| { + numeric_index_to_circuit_name(id) + .map(|x| (id, x.to_owned())) + .ok_or(CircuitSynthesizerError::InvalidCircuitId(id)) + }) + .collect::, CircuitSynthesizerError>>()?; + Some(types) + } else { + None + }; + + vlog::info!( + "Configured for group [{}], circuits: {allowed_circuit_types:?}", + config.prover_group_id + ); + + Ok(Self { + config, + blob_store: store_factory.create_store(), + allowed_circuit_types: allowed_circuit_types + .map(|x| x.into_iter().map(|x| x.1).collect()), + region: get_region().await, + zone: get_zone().await, + }) } pub fn synthesize( circuit: ZkSyncCircuit>, ) -> (ProvingAssembly, u8) { - let circuit_synthesis_started_at = Instant::now(); + let start_instant = Instant::now(); + let mut assembly = Prover::new_proving_assembly(); circuit .synthesize(&mut assembly) .expect("circuit synthesize failed"); + let circuit_type = numeric_index_to_circuit_name(circuit.numeric_circuit_type()).unwrap(); + vlog::info!( - "Finished circuit synthesis for circuit: {} took {:?} seconds", - circuit_type, - circuit_synthesis_started_at.elapsed().as_secs(), + "Finished circuit synthesis for circuit: {circuit_type} took {:?}", + start_instant.elapsed() ); metrics::histogram!( "server.circuit_synthesizer.synthesize", - circuit_synthesis_started_at.elapsed().as_secs() as f64, + start_instant.elapsed(), "circuit_type" => circuit_type, ); @@ -59,24 +109,6 @@ impl CircuitSynthesizer { } } -fn get_circuit( - prover_job_metadata: ProverJobMetadata, -) -> ZkSyncCircuit> { - let circuit_input_blob_url = prover_circuit_input_blob_url( - prover_job_metadata.block_number, - prover_job_metadata.sequence_number, - prover_job_metadata.circuit_type.clone(), - prover_job_metadata.aggregation_round, - ); - let object_store = create_object_store_from_env(); - let circuit_input = object_store - .get(PROVER_JOBS_BUCKET_PATH, circuit_input_blob_url) - .expect("Failed fetching prover jobs from GCS"); - - bincode::deserialize::>>(&circuit_input) - .expect("Failed to deserialize circuit input") -} - #[async_trait] impl JobProcessor for CircuitSynthesizer { type Job = ZkSyncCircuit>; @@ -88,47 +120,48 @@ impl JobProcessor for CircuitSynthesizer { &self, connection_pool: ConnectionPool, ) -> Option<(Self::JobId, Self::Job)> { - let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); - let prover_group_config = ProverGroupConfig::from_env(); - - let circuit_ids = prover_group_config - .get_circuit_ids_for_group_id(config.prover_group_id) - .unwrap_or(vec![]); - if prover_group_config.is_specialized_group_id(config.prover_group_id) { - assert!(!circuit_ids.is_empty(), "No circuits found for specialized prover group id :{}", config.prover_group_id); - } - vlog::info!("Fetching prover jobs for group: {} and circuits: {:?}", config.prover_group_id, circuit_ids); - let circuit_types: Vec = circuit_ids.iter() - .map(|&id| numeric_index_to_circuit_name(id).unwrap_or_else(|| panic!("unknown id :{}", id)).to_string()) - .collect(); - let prover_job = if circuit_types.is_empty() { - connection_pool - .access_storage_blocking() - .prover_dal() - .get_next_prover_job(self.config.generation_timeout(), self.config.max_attempts)? 
- } else { - connection_pool - .access_storage_blocking() + vlog::trace!( + "Attempting to fetch job types: {:?}", + self.allowed_circuit_types + ); + + let mut storage = connection_pool.access_storage_blocking(); + let prover_job = match &self.allowed_circuit_types { + Some(types) => storage .prover_dal() - .get_next_prover_job_by_circuit_types(self.config.generation_timeout(), self.config.max_attempts, circuit_types)? + .get_next_prover_job_by_circuit_types(types.clone()), + None => storage.prover_dal().get_next_prover_job(), + }?; + + let circuit_key = CircuitKey { + block_number: prover_job.block_number, + sequence_number: prover_job.sequence_number, + circuit_type: &prover_job.circuit_type, + aggregation_round: prover_job.aggregation_round, }; - let job_id = prover_job.id; - Some((job_id, get_circuit(prover_job))) + let input = self + .blob_store + .get(circuit_key) + .map_err(CircuitSynthesizerError::InputLoadFailed) + .unwrap_or_else(|err| panic!("{err:?}")); + + Some((prover_job.id, input)) } async fn save_failure( + &self, pool: ConnectionPool, job_id: Self::JobId, _started_at: Instant, error: String, - ) -> () { - let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); + ) { pool.access_storage_blocking() .prover_dal() - .save_proof_error(job_id, error, config.max_attempts); + .save_proof_error(job_id, error, self.config.max_attempts); } async fn process_job( + &self, _connection_pool: ConnectionPool, job: Self::Job, _started_at: Instant, @@ -137,129 +170,188 @@ impl JobProcessor for CircuitSynthesizer { } async fn save_result( + &self, pool: ConnectionPool, job_id: Self::JobId, _started_at: Instant, - artifacts: Self::JobArtifacts, + (assembly, circuit_id): Self::JobArtifacts, ) { - let region = get_region().await; - vlog::info!("Finished circuit synthesis for job: {} in region: {}", job_id, region); - let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); - let (assembly, circuit_id) = artifacts; + vlog::trace!( + "Finished circuit synthesis for job: {job_id} in region: {}", + self.region + ); + + let now = Instant::now(); + let mut serialized: Vec = vec![]; + serialize_job(&assembly, job_id as usize, circuit_id, &mut serialized); + + vlog::trace!( + "Serialized circuit assembly for job {job_id} in {:?}", + now.elapsed() + ); + let now = Instant::now(); - while now.elapsed() < config.prover_instance_wait_timeout() { - let optional_prover_instance = pool - .clone() + let mut attempts = 0; + + while now.elapsed() < self.config.prover_instance_wait_timeout() { + let prover = pool .access_storage_blocking() .gpu_prover_queue_dal() - .get_free_prover_instance(config.gpu_prover_queue_timeout(), config.prover_group_id, region.clone()); - match optional_prover_instance { - Some(address) => { - vlog::info!( - "Found a free prover instance: {:?} to send assembly for job: {}", - address, - job_id - ); - send_assembly(job_id, circuit_id, assembly, address, pool); + .lock_available_prover( + self.config.gpu_prover_queue_timeout(), + self.config.prover_group_id, + self.region.clone(), + self.zone.clone(), + ); + + if let Some(address) = prover { + let result = send_assembly(job_id, &mut serialized, &address); + handle_send_result( + &result, + job_id, + &address, + &pool, + self.region.clone(), + self.zone.clone(), + ); + + if result.is_ok() { return; } - None => { - sleep(config.prover_instance_poll_time()).await; - } + // We'll retry with another prover again, no point in dropping the results. 
+ + vlog::warn!( + "Could not send assembly to {address:?}. Prover group {}, region {}, \ + circuit id {circuit_id}, send attempt {attempts}.", + self.config.prover_group_id, + self.region + ); + attempts += 1; + } else { + sleep(self.config.prover_instance_poll_time()).await; } } - vlog::info!( - "Not able to get any free prover instance for sending assembly for job: {}", - job_id + vlog::trace!( + "Not able to get any free prover instance for sending assembly for job: {job_id}" ); } } fn send_assembly( job_id: u32, - circuit_id: u8, - assembly: ProvingAssembly, - address: SocketAddress, - pool: ConnectionPool, -) { - let socket_address = SocketAddr::new(address.host, address.port); - vlog::info!( - "Sending assembly to host: {}, port: {}", + serialized: &mut Vec, + address: &SocketAddress, +) -> Result<(Duration, u64), String> { + vlog::trace!( + "Sending assembly to {}:{}, job id {{{job_id}}}", address.host, address.port ); - match TcpStream::connect(socket_address) { - Ok(stream) => { - serialize_and_send(job_id, circuit_id, address, stream, assembly, pool); + + let socket_address = SocketAddr::new(address.host, address.port); + let started_at = Instant::now(); + let mut error_messages = vec![]; + + for _ in 0..10 { + match TcpStream::connect(socket_address) { + Ok(mut stream) => { + return send(&mut serialized.as_slice(), &mut stream) + .map(|result| (started_at.elapsed(), result)) + .map_err(|err| format!("Could not send assembly to prover: {err:?}")); + } + Err(err) => { + error_messages.push(format!("{err:?}")); + } } - Err(e) => { - vlog::info!( - "Failed sending assembly to address: {:?}, socket not reachable reason: {:?}", - address, - e - ); - handle_unreachable_prover_instance(job_id, address, pool); + } + + Err(format!( + "Could not establish connection with prover after several attempts: {error_messages:?}" + )) +} + +fn send(read: &mut impl Read, tcp: &mut TcpStream) -> std::io::Result { + let mut attempts = 10; + let mut last_result = Ok(0); + + while attempts > 0 { + match copy(read, tcp) { + Ok(copied) => return Ok(copied), + Err(err) if can_be_retried(err.kind()) => { + attempts -= 1; + last_result = Err(err); + } + Err(err) => return Err(err), } + + std::thread::sleep(Duration::from_millis(50)); } + + last_result } -fn serialize_and_send( +fn can_be_retried(err: ErrorKind) -> bool { + matches!(err, ErrorKind::TimedOut | ErrorKind::ConnectionRefused) +} + +fn handle_send_result( + result: &Result<(Duration, u64), String>, job_id: u32, - circuit_id: u8, - address: SocketAddress, - mut stream: TcpStream, - assembly: ProvingAssembly, - pool: ConnectionPool, + address: &SocketAddress, + pool: &ConnectionPool, + region: String, + zone: String, ) { - let started_at = Instant::now(); - let mut serialized: Vec = vec![]; - serialize_job::<_>(&assembly, job_id as usize, circuit_id, &mut serialized); - let blob_size_in_gb = serialized.len() / (1024 * 1024 * 1024); - copy(&mut serialized.as_slice(), &mut stream) - .unwrap_or_else(|_| panic!("failed sending assembly to address: {:?}", address)); - let local_ip = local_ip().expect("Failed obtaining local IP address"); - vlog::info!( - "Sent assembly of size: {}GB successfully, took: {} seconds for job: {} by: {:?} to: {:?}", - blob_size_in_gb, - started_at.elapsed().as_secs(), - job_id, - local_ip, - address - ); - metrics::histogram!( - "server.circuit_synthesizer.blob_sending_time", - started_at.elapsed().as_secs() as f64, - "blob_size_in_gb" => blob_size_in_gb.to_string(), - ); - handle_successful_sent_assembly(job_id, pool); -} + 
match result { + Ok((elapsed, len)) => { + let local_ip = local_ip().expect("Failed obtaining local IP address"); + let blob_size_in_gb = len / (1024 * 1024 * 1024); -fn handle_successful_sent_assembly(job_id: u32, pool: ConnectionPool) { - // releasing prover instance in gpu_prover_queue by marking it available is done by prover itself. - // we don't do it here to avoid race condition. + // region: logs - // mark the job as `in_gpu_proof` - pool.clone() - .access_storage_blocking() - .prover_dal() - .update_status(job_id, "in_gpu_proof"); -} + vlog::trace!( + "Sent assembly of size: {blob_size_in_gb}GB successfully, took: {elapsed:?} \ + for job: {job_id} by: {local_ip:?} to: {address:?}" + ); + metrics::histogram!( + "server.circuit_synthesizer.blob_sending_time", + *elapsed, + "blob_size_in_gb" => blob_size_in_gb.to_string(), + ); -fn handle_unreachable_prover_instance(job_id: u32, address: SocketAddress, pool: ConnectionPool) { - // mark prover instance in gpu_prover_queue dead - pool.clone() - .access_storage_blocking() - .gpu_prover_queue_dal() - .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0); - - let prover_config = ProverConfigs::from_env().non_gpu; - // mark the job as failed - pool.clone() - .access_storage_blocking() - .prover_dal() - .save_proof_error( - job_id, - "prover instance unreachable".to_string(), - prover_config.max_attempts, - ); + // endregion + + pool.access_storage_blocking() + .prover_dal() + .update_status(job_id, "in_gpu_proof"); + } + + Err(err) => { + vlog::trace!( + "Failed sending assembly to address: {address:?}, socket not reachable \ + reason: {err}" + ); + + // mark prover instance in gpu_prover_queue dead + pool.access_storage_blocking() + .gpu_prover_queue_dal() + .update_prover_instance_status( + address.clone(), + GpuProverInstanceStatus::Dead, + 0, + region, + zone, + ); + + let prover_config = ProverConfigs::from_env().non_gpu; + // mark the job as failed + pool.access_storage_blocking() + .prover_dal() + .save_proof_error( + job_id, + "prover instance unreachable".to_string(), + prover_config.max_attempts, + ); + } + } } diff --git a/core/bin/circuit_synthesizer/src/main.rs b/core/bin/circuit_synthesizer/src/main.rs index 60f4f55827b9..0f168df2ffb2 100644 --- a/core/bin/circuit_synthesizer/src/main.rs +++ b/core/bin/circuit_synthesizer/src/main.rs @@ -1,18 +1,11 @@ -extern crate core; - -use std::cell::RefCell; - -use futures::{future, SinkExt, StreamExt}; -use futures::channel::mpsc; -use futures::executor::block_on; +use futures::future; use structopt::StructOpt; -use tokio::sync::watch; -use tokio::task::JoinHandle; +use tokio::{sync::oneshot, sync::watch, task::JoinHandle}; use prometheus_exporter::run_prometheus_exporter; -use zksync_config::configs::CircuitSynthesizerConfig; -use zksync_config::configs::utils::Prometheus; +use zksync_config::configs::{utils::Prometheus, CircuitSynthesizerConfig, ProverGroupConfig}; use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; use crate::circuit_synthesizer::CircuitSynthesizer; @@ -27,16 +20,13 @@ struct Opt { number_of_iterations: Option, } -pub async fn wait_for_tasks(task_futures: Vec>) { +async fn wait_for_tasks(task_futures: Vec>) { match future::select_all(task_futures).await.0 { Ok(_) => { vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); } - Err(error) => { - vlog::info!( - "One of the tokio actors unexpectedly finished with error: {:?}", - error - ); + 
Err(err) => { + vlog::info!("One of the tokio actors unexpectedly finished with error: {err:?}"); } } } @@ -54,19 +44,29 @@ async fn main() { } let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); let pool = ConnectionPool::new(None, true); - let circuit_synthesizer = CircuitSynthesizer::new(config.clone()); + + let circuit_synthesizer = CircuitSynthesizer::new( + config.clone(), + ProverGroupConfig::from_env(), + &ObjectStoreFactory::from_env(), + ) + .await + .unwrap_or_else(|err| { + vlog::error!("Could not initialize synthesizer: {err:?}"); + panic!("Could not initialize synthesizer: {err:?}"); + }); let (stop_sender, stop_receiver) = watch::channel(false); - let (stop_signal_sender, mut stop_signal_receiver) = mpsc::channel(256); - { - let stop_signal_sender = RefCell::new(stop_signal_sender.clone()); - ctrlc::set_handler(move || { - let mut sender = stop_signal_sender.borrow_mut(); - block_on(sender.send(true)).expect("Ctrl+C signal send"); - }) - .expect("Error setting Ctrl+C handler"); - } + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(stop_signal_sender) = stop_signal_sender.take() { + stop_signal_sender.send(()).ok(); + } + }) + .expect("Error setting Ctrl+C handler"); + vlog::info!("Starting circuit synthesizer"); let prometheus_config = Prometheus { listener_port: config.prometheus_listener_port, @@ -75,18 +75,14 @@ async fn main() { }; let tasks = vec![ run_prometheus_exporter(prometheus_config, true), - tokio::spawn(circuit_synthesizer.run( - pool, - stop_receiver, - opt.number_of_iterations, - ))]; + tokio::spawn(circuit_synthesizer.run(pool, stop_receiver, opt.number_of_iterations)), + ]; tokio::select! 
{ - _ = async { wait_for_tasks(tasks).await } => {}, - _ = async { stop_signal_receiver.next().await } => { + _ = wait_for_tasks(tasks) => {}, + _ = stop_signal_receiver => { vlog::info!("Stop signal received, shutting down"); - }, - } - ; - let _ = stop_sender.send(true); + } + }; + stop_sender.send(true).ok(); } diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index 6c354256cb0b..cd4b7f1ef926 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -31,3 +31,5 @@ hex = "0.4" serde = { version = "1.0", features = ["derive"] } structopt = "0.3.20" lazy_static = "1.4" +tempfile = "3.0.2" +regex = "1" diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index ed625028f83b..573eb79c9622 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -31,8 +31,8 @@ pub async fn wait_for_tasks(task_futures: Vec>) { } async fn update_compiler_versions(connection_pool: &ConnectionPool) { - let mut storage = connection_pool.access_storage().await; - let mut transaction = storage.start_transaction().await; + let mut storage = connection_pool.access_storage_blocking(); + let mut transaction = storage.start_transaction_blocking(); let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); @@ -72,7 +72,7 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) { .set_solc_versions(solc_versions) .unwrap(); - transaction.commit().await; + transaction.commit_blocking(); } use structopt::StructOpt; diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index 2c3c4efc3164..eb69d5052481 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -4,26 +4,35 @@ use std::path::Path; use std::time::{Duration, Instant}; use chrono::Utc; -use ethabi::Function; +use ethabi::{Contract, Token}; use lazy_static::lazy_static; +use regex::Regex; use tokio::time; use zksync_config::ContractVerifierConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_queued_job_processor::{async_trait, JobProcessor}; -use zksync_types::explorer_api::{ - CompilationArtifacts, DeployContractCalldata, SourceCodeData, VerificationInfo, - VerificationRequest, +use zksync_types::{ + explorer_api::{ + CompilationArtifacts, DeployContractCalldata, SourceCodeData, VerificationInfo, + VerificationRequest, + }, + Address, }; use crate::error::ContractVerifierError; -use crate::zksolc_utils::{CompilerInput, Optimizer, Settings, Source, ZkSolc}; +use crate::zksolc_utils::{ + CompilerInput, CompilerOutput, Optimizer, Settings, Source, StandardJson, ZkSolc, +}; lazy_static! { - static ref CREATE_CONTRACT_FUNCTION: Function = zksync_contracts::deployer_contract() - .function("create") - .unwrap() - .clone(); + static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); +} + +#[derive(Debug)] +enum ConstructorArgs { + Check(Vec), + Ignore, } #[derive(Debug)] @@ -53,26 +62,31 @@ impl ContractVerifier { vlog::warn!("Contract is missing in DB for already accepted verification request. 
Contract address: {:#?}", request.req.contract_address); ContractVerifierError::InternalError })?; - let (constructor_arguments, to_ignore) = - Self::decode_constructor_arguments_from_calldata(creation_tx_calldata); + let constructor_args = Self::decode_constructor_arguments_from_calldata( + creation_tx_calldata, + request.req.contract_address, + ); - if artifacts.bytecode == deployed_bytecode - && (to_ignore || request.req.constructor_arguments.0 == constructor_arguments) - { - if to_ignore { + if artifacts.bytecode != deployed_bytecode { + return Err(ContractVerifierError::BytecodeMismatch); + } + + match constructor_args { + ConstructorArgs::Check(args) => { + if request.req.constructor_arguments.0 != args { + return Err(ContractVerifierError::IncorrectConstructorArguments); + } + } + ConstructorArgs::Ignore => { request.req.constructor_arguments = Vec::new().into(); } - - Ok(VerificationInfo { - request, - artifacts, - verified_at: Utc::now(), - }) - } else if artifacts.bytecode != deployed_bytecode { - Err(ContractVerifierError::BytecodeMismatch) - } else { - Err(ContractVerifierError::IncorrectConstructorArguments) } + + Ok(VerificationInfo { + request, + artifacts, + verified_at: Utc::now(), + }) } async fn compile( @@ -124,44 +138,58 @@ impl ContractVerifier { .await .map_err(|_| ContractVerifierError::CompilationTimeout)??; - if let Some(errors) = output.get("errors") { - let errors = errors.as_array().unwrap().clone(); - if errors - .iter() - .any(|err| err["severity"].as_str().unwrap() == "error") - { - let error_messages = errors - .into_iter() - .map(|err| err["formattedMessage"].clone()) - .collect(); - return Err(ContractVerifierError::CompilationError( - serde_json::Value::Array(error_messages), - )); - } - } + match output { + CompilerOutput::StandardJson(output) => { + if let Some(errors) = output.get("errors") { + let errors = errors.as_array().unwrap().clone(); + if errors + .iter() + .any(|err| err["severity"].as_str().unwrap() == "error") + { + let error_messages = errors + .into_iter() + .map(|err| err["formattedMessage"].clone()) + .collect(); + return Err(ContractVerifierError::CompilationError( + serde_json::Value::Array(error_messages), + )); + } + } - let contracts = output["contracts"] - .get(file_name.as_str()) - .cloned() - .ok_or(ContractVerifierError::MissingSource(file_name))?; - let contract = contracts - .get(&contract_name) - .cloned() - .ok_or(ContractVerifierError::MissingContract(contract_name))?; - let bytecode_str = contract["evm"]["bytecode"]["object"].as_str().ok_or( - ContractVerifierError::AbstractContract(request.req.contract_name), - )?; - let bytecode = hex::decode(bytecode_str).unwrap(); - let abi = contract["abi"].clone(); - if !abi.is_array() { - vlog::error!( - "zksolc returned unexpected value for ABI: {}", - serde_json::to_string_pretty(&abi).unwrap() - ); - return Err(ContractVerifierError::InternalError); - } + let contracts = output["contracts"] + .get(file_name.as_str()) + .cloned() + .ok_or(ContractVerifierError::MissingSource(file_name))?; + let contract = contracts + .get(&contract_name) + .cloned() + .ok_or(ContractVerifierError::MissingContract(contract_name))?; + let bytecode_str = contract["evm"]["bytecode"]["object"].as_str().ok_or( + ContractVerifierError::AbstractContract(request.req.contract_name), + )?; + let bytecode = hex::decode(bytecode_str).unwrap(); + let abi = contract["abi"].clone(); + if !abi.is_array() { + vlog::error!( + "zksolc returned unexpected value for ABI: {}", + 
serde_json::to_string_pretty(&abi).unwrap() + ); + return Err(ContractVerifierError::InternalError); + } - Ok(CompilationArtifacts { bytecode, abi }) + Ok(CompilationArtifacts { bytecode, abi }) + } + CompilerOutput::YulSingleFile(output) => { + let re = Regex::new(r"Contract `.*` bytecode: 0x([\da-f]+)").unwrap(); + let cap = re.captures(&output).unwrap(); + let bytecode_str = cap.get(1).unwrap().as_str(); + let bytecode = hex::decode(bytecode_str).unwrap(); + Ok(CompilationArtifacts { + bytecode, + abi: serde_json::Value::Array(Vec::new()), + }) + } + } } fn build_compiler_input( @@ -178,7 +206,7 @@ impl ContractVerifier { ); match request.req.source_code_data { - SourceCodeData::SingleFile(source_code) => { + SourceCodeData::SolSingleFile(source_code) => { let source = Source { content: source_code, }; @@ -190,44 +218,103 @@ impl ContractVerifier { libraries: None, output_selection: Some(default_output_selection), optimizer, + is_system: request.req.is_system, + metadata: None, }; - Ok(CompilerInput { + Ok(CompilerInput::StandardJson(StandardJson { language: "Solidity".to_string(), sources, settings, - }) + })) } SourceCodeData::StandardJsonInput(map) => { - let mut compiler_input: CompilerInput = + let mut compiler_input: StandardJson = serde_json::from_value(serde_json::Value::Object(map)) .map_err(|_| ContractVerifierError::FailedToDeserializeInput)?; // Set default output selection even if it is different in request. compiler_input.settings.output_selection = Some(default_output_selection); - Ok(compiler_input) + Ok(CompilerInput::StandardJson(compiler_input)) + } + SourceCodeData::YulSingleFile(source_code) => { + Ok(CompilerInput::YulSingleFile(source_code)) } } } fn decode_constructor_arguments_from_calldata( calldata: DeployContractCalldata, - ) -> (Vec, bool) { + contract_address_to_verify: Address, + ) -> ConstructorArgs { match calldata { DeployContractCalldata::Deploy(calldata) => { - // `calldata` is abi encoded call of `function create(bytes32 _salt, bytes32 _bytecodeHash, bytes _input)`. - // Constructor arguments are in the third parameter. - let tokens = CREATE_CONTRACT_FUNCTION - .decode_input(&calldata[4..]) - .expect("Failed to decode constructor arguments"); - ( - tokens[2] - .clone() - .into_bytes() - .expect("The third parameter of `create` should be of type `bytes`"), - false, - ) + let create = DEPLOYER_CONTRACT.function("create").unwrap(); + let create2 = DEPLOYER_CONTRACT.function("create2").unwrap(); + + let create_acc = DEPLOYER_CONTRACT.function("createAccount").unwrap(); + let create2_acc = DEPLOYER_CONTRACT.function("create2Account").unwrap(); + + let force_deploy = DEPLOYER_CONTRACT + .function("forceDeployOnAddresses") + .unwrap(); + // It's assumed that `create` and `create2` methods have the same parameters + // and the same for `createAccount` and `create2Account`. + match &calldata[0..4] { + selector + if selector == create.short_signature() + || selector == create2.short_signature() => + { + let tokens = create + .decode_input(&calldata[4..]) + .expect("Failed to decode input"); + // Constructor arguments are in the third parameter. + ConstructorArgs::Check(tokens[2].clone().into_bytes().expect( + "The third parameter of `create/create2` should be of type `bytes`", + )) + } + selector + if selector == create_acc.short_signature() + || selector == create2_acc.short_signature() => + { + let tokens = create + .decode_input(&calldata[4..]) + .expect("Failed to decode input"); + // Constructor arguments are in the third parameter. 
+ ConstructorArgs::Check( + tokens[2].clone().into_bytes().expect( + "The third parameter of `createAccount/create2Account` should be of type `bytes`", + ), + ) + } + selector if selector == force_deploy.short_signature() => { + let tokens = force_deploy + .decode_input(&calldata[4..]) + .expect("Failed to decode input"); + let deployments = tokens[0].clone().into_array().unwrap(); + for deployment in deployments { + match deployment { + Token::Tuple(tokens) => { + let address = tokens[1].clone().into_address().unwrap(); + if address == contract_address_to_verify { + let call_constructor = + tokens[2].clone().into_bool().unwrap(); + return if call_constructor { + let input = tokens[4].clone().into_bytes().unwrap(); + ConstructorArgs::Check(input) + } else { + ConstructorArgs::Ignore + }; + } + } + _ => panic!("Expected `deployment` to be a tuple"), + } + } + panic!("Couldn't find force deployment for given address"); + } + _ => ConstructorArgs::Ignore, + } } - DeployContractCalldata::Ignore => (Vec::new(), true), + DeployContractCalldata::Ignore => ConstructorArgs::Ignore, } } @@ -271,12 +358,13 @@ impl JobProcessor for ContractVerifier { type JobArtifacts = (); const SERVICE_NAME: &'static str = "contract_verifier"; + const BACKOFF_MULTIPLIER: u64 = 1; async fn get_next_job( &self, connection_pool: ConnectionPool, ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage().await; + let mut connection = connection_pool.access_storage_blocking(); // Time overhead for all operations except for compilation. const TIME_OVERHEAD: Duration = Duration::from_secs(10); @@ -294,12 +382,13 @@ impl JobProcessor for ContractVerifier { } async fn save_failure( + &self, connection_pool: ConnectionPool, job_id: usize, _started_at: Instant, error: String, - ) -> () { - let mut connection = connection_pool.access_storage().await; + ) { + let mut connection = connection_pool.access_storage_blocking(); connection .explorer() @@ -315,6 +404,7 @@ impl JobProcessor for ContractVerifier { #[allow(clippy::async_yields_async)] async fn process_job( + &self, connection_pool: ConnectionPool, job: VerificationRequest, started_at: Instant, @@ -336,5 +426,13 @@ impl JobProcessor for ContractVerifier { }) } - async fn save_result(_: ConnectionPool, _: Self::JobId, _: Instant, _: Self::JobArtifacts) {} + async fn save_result( + &self, + _: ConnectionPool, + _: Self::JobId, + _: Instant, + _: Self::JobArtifacts, + ) { + // Do nothing + } } diff --git a/core/bin/contract-verifier/src/zksolc_utils.rs b/core/bin/contract-verifier/src/zksolc_utils.rs index f8cd8c0e646b..0602917f0266 100644 --- a/core/bin/contract-verifier/src/zksolc_utils.rs +++ b/core/bin/contract-verifier/src/zksolc_utils.rs @@ -1,13 +1,26 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::io::Write; use std::path::PathBuf; use std::process::Stdio; use crate::error::ContractVerifierError; +#[derive(Debug)] +pub enum CompilerInput { + StandardJson(StandardJson), + YulSingleFile(String), +} + +#[derive(Debug)] +pub enum CompilerOutput { + StandardJson(serde_json::Value), + YulSingleFile(String), +} + #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct CompilerInput { +pub struct StandardJson { /// The input language. pub language: String, /// The input source code files hashmap. @@ -23,6 +36,21 @@ pub struct Source { pub content: String, } +#[derive(Debug, Serialize, Deserialize)] +pub enum MetadataHash { + /// Do not include bytecode hash. 
+ #[serde(rename = "none")] + None, +} + +#[derive(Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Metadata { + /// The bytecode hash mode. + #[serde(skip_serializing_if = "Option::is_none")] + pub bytecode_hash: Option, +} + #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Settings { @@ -32,7 +60,14 @@ pub struct Settings { /// The output selection filters. pub output_selection: Option, /// The optimizer settings. + #[serde(default)] pub optimizer: Optimizer, + /// The metadata settings. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Flag for system compilation mode. + #[serde(default)] + pub is_system: bool, } #[derive(Debug, Serialize, Deserialize)] @@ -40,6 +75,17 @@ pub struct Settings { pub struct Optimizer { /// Whether the optimizer is enabled. pub enabled: bool, + /// The optimization mode string. + pub mode: Option, +} + +impl Default for Optimizer { + fn default() -> Self { + Self { + enabled: true, + mode: None, + } + } } impl Optimizer { @@ -47,7 +93,10 @@ impl Optimizer { /// A shortcut constructor. /// pub fn new(enabled: bool) -> Self { - Self { enabled } + Self { + enabled, + mode: None, + } } } @@ -68,42 +117,81 @@ impl ZkSolc { &self, input: &CompilerInput, is_system_flag: bool, - ) -> Result { + ) -> Result { use tokio::io::AsyncWriteExt; - let content = serde_json::to_vec(input).unwrap(); let mut command = tokio::process::Command::new(&self.zksolc_path); if is_system_flag { command.arg("--system-mode"); } - let mut child = command - .arg("--standard-json") + command .arg("--solc") .arg(self.solc_path.to_str().unwrap()) - .stdin(Stdio::piped()) - .stderr(Stdio::piped()) .stdout(Stdio::piped()) - .spawn() - .map_err(|_err| ContractVerifierError::InternalError)?; - let stdin = child.stdin.as_mut().unwrap(); - stdin - .write_all(&content) - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - stdin - .flush() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; + .stderr(Stdio::piped()); + match input { + CompilerInput::StandardJson(input) => { + let mut child = command + .arg("--standard-json") + .stdin(Stdio::piped()) + .spawn() + .map_err(|_err| ContractVerifierError::InternalError)?; + let stdin = child.stdin.as_mut().unwrap(); + let content = serde_json::to_vec(input).unwrap(); + stdin + .write_all(&content) + .await + .map_err(|_err| ContractVerifierError::InternalError)?; + stdin + .flush() + .await + .map_err(|_err| ContractVerifierError::InternalError)?; - let output = child - .wait_with_output() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - if output.status.success() { - Ok(serde_json::from_slice(&output.stdout).expect("Compiler output must be valid JSON")) - } else { - Err(ContractVerifierError::ZkSolcError( - String::from_utf8_lossy(&output.stderr).to_string(), - )) + let output = child + .wait_with_output() + .await + .map_err(|_err| ContractVerifierError::InternalError)?; + if output.status.success() { + Ok(CompilerOutput::StandardJson( + serde_json::from_slice(&output.stdout) + .expect("Compiler output must be valid JSON"), + )) + } else { + Err(ContractVerifierError::ZkSolcError( + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } + CompilerInput::YulSingleFile(content) => { + let mut file = tempfile::Builder::new() + .prefix("input") + .suffix(".yul") + .rand_bytes(0) + .tempfile() + .map_err(|_err| ContractVerifierError::InternalError)?; + file.write_all(content.as_bytes()) + 
.map_err(|_err| ContractVerifierError::InternalError)?; + let child = command + .arg(file.path().to_str().unwrap()) + .arg("--optimization") + .arg("3") + .arg("--yul") + .arg("--bin") + .spawn() + .map_err(|_err| ContractVerifierError::InternalError)?; + let output = child + .wait_with_output() + .await + .map_err(|_err| ContractVerifierError::InternalError)?; + if output.status.success() { + Ok(CompilerOutput::YulSingleFile( + String::from_utf8(output.stdout).expect("Couldn't parse string"), + )) + } else { + Err(ContractVerifierError::ZkSolcError( + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } } } } diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml new file mode 100644 index 000000000000..f96d4d77b3a9 --- /dev/null +++ b/core/bin/external_node/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "zksync_external_node" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. + +[dependencies] +zksync_core = { path = "../zksync_core", version = "1.0" } +zksync_dal = { path = "../../lib/dal", version = "1.0" } +zksync_config = { path = "../../lib/config", version = "1.0" } +zksync_storage = { path = "../../lib/storage", version = "1.0" } +zksync_eth_client = { path = "../../lib/eth_client", version = "1.0" } +zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } +zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } +vlog = { path = "../../lib/vlog", version = "1.0" } + +anyhow = "1.0" +tokio = { version = "1", features = ["time"] } diff --git a/core/bin/external_node/README.md b/core/bin/external_node/README.md new file mode 100644 index 000000000000..e6a2e95ab8c5 --- /dev/null +++ b/core/bin/external_node/README.md @@ -0,0 +1,45 @@ +# zkSync External Node + +This application is a read replica that can sync from the main node and serve the state locally. + +Note: this README is under construction. + +## Local development + +This section describes how to run the external node locally. + +### Configuration + +Right now, the external node requires all the configuration parameters that are required for the main node. It also has one +unique parameter: `API_WEB3_JSON_RPC_MAIN_NODE_URL` -- the address of the main node to fetch the state from. + +The easiest way to see everything that is used is to compile the `ext-node` config and inspect the contents of the resulting +`.env` file. + +Note: not all the config values from the main node are actually used, so this is temporary, and in the future the external +node will require a much smaller set of config variables. + +To change the configuration, edit `etc/env/ext-node.toml` and add overrides from the `base` config if you need any. +Remove `etc/env/ext-node.env` if it exists. On the next launch of the external node, a new config will be compiled and +written to the `etc/env/ext-node.env` file.
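+
+For illustration only, a hypothetical value for this parameter could look as follows (the URL below is a placeholder
+for a running main node; in practice the value comes from the compiled `.env` file described above):
+
+```sh
+# Hypothetical example: point the external node at a locally running main node.
+export API_WEB3_JSON_RPC_MAIN_NODE_URL=http://127.0.0.1:3050
+```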
+ +### Running + +To run the binary: + +```sh +ZKSYNC_ENV=ext-node zk f cargo run --release --bin zksync_external_node +``` + +### Clearing the state + +This command will reset the Postgres and RocksDB databases used by the external node: + +```sh +ZKSYNC_ENV=ext-node zk db reset && rm -r $ZKSYNC_HOME/en_db +``` + +## Building & pushing + +Use the `External Node - Build & push docker image` GitHub action. By default, it'll publish the image with `latest2.0` +tag. diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs new file mode 100644 index 000000000000..7c52a64d3a9f --- /dev/null +++ b/core/bin/external_node/src/main.rs @@ -0,0 +1,255 @@ +use prometheus_exporter::run_prometheus_exporter; +use tokio::{sync::watch, task, time::sleep}; + +use std::{sync::Arc, time::Duration}; +use zksync_basic_types::L2ChainId; +use zksync_config::ZkSyncConfig; +use zksync_core::{ + api_server::{healthcheck, tx_sender::TxSenderBuilder, web3::ApiBuilder}, + block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}, + consistency_checker::ConsistencyChecker, + data_fetchers::token_list::TokenListFetcher, + l1_gas_price::MainNodeGasPriceFetcher, + metadata_calculator::{MetadataCalculator, TreeImplementation}, + reorg_detector::ReorgDetector, + setup_sigint_handler, + state_keeper::{ + batch_executor::MainBatchExecutorBuilder, seal_criteria::SealManager, ZkSyncStateKeeper, + }, + sync_layer::{ + batch_status_updater::run_batch_status_updater, external_io::ExternalIO, + fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, + ExternalNodeSealer, SyncState, + }, + wait_for_tasks, +}; +use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; +use zksync_health_check::CheckHealth; +use zksync_storage::RocksDB; + +/// Creates the state keeper configured to work in the external node mode. +fn build_state_keeper( + action_queue: ActionQueue, + state_keeper_db_path: String, + main_node_url: String, + connection_pool: ConnectionPool, + sync_state: SyncState, + stop_receiver: watch::Receiver, +) -> ZkSyncStateKeeper { + let en_sealer = ExternalNodeSealer::new(action_queue.clone()); + let sealer = SealManager::custom( + None, + vec![en_sealer.clone().into_unconditional_batch_seal_criterion()], + vec![en_sealer.into_miniblock_seal_criterion()], + ); + + // These config values are used on the main node, and depending on these values certain transactions can + // be *rejected* (that is, not included into the block). However, external node only mirrors what the main + // node has already executed, so we can safely set these values to the maximum possible values - if the main + // node has already executed the transaction, then the external node must execute it too. + let max_allowed_l2_tx_gas_limit = u32::MAX.into(); + let validation_computational_gas_limit = u32::MAX; + // We don't need call traces on the external node. 
+ let save_call_traces = false; + + let batch_executor_base: Box = + Box::new(MainBatchExecutorBuilder::new( + state_keeper_db_path, + connection_pool.clone(), + max_allowed_l2_tx_gas_limit, + save_call_traces, + validation_computational_gas_limit, + )); + + let io = Box::new(ExternalIO::new( + connection_pool, + action_queue, + sync_state, + main_node_url, + )); + + ZkSyncStateKeeper::new(stop_receiver, io, batch_executor_base, sealer) +} + +async fn init_tasks( + config: ZkSyncConfig, + connection_pool: ConnectionPool, +) -> (Vec>, watch::Sender) { + let main_node_url = config.api.web3_json_rpc.main_node_url.as_ref().unwrap(); + let (stop_sender, stop_receiver) = watch::channel::(false); + let mut healthchecks: Vec> = Vec::new(); + // Create components. + let gas_adjuster = Arc::new(MainNodeGasPriceFetcher::new(main_node_url)); + + let sync_state = SyncState::new(); + let action_queue = ActionQueue::new(); + let state_keeper = build_state_keeper( + action_queue.clone(), + config.db.state_keeper_db_path.clone(), + main_node_url.clone(), + connection_pool.clone(), + sync_state.clone(), + stop_receiver.clone(), + ); + let fetcher = MainNodeFetcher::new( + ConnectionPool::new(Some(1), true), + main_node_url, + action_queue.clone(), + sync_state.clone(), + stop_receiver.clone(), + ); + let metadata_calculator = MetadataCalculator::lightweight(&config, TreeImplementation::New); + healthchecks.push(Box::new(metadata_calculator.tree_health_check())); + let consistency_checker = ConsistencyChecker::new( + &config.eth_client.web3_url, + config.contracts.validator_timelock_addr, + 10, + ConnectionPool::new(Some(1), true), + ); + // We need this component to fetch "well-known" tokens. + // And we need to know "well-known" tokens since there are paymaster-related + // checks which depend on this particular token quality. + let token_list_fetcher = TokenListFetcher::new(config.clone()); + + // Run the components. + let prometheus_task = run_prometheus_exporter(config.api.prometheus.clone(), false); + let tree_stop_receiver = stop_receiver.clone(); + let tree_handle = task::spawn_blocking(move || { + let pool = ConnectionPool::new(Some(1), true); + metadata_calculator.run(&pool, tree_stop_receiver); + }); + let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone())); + let updater_stop_receiver = stop_receiver.clone(); + let updater_handle = task::spawn_blocking(move || { + run_batch_status_updater( + ConnectionPool::new(Some(1), true), + action_queue, + updater_stop_receiver, + ) + }); + let sk_handle = task::spawn_blocking(|| state_keeper.run()); + let fetcher_handle = tokio::spawn(fetcher.run()); + let gas_adjuster_handle = tokio::spawn(gas_adjuster.clone().run(stop_receiver.clone())); + + let tx_sender = { + let mut tx_sender_builder = + TxSenderBuilder::new(config.clone().into(), connection_pool.clone()) + .with_main_connection_pool(connection_pool.clone()) + .with_tx_proxy(main_node_url.clone()) + .with_state_keeper_config(config.chain.state_keeper.clone()); + + // Add rate limiter if enabled. 
+ if let Some(transactions_per_sec_limit) = + config.api.web3_json_rpc.transactions_per_sec_limit + { + tx_sender_builder = tx_sender_builder.with_rate_limiter(transactions_per_sec_limit); + }; + + tx_sender_builder.build(gas_adjuster, config.chain.state_keeper.default_aa_hash) + }; + + let http_api_handle = + ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) + .http(config.api.web3_json_rpc.http_port) + .with_filter_limit(config.api.web3_json_rpc.filters_limit()) + .with_threads(config.api.web3_json_rpc.threads_per_server as usize) + .with_tx_sender(tx_sender.clone()) + .with_sync_state(sync_state.clone()) + .build(stop_receiver.clone()); + + let token_list_fetcher_handle = + tokio::spawn(token_list_fetcher.run(connection_pool.clone(), stop_receiver.clone())); + + let mut task_handles = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool) + .ws(config.api.web3_json_rpc.ws_port) + .with_filter_limit(config.api.web3_json_rpc.filters_limit()) + .with_subscriptions_limit(config.api.web3_json_rpc.subscriptions_limit()) + .with_polling_interval(config.api.web3_json_rpc.pubsub_interval()) + .with_tx_sender(tx_sender) + .with_sync_state(sync_state) + .build(stop_receiver.clone()); + + healthchecks.push(Box::new(ConnectionPoolHealthCheck::new( + ConnectionPool::new(Some(1), true), + ))); + let healthcheck_handle = healthcheck::start_server_thread_detached( + config.api.healthcheck.bind_addr(), + healthchecks, + stop_receiver, + ); + + task_handles.extend(http_api_handle); + task_handles.extend([ + prometheus_task, + sk_handle, + fetcher_handle, + updater_handle, + tree_handle, + gas_adjuster_handle, + consistency_checker_handle, + healthcheck_handle, + token_list_fetcher_handle, + ]); + + (task_handles, stop_sender) +} + +async fn shutdown_components(stop_sender: watch::Sender) { + let _ = stop_sender.send(true); + RocksDB::await_rocksdb_termination(); + // Sleep for some time to let components gracefully stop. + sleep(Duration::from_secs(10)).await; +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // Initial setup. + + let _sentry_guard = vlog::init(); + let connection_pool = ConnectionPool::new(None, true); + let config = ZkSyncConfig::from_env(); + let main_node_url = config.api.web3_json_rpc.main_node_url.as_ref().unwrap(); + let sigint_receiver = setup_sigint_handler(); + + vlog::info!("Started the external node"); + vlog::info!("Main node URL is: {}", main_node_url); + + // Make sure that genesis is performed. + perform_genesis_if_needed( + &mut connection_pool.access_storage_blocking(), + L2ChainId(config.chain.eth.zksync_network_id), + config.chain.state_keeper.base_system_contracts_hashes(), + main_node_url.clone(), + ) + .await; + + let (task_handles, stop_sender) = init_tasks(config.clone(), connection_pool.clone()).await; + + let reorg_detector = ReorgDetector::new(main_node_url, connection_pool.clone()); + let reorg_detector_handle = tokio::spawn(reorg_detector.run()); + + tokio::select! 
{ + _ = wait_for_tasks(task_handles, false) => {}, + _ = sigint_receiver => { + vlog::info!("Stop signal received, shutting down"); + }, + last_correct_batch = reorg_detector_handle => { + if let Ok(last_correct_batch) = last_correct_batch { + vlog::info!("Performing rollback to block {}", last_correct_batch); + shutdown_components(stop_sender).await; + BlockReverter::new(config, connection_pool, L1ExecutedBatchesRevert::Allowed) + .rollback_db(last_correct_batch, BlockReverterFlags::all()) + .await; + vlog::info!("Rollback successfully completed, the node has to restart to continue working"); + return Ok(()); + } else { + vlog::error!("Reorg detector actor failed"); + } + } + } + + // Reaching this point means that either some actor exited unexpectedly or we received a stop signal. + // Broadcast the stop signal to all actors and exit. + shutdown_components(stop_sender).await; + Ok(()) +} diff --git a/core/bin/prover/Cargo.lock b/core/bin/prover/Cargo.lock index 7854ce070c58..81e95a2b831a 100644 --- a/core/bin/prover/Cargo.lock +++ b/core/bin/prover/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -175,6 +175,12 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + [[package]] name = "async-channel" version = "1.8.0" @@ -381,6 +387,18 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backon" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294" +dependencies = [ + "futures 0.3.27", + "pin-project", + "rand 0.8.5", + "tokio 1.28.0", +] + [[package]] name = "backtrace" version = "0.3.67" @@ -432,7 +450,7 @@ dependencies = [ [[package]] name = "bellman_ce" version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayvec 0.7.2", "bit-vec", @@ -479,7 +497,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "clap", @@ -511,6 +529,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" + [[package]] name = "bitvec" version = "0.20.4" @@ -557,7 +581,7 @@ dependencies = [ 
[[package]] name = "blake2s_const" version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -758,7 +782,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -771,7 +795,7 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -1111,7 +1135,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "proc-macro-error", "proc-macro2 1.0.52", @@ -1341,16 +1365,6 @@ dependencies = [ "signature", ] -[[package]] -name = "eip712-signature" -version = "0.1.0" -source = "git+https://github.com/vladbochok/eip712-signature#30b11455e7d613313e8c12d2aad961fd4bf902fe" -dependencies = [ - "ethereum-types", - "parity-crypto", - "thiserror", -] - [[package]] name = "either" version = "1.8.1" @@ -1919,9 +1933,8 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +version = "0.9.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "base64 0.21.0", @@ -1934,16 +1947,15 @@ dependencies = [ "serde_json", "thiserror", "time 0.3.20", - "tokio 1.26.0", + "tokio 1.28.0", "tracing", "urlencoding", ] [[package]] name = "google-cloud-default" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +version = "0.1.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", "google-cloud-auth", @@ -1954,19 +1966,17 @@ dependencies = [ [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "reqwest", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", ] [[package]] name = "google-cloud-storage" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +version = "0.10.0" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-stream", "base64 0.21.0", @@ -1985,8 +1995,7 @@ dependencies = [ "sha2 0.10.6", "thiserror", "time 0.3.20", - "tokio 1.26.0", - "tokio-util 0.7.7", + "tokio 1.28.0", "tracing", "url", ] @@ -1994,8 +2003,7 @@ dependencies = [ [[package]] name = "google-cloud-token" version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" dependencies = [ "async-trait", ] @@ -2003,7 +2011,7 @@ dependencies = [ [[package]] name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bindgen", "crossbeam 0.8.2", @@ -2016,7 +2024,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -2054,7 +2062,7 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-util 0.7.7", "tracing", ] @@ -2107,7 +2115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "headers-core", "http", @@ -2277,7 +2285,7 @@ dependencies = [ "itoa 1.0.6", "pin-project-lite 0.2.9", "socket2", - "tokio 1.26.0", + "tokio 1.28.0", "tower-service", "tracing", "want", @@ -2294,7 +2302,7 @@ dependencies = [ "log", "rustls", "rustls-native-certs", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-rustls", "webpki-roots", ] @@ -2307,7 +2315,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-io-timeout", ] @@ -2320,7 +2328,7 @@ dependencies = [ "bytes 1.4.0", "hyper", "native-tls", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-native-tls", ] @@ -2555,7 +2563,7 @@ dependencies = [ "rustls-native-certs", "soketto", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-rustls", "tokio-util 0.7.7", "tracing", @@ -2586,7 +2594,7 @@ dependencies = [ "serde_json", "soketto", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "tracing", "wasm-bindgen-futures", ] @@ -2606,7 +2614,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "tracing", ] @@ -2638,7 +2646,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-stream", "tokio-util 0.7.7", "tower", @@ -2932,7 +2940,7 @@ dependencies = [ "portable-atomic", "quanta", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "tracing", ] @@ -3047,7 +3055,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "libc", "static_assertions", @@ -3282,7 +3290,7 @@ version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3369,7 +3377,7 @@ dependencies = [ "prost-build", "reqwest", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "tonic", "tonic-build", ] @@ -3722,7 +3730,7 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "concurrent-queue", "libc", @@ -3820,7 +3828,7 @@ version = "1.0.0" dependencies = [ "metrics", "metrics-exporter-prometheus", - "tokio 1.26.0", + "tokio 1.28.0", "vlog", "zksync_config", ] @@ -3881,7 +3889,7 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "api", "bincode", @@ -4134,7 +4142,7 @@ version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4174,7 +4182,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4190,9 +4198,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "cce168fea28d3e05f158bda4576cf0c844d5045bc2cc3620fa0292ed5bb5814c" dependencies = [ "aho-corasick", "memchr", @@ -4210,9 +4218,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" @@ -4245,7 +4253,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-native-tls", "tokio-rustls", "tokio-util 0.7.7", @@ -4395,7 +4403,7 @@ version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -4552,7 +4560,7 @@ version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -4595,7 +4603,7 @@ dependencies = [ "sentry-core", "sentry-debug-images", "sentry-panic", - "tokio 1.26.0", + "tokio 1.28.0", "ureq", ] @@ -4993,7 +5001,7 @@ dependencies = [ "atoi", "base64 0.13.1", "bigdecimal", - "bitflags", + "bitflags 1.3.2", "byteorder", "bytes 1.4.0", "chrono", @@ -5170,15 +5178,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +dependencies = [ + "proc-macro2 1.0.52", + "quote 1.0.26", + "unicode-ident", +] + [[package]] name = "sync_vm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +version = 
"1.3.2" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "arrayvec 0.7.2", "cs_derive", "derivative", - "eip712-signature", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", "hex", "itertools", @@ -5358,14 +5376,13 @@ dependencies = [ [[package]] name = "tokio" -version = "1.26.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", @@ -5373,7 +5390,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -5383,18 +5400,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.0", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.52", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.12", ] [[package]] @@ -5404,7 +5421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.26.0", + "tokio 1.28.0", ] [[package]] @@ -5414,7 +5431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", - "tokio 1.26.0", + "tokio 1.28.0", "webpki", ] @@ -5426,7 +5443,7 @@ checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.0", ] [[package]] @@ -5440,7 +5457,7 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.0", ] [[package]] @@ -5454,7 +5471,7 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.0", "tracing", ] @@ -5507,7 +5524,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-stream", "tokio-util 0.6.10", "tower", @@ -5542,7 +5559,7 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.26.0", + "tokio 1.28.0", "tokio-util 0.7.7", "tower-layer", "tower-service", @@ -6104,13 +6121,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -6119,7 +6136,16 @@ version = "0.45.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -6128,13 +6154,28 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -6143,42 +6184,84 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winnow" version = "0.3.5" @@ -6211,25 +6294,21 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" dependencies = [ - "blake2 0.10.6", - "k256", "lazy_static", "num 0.4.0", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" dependencies = [ "env_logger 0.9.3", "hex", @@ -6248,22 +6327,24 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags", + "bitflags 2.3.1", + "blake2 0.10.6", "ethereum-types", + "k256", "lazy_static", "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] name = "zkevm_test_harness" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" dependencies = [ "bincode", - "blake2 0.10.6", "circuit_testing", "codegen 0.2.0", "crossbeam 0.8.2", @@ -6276,8 +6357,6 @@ dependencies = [ "rayon", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "smallvec", "structopt", "sync_vm", @@ -6299,14 +6378,16 @@ dependencies = [ name = "zksync_circuit_breaker" version = "1.0.0" dependencies = [ + "assert_matches", "async-trait", + "backon", "convert_case 0.6.0", "futures 0.3.27", "hex", "serde", "serde_json", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "zksync_config", "zksync_contracts", "zksync_dal", @@ -6328,6 +6409,7 @@ dependencies = [ "serde_json", "url", "zksync_basic_types", + "zksync_contracts", "zksync_utils", ] @@ -6378,7 +6460,7 @@ dependencies = [ "vm", "zksync_config", "zksync_contracts", - "zksync_object_store", + "zksync_health_check", "zksync_state", 
"zksync_storage", "zksync_types", @@ -6398,7 +6480,7 @@ dependencies = [ "parity-crypto", "serde", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "vlog", "zksync_config", "zksync_contracts", @@ -6424,6 +6506,10 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_health_check" +version = "0.1.0" + [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" @@ -6438,12 +6524,13 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "bincode", "google-cloud-auth", "google-cloud-default", "google-cloud-storage", "http", "metrics", - "tokio 1.26.0", + "tokio 1.28.0", "vlog", "zksync_config", "zksync_types", @@ -6470,12 +6557,13 @@ dependencies = [ "serde_json", "setup_key_generator_and_server", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", "vlog", "zkevm_test_harness", "zksync_circuit_breaker", "zksync_config", "zksync_dal", + "zksync_eth_client", "zksync_object_store", "zksync_prover_utils", "zksync_types", @@ -6487,8 +6575,12 @@ dependencies = [ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ + "ctrlc", + "futures 0.3.27", "metrics", + "regex", "reqwest", + "tokio 1.28.0", "vlog", "zksync_config", "zksync_utils", @@ -6498,6 +6590,7 @@ dependencies = [ name = "zksync_state" version = "1.0.0" dependencies = [ + "metrics", "vlog", "zksync_storage", "zksync_types", @@ -6560,11 +6653,13 @@ dependencies = [ "envy", "futures 0.3.27", "hex", + "itertools", "num 0.3.1", "reqwest", "serde", "thiserror", - "tokio 1.26.0", + "tokio 1.28.0", + "vlog", "zk_evm", "zksync_basic_types", ] diff --git a/core/bin/prover/Cargo.toml b/core/bin/prover/Cargo.toml index 807cef8532d0..8cbc06e73b92 100644 --- a/core/bin/prover/Cargo.toml +++ b/core/bin/prover/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -17,6 +17,7 @@ zksync_config = { path = "../../lib/config", version = "1.0" } zksync_utils = {path = "../../lib/utils", version = "1.0" } zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } zksync_circuit_breaker = {path = "../../lib/circuit_breaker", version = "1.0" } +zksync_eth_client = {path = "../../lib/eth_client", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } @@ -24,10 +25,11 @@ zksync_object_store = { path = "../../lib/object_store", version = "1.0" } setup_key_generator_and_server = { path = "../setup_key_generator_and_server", version = "1.0" } -api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "main", features=["gpu"], default-features=false} -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "main", features=["gpu"], default-features=false} +api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} 
-zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "main"} tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } diff --git a/core/bin/prover/src/artifact_provider.rs b/core/bin/prover/src/artifact_provider.rs index a12819b25b7f..9122fe56a5b2 100644 --- a/core/bin/prover/src/artifact_provider.rs +++ b/core/bin/prover/src/artifact_provider.rs @@ -7,6 +7,7 @@ use zksync_verification_key_server::get_vk_for_circuit_type; #[derive(Debug)] pub struct ProverArtifactProvider; + impl ArtifactProvider for ProverArtifactProvider { type ArtifactError = String; diff --git a/core/bin/prover/src/main.rs b/core/bin/prover/src/main.rs index ad3f75441149..74938dd98426 100644 --- a/core/bin/prover/src/main.rs +++ b/core/bin/prover/src/main.rs @@ -1,13 +1,12 @@ -use std::cell::RefCell; use std::env; use std::sync::{Arc, Mutex}; use api::gpu_prover; -use futures::{channel::mpsc, executor::block_on, future, SinkExt, StreamExt}; +use futures::future; use local_ip_address::local_ip; use prover_service::run_prover::run_prover_with_remote_synthesizer; use queues::Buffer; -use tokio::task::JoinHandle; +use tokio::{sync::oneshot, task::JoinHandle}; use zksync_circuit_breaker::{vks::VksChecker, CircuitBreakerChecker}; use zksync_config::configs::prover_group::ProverGroupConfig; @@ -17,11 +16,13 @@ use zksync_config::{ }; use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; +use zksync_eth_client::clients::http::PKSigningClient; +use zksync_object_store::ObjectStoreFactory; +use zksync_prover_utils::region_fetcher::{get_region, get_zone}; use crate::artifact_provider::ProverArtifactProvider; use crate::prover::ProverReporter; use crate::prover_params::ProverParams; -use zksync_prover_utils::region_fetcher::get_region; use crate::socket_listener::incoming_socket_listener; use crate::synthesized_circuit_provider::SynthesizedCircuitProvider; @@ -34,11 +35,11 @@ mod synthesized_circuit_provider; pub async fn wait_for_tasks(task_futures: Vec>) { match future::select_all(task_futures).await.0 { Ok(_) => { - graceful_shutdown(); + graceful_shutdown().await; vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); } Err(error) => { - graceful_shutdown(); + graceful_shutdown().await; vlog::info!( "One of the tokio actors unexpectedly finished with error: {:?}", error @@ -47,18 +48,17 @@ pub async fn wait_for_tasks(task_futures: Vec>) { } } -fn graceful_shutdown() { +async fn graceful_shutdown() { let pool = ConnectionPool::new(Some(1), true); let host = local_ip().expect("Failed obtaining local IP address"); let port = ProverConfigs::from_env().non_gpu.assembly_receiver_port; - let address = SocketAddress { - host, - port, - }; + let region = get_region().await; + let zone = get_zone().await; + let address = SocketAddress { host, port }; pool.clone() .access_storage_blocking() .gpu_prover_queue_dal() - .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0); + .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0, region, zone); } fn get_ram_per_gpu() -> u64 { @@ -68,29 +68,35 @@ fn get_ram_per_gpu() -> u64 { ram_in_gb } -fn get_prover_config_for_machine_type() -> ProverConfig { +fn get_prover_config_for_machine_type() -> (ProverConfig, u8) { let prover_configs = ProverConfigs::from_env(); - let actual_num_gpus = gpu_prover::cuda_bindings::devices().unwrap() as usize; + let actual_num_gpus = match 
gpu_prover::cuda_bindings::devices() { + Ok(gpus) => gpus as u8, + Err(err) => { + vlog::error!("unable to get number of GPUs: {err:?}"); + panic!("unable to get number of GPUs: {:?}", err); + } + }; vlog::info!("detected number of gpus: {}", actual_num_gpus); let ram_in_gb = get_ram_per_gpu(); match actual_num_gpus { 1 => { vlog::info!("Detected machine type with 1 GPU and 80GB RAM"); - prover_configs.one_gpu_eighty_gb_mem + (prover_configs.one_gpu_eighty_gb_mem, actual_num_gpus) } 2 => { if ram_in_gb > 39 { vlog::info!("Detected machine type with 2 GPU and 80GB RAM"); - prover_configs.two_gpu_eighty_gb_mem + (prover_configs.two_gpu_eighty_gb_mem, actual_num_gpus) } else { vlog::info!("Detected machine type with 2 GPU and 40GB RAM"); - prover_configs.two_gpu_forty_gb_mem + (prover_configs.two_gpu_forty_gb_mem, actual_num_gpus) } } 4 => { vlog::info!("Detected machine type with 4 GPU and 80GB RAM"); - prover_configs.four_gpu_eighty_gb_mem + (prover_configs.four_gpu_eighty_gb_mem, actual_num_gpus) } _ => panic!("actual_num_gpus: {} not supported yet", actual_num_gpus), } @@ -100,7 +106,7 @@ fn get_prover_config_for_machine_type() -> ProverConfig { async fn main() { let sentry_guard = vlog::init(); let config = ZkSyncConfig::from_env(); - let prover_config = get_prover_config_for_machine_type(); + let (prover_config, num_gpu) = get_prover_config_for_machine_type(); let prometheus_config = PrometheusConfig { listener_port: prover_config.prometheus_port, ..ApiConfig::from_env().prometheus @@ -114,17 +120,16 @@ async fn main() { None => vlog::info!("No sentry url configured"), } let region = get_region().await; + let zone = get_zone().await; - let (stop_signal_sender, mut stop_signal_receiver) = mpsc::channel(256); - - { - let stop_signal_sender = RefCell::new(stop_signal_sender.clone()); - ctrlc::set_handler(move || { - let mut sender = stop_signal_sender.borrow_mut(); - block_on(sender.send(true)).expect("Ctrl+C signal send"); - }) - .expect("Error setting Ctrl+C handler"); - } + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .expect("Error setting Ctrl+C handler"); zksync_prover_utils::ensure_initial_setup_keys_present( &prover_config.initial_setup_key_path, @@ -132,8 +137,12 @@ async fn main() { ); env::set_var("CRS_FILE", prover_config.initial_setup_key_path.clone()); + let eth_client = PKSigningClient::from_config(&config); let circuit_breaker_checker = CircuitBreakerChecker::new( - vec![Box::new(VksChecker::new(&config))], + vec![Box::new(VksChecker::new( + &config.chain.circuit_breaker, + eth_client, + ))], &config.chain.circuit_breaker, ); circuit_breaker_checker @@ -148,7 +157,11 @@ async fn main() { let circuit_ids = ProverGroupConfig::from_env() .get_circuit_ids_for_group_id(prover_config.specialized_prover_group_id); - vlog::info!("Starting proof generation for circuits: {:?} in region: {} with group-id: {}", circuit_ids, region, prover_config.specialized_prover_group_id); + vlog::info!( + "Starting proof generation for circuits: {circuit_ids:?} \ + in region: {region} and zone: {zone} with group-id: {}", + prover_config.specialized_prover_group_id + ); let mut tasks: Vec> = vec![]; tasks.push(prometheus_exporter::run_prometheus_exporter( @@ -168,33 +181,34 @@ async fn main() { host: local_ip, port: prover_config.assembly_receiver_port, }; - let synthesized_circuit_provider = - 
SynthesizedCircuitProvider::new(consumer, ConnectionPool::new(Some(1), true), address); + let synthesized_circuit_provider = SynthesizedCircuitProvider::new( + consumer, + ConnectionPool::new(Some(1), true), + address, + region.clone(), + zone.clone(), + ); vlog::info!("local IP address is: {:?}", local_ip); tasks.push(tokio::task::spawn(incoming_socket_listener( local_ip, prover_config.assembly_receiver_port, - prover_config.assembly_receiver_poll_time_in_millis, producer, ConnectionPool::new(Some(1), true), prover_config.specialized_prover_group_id, - region + region, + zone, + num_gpu, ))); - let artifact_provider = ProverArtifactProvider {}; - let prover_job_reporter = ProverReporter { - pool: ConnectionPool::new(Some(1), true), - config: prover_config.clone(), - processed_by: env::var("POD_NAME").unwrap_or("Unknown".to_string()), - }; - - let params: ProverParams = prover_config.clone().into(); + let params = ProverParams::new(&prover_config); + let store_factory = ObjectStoreFactory::from_env(); + let prover_job_reporter = ProverReporter::new(prover_config, &store_factory); tasks.push(tokio::task::spawn_blocking(move || { - run_prover_with_remote_synthesizer::<_, _, _, _>( + run_prover_with_remote_synthesizer( synthesized_circuit_provider, - artifact_provider, + ProverArtifactProvider, prover_job_reporter, circuit_ids, params, @@ -202,11 +216,11 @@ async fn main() { })); tokio::select! { - _ = async { wait_for_tasks(tasks).await } => {}, - _ = async { stop_signal_receiver.next().await } => { + _ = wait_for_tasks(tasks) => {}, + _ = stop_signal_receiver => { vlog::info!("Stop signal received, shutting down"); }, - error = async { cb_receiver.await } => { + error = cb_receiver => { if let Ok(error_msg) = error { vlog::warn!("Circuit breaker received, shutting down. 
Reason: {}", error_msg); } diff --git a/core/bin/prover/src/prover.rs b/core/bin/prover/src/prover.rs index a38380a43f94..00474aca669c 100644 --- a/core/bin/prover/src/prover.rs +++ b/core/bin/prover/src/prover.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{env, time::Duration}; use prover_service::JobResult::{Failure, ProofGenerated}; use prover_service::{JobReporter, JobResult}; @@ -7,20 +7,30 @@ use zkevm_test_harness::pairing::bn256::Bn256; use zksync_config::ProverConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::object_store::{create_object_store_from_env, PROVER_JOBS_BUCKET_PATH}; +use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory}; #[derive(Debug)] pub struct ProverReporter { - pub(crate) pool: ConnectionPool, - pub(crate) config: ProverConfig, - pub(crate) processed_by: String, + pool: ConnectionPool, + config: ProverConfig, + processed_by: String, + object_store: Box, } -pub fn assembly_debug_blob_url(job_id: usize, circuit_id: u8) -> String { +fn assembly_debug_blob_url(job_id: usize, circuit_id: u8) -> String { format!("assembly_debugging_{}_{}.bin", job_id, circuit_id) } impl ProverReporter { + pub(crate) fn new(config: ProverConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + pool: ConnectionPool::new(Some(1), true), + config, + processed_by: env::var("POD_NAME").unwrap_or("Unknown".to_string()), + object_store: store_factory.create_store(), + } + } + fn handle_successful_proof_generation( &self, job_id: usize, @@ -31,26 +41,22 @@ impl ProverReporter { let circuit_type = self.get_circuit_type(job_id); let serialized = bincode::serialize(&proof).expect("Failed to serialize proof"); vlog::info!( - "Successfully generated proof with id {:?} and type: {} for index: {}. Size: {:?}KB took: {}", + "Successfully generated proof with id {:?} and type: {} for index: {}. Size: {:?}KB took: {:?}", job_id, - circuit_type.clone(), + circuit_type, index, serialized.len() >> 10, - duration.as_secs() as f64, + duration, ); metrics::histogram!( "server.prover.proof_generation_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); let job_id = job_id as u32; let mut connection = self.pool.access_storage_blocking(); let mut transaction = connection.start_transaction_blocking(); - // Lock `prover_jobs` table. - // It is needed to have only one transaction at the moment - // that calls `successful_proofs_count` method to avoid race condition. 
- transaction.prover_dal().lock_prover_jobs_table_exclusive(); transaction .prover_dal() .save_proof(job_id, duration, serialized, &self.processed_by); @@ -59,38 +65,7 @@ impl ProverReporter { .get_prover_job_by_id(job_id) .unwrap_or_else(|| panic!("No job with id: {} exist", job_id)); - if let Some(next_round) = prover_job_metadata.aggregation_round.next() { - // for Basic, Leaf and Node rounds we need to mark the next job as `queued` - // if all the dependent proofs are computed - - let successful_proofs_count = transaction.prover_dal().successful_proofs_count( - prover_job_metadata.block_number, - prover_job_metadata.aggregation_round, - ); - - let required_proofs_count = transaction - .witness_generator_dal() - .required_proofs_count(prover_job_metadata.block_number, next_round); - - vlog::info!( - "Generated {}/{} {:?} circuits of block {:?}", - successful_proofs_count, - required_proofs_count, - prover_job_metadata.aggregation_round, - prover_job_metadata.block_number.0 - ); - - if successful_proofs_count == required_proofs_count { - vlog::info!( - "Marking {:?} job for l1 batch number {:?} as queued", - next_round, - prover_job_metadata.block_number - ); - transaction - .witness_generator_dal() - .mark_witness_job_as_queued(prover_job_metadata.block_number, next_round); - } - } else { + if prover_job_metadata.aggregation_round.next().is_none() { let block = transaction .blocks_dal() .get_block_header(prover_job_metadata.block_number) @@ -119,7 +94,7 @@ impl JobReporter for ProverReporter { fn send_report(&mut self, report: JobResult) { match report { Failure(job_id, error) => { - vlog::info!( + vlog::error!( "Failed to generate proof for id {:?}. error reason; {}", job_id, error @@ -135,44 +110,48 @@ impl JobReporter for ProverReporter { JobResult::Synthesized(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully synthesized circuit with id {:?} and type: {}. took: {}", + vlog::trace!( + "Successfully synthesized circuit with id {:?} and type: {}. took: {:?}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, ); metrics::histogram!( "server.prover.circuit_synthesis_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); } JobResult::AssemblyFinalized(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully finalized assembly with id {:?} and type: {}. took: {}", + vlog::trace!( + "Successfully finalized assembly with id {:?} and type: {}. took: {:?}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, ); metrics::histogram!( "server.prover.assembly_finalize_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); } JobResult::SetupLoaded(job_id, duration, cache_miss) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully setup loaded with id {:?} and type: {}. took: {:?} and had cache_miss: {}", + vlog::trace!( + "Successfully setup loaded with id {:?} and type: {}. 
\ + took: {:?} and had cache_miss: {}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, cache_miss ); - metrics::histogram!("server.prover.setup_load_time", duration.as_secs() as f64, - "circuit_type" => circuit_type.clone(),); + metrics::histogram!( + "server.prover.setup_load_time", + duration, + "circuit_type" => circuit_type.clone() + ); metrics::counter!( "server.prover.setup_loading_cache_miss", 1, @@ -181,83 +160,73 @@ impl JobReporter for ProverReporter { } JobResult::AssemblyEncoded(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully encoded assembly with id {:?} and type: {}. took: {}", + vlog::trace!( + "Successfully encoded assembly with id {:?} and type: {}. took: {:?}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, ); metrics::histogram!( "server.prover.assembly_encoding_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); } JobResult::AssemblyDecoded(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully decoded assembly with id {:?} and type: {}. took: {}", + vlog::trace!( + "Successfully decoded assembly with id {:?} and type: {}. took: {:?}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, ); metrics::histogram!( "server.prover.assembly_decoding_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); } JobResult::FailureWithDebugging(job_id, circuit_id, assembly, error) => { - let mut object_store = create_object_store_from_env(); - vlog::info!( + vlog::trace!( "Failed assembly decoding for job-id {} and circuit-type: {}. error: {}", job_id, circuit_id, error, ); let blob_url = assembly_debug_blob_url(job_id, circuit_id); - object_store - .put(PROVER_JOBS_BUCKET_PATH, blob_url, assembly) + self.object_store + .put_raw(Bucket::ProverJobs, &blob_url, assembly) .expect("Failed saving debug assembly to GCS"); } JobResult::AssemblyTransferred(job_id, duration) => { let circuit_type = self.get_circuit_type(job_id); - vlog::info!( - "Successfully transferred assembly with id {:?} and type: {}. took: {}", + vlog::trace!( + "Successfully transferred assembly with id {:?} and type: {}. 
took: {:?}", job_id, - circuit_type.clone(), - duration.as_secs() as f64, + circuit_type, + duration, ); metrics::histogram!( "server.prover.assembly_transferring_time", - duration.as_secs() as f64, + duration, "circuit_type" => circuit_type, ); } JobResult::ProverWaitedIdle(prover_id, duration) => { - vlog::info!( - "Prover wait idle time: {} for prover-id: {:?}", - duration.as_secs() as f64, + vlog::trace!( + "Prover wait idle time: {:?} for prover-id: {:?}", + duration, prover_id ); - metrics::histogram!( - "server.prover.prover_wait_idle_time", - duration.as_secs() as f64, - ); + metrics::histogram!("server.prover.prover_wait_idle_time", duration,); } JobResult::SetupLoaderWaitedIdle(duration) => { - vlog::info!("Setup load wait idle time: {}", duration.as_secs() as f64,); - metrics::histogram!( - "server.prover.setup_load_wait_wait_idle_time", - duration.as_secs() as f64, - ); + vlog::trace!("Setup load wait idle time: {:?}", duration); + metrics::histogram!("server.prover.setup_load_wait_wait_idle_time", duration,); } JobResult::SchedulerWaitedIdle(duration) => { - vlog::info!("Scheduler wait idle time: {}", duration.as_secs() as f64,); - metrics::histogram!( - "server.prover.scheduler_wait_idle_time", - duration.as_secs() as f64, - ); + vlog::trace!("Scheduler wait idle time: {:?}", duration); + metrics::histogram!("server.prover.scheduler_wait_idle_time", duration,); } } } diff --git a/core/bin/prover/src/prover_params.rs b/core/bin/prover/src/prover_params.rs index 112b71c4428d..558e9058ed61 100644 --- a/core/bin/prover/src/prover_params.rs +++ b/core/bin/prover/src/prover_params.rs @@ -11,9 +11,9 @@ pub struct ProverParams { number_of_setup_slots: u8, } -impl From for ProverParams { - fn from(config: ProverConfig) -> Self { - ProverParams { +impl ProverParams { + pub(crate) fn new(config: &ProverConfig) -> Self { + Self { number_of_threads: config.number_of_threads as u8, polling_duration: Duration::from_millis(config.polling_duration_in_millis), number_of_setup_slots: config.number_of_setup_slots, @@ -23,7 +23,7 @@ impl From for ProverParams { impl Params for ProverParams { fn number_of_parallel_synthesis(&self) -> u8 { - self.number_of_threads as u8 + self.number_of_threads } fn number_of_setup_slots(&self) -> u8 { diff --git a/core/bin/prover/src/socket_listener.rs b/core/bin/prover/src/socket_listener.rs index 3f251cf7dca9..4da5f4a9e9ba 100644 --- a/core/bin/prover/src/socket_listener.rs +++ b/core/bin/prover/src/socket_listener.rs @@ -2,19 +2,20 @@ use crate::synthesized_circuit_provider::SharedAssemblyQueue; use queues::IsQueue; use std::io::copy; use std::net::{IpAddr, SocketAddr, TcpListener, TcpStream}; -use std::time::{Duration, Instant}; -use tokio::time::sleep; +use std::time::Instant; use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; +#[allow(clippy::too_many_arguments)] pub async fn incoming_socket_listener( host: IpAddr, port: u16, - poll_time_in_millis: u64, queue: SharedAssemblyQueue, pool: ConnectionPool, specialized_prover_group_id: u8, region: String, + zone: String, + num_gpu: u8, ) { let listening_address = SocketAddr::new(host, port); vlog::info!( @@ -26,24 +27,36 @@ pub async fn incoming_socket_listener( .unwrap_or_else(|_| panic!("Failed binding address: {:?}", listening_address)); let address = SocketAddress { host, port }; - pool.clone() - .access_storage_blocking() + pool.access_storage_blocking() .gpu_prover_queue_dal() .insert_prover_instance( address.clone(), 
queue.lock().unwrap().capacity(), specialized_prover_group_id, - region + region.clone(), + zone.clone(), + num_gpu, ); - loop { - match listener.incoming().next() { - Some(stream) => { - let stream = stream.expect("Stream closed early"); - handle_incoming_file(stream, queue.clone(), pool.clone(), address.clone()); - } - None => sleep(Duration::from_millis(poll_time_in_millis)).await, - } + let mut now = Instant::now(); + + for stream in listener.incoming() { + vlog::trace!( + "Received new assembly send connection, waited for {}ms.", + now.elapsed().as_millis() + ); + + let stream = stream.expect("Stream closed early"); + handle_incoming_file( + stream, + queue.clone(), + pool.clone(), + address.clone(), + region.clone(), + zone.clone(), + ); + + now = Instant::now(); } } @@ -52,12 +65,14 @@ fn handle_incoming_file( queue: SharedAssemblyQueue, pool: ConnectionPool, address: SocketAddress, + region: String, + zone: String, ) { let mut assembly: Vec = vec![]; let started_at = Instant::now(); copy(&mut stream, &mut assembly).expect("Failed reading from stream"); let file_size_in_gb = assembly.len() / (1024 * 1024 * 1024); - vlog::info!( + vlog::trace!( "Read file of size: {}GB from stream took: {} seconds", file_size_in_gb, started_at.elapsed().as_secs() @@ -73,12 +88,13 @@ fn handle_incoming_file( GpuProverInstanceStatus::Available }; - pool.clone() - .access_storage_blocking() + pool.access_storage_blocking() .gpu_prover_queue_dal() .update_prover_instance_status( address, status, assembly_queue.capacity() - assembly_queue.size(), + region, + zone, ); } diff --git a/core/bin/prover/src/synthesized_circuit_provider.rs b/core/bin/prover/src/synthesized_circuit_provider.rs index 811e78ee3190..1424ee9e7a24 100644 --- a/core/bin/prover/src/synthesized_circuit_provider.rs +++ b/core/bin/prover/src/synthesized_circuit_provider.rs @@ -4,6 +4,7 @@ use std::sync::{Arc, Mutex}; use prover_service::RemoteSynthesizer; use queues::{Buffer, IsQueue}; + use zksync_dal::gpu_prover_queue_dal::SocketAddress; use zksync_dal::ConnectionPool; @@ -13,14 +14,24 @@ pub struct SynthesizedCircuitProvider { queue: SharedAssemblyQueue, pool: ConnectionPool, address: SocketAddress, + region: String, + zone: String, } impl SynthesizedCircuitProvider { - pub fn new(queue: SharedAssemblyQueue, pool: ConnectionPool, address: SocketAddress) -> Self { + pub fn new( + queue: SharedAssemblyQueue, + pool: ConnectionPool, + address: SocketAddress, + region: String, + zone: String, + ) -> Self { Self { queue, pool, address, + region, + zone, } } } @@ -34,15 +45,16 @@ impl RemoteSynthesizer for SynthesizedCircuitProvider { let queue_free_slots = assembly_queue.capacity() - assembly_queue.size(); if is_full { self.pool - .clone() .access_storage_blocking() .gpu_prover_queue_dal() .update_prover_instance_from_full_to_available( self.address.clone(), queue_free_slots, + self.region.clone(), + self.zone.clone(), ); } - vlog::info!( + vlog::trace!( "Queue free slot {} for capacity {}", queue_free_slots, assembly_queue.capacity() diff --git a/core/bin/setup_key_generator_and_server/Cargo.lock b/core/bin/setup_key_generator_and_server/Cargo.lock index 6e50836f3760..cf2cbde931d9 100644 --- a/core/bin/setup_key_generator_and_server/Cargo.lock +++ b/core/bin/setup_key_generator_and_server/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -276,7 +276,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bellman_ce" version = "0.3.2" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayvec 0.7.2", "bit-vec", @@ -323,7 +323,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "clap", @@ -355,6 +355,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" + [[package]] name = "bitvec" version = "0.20.4" @@ -401,7 +407,7 @@ dependencies = [ [[package]] name = "blake2s_const" version = "0.6.0" -source = "git+https://github.com/matter-labs/bellman?branch=dev#3aa6226d04d60c539ff4ee480479ac5b92871a20" +source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ "arrayref", "arrayvec 0.5.2", @@ -564,7 +570,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -577,7 +583,7 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -848,7 +854,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "proc-macro-error", "proc-macro2 1.0.52", @@ -1021,16 +1027,6 @@ dependencies = [ "signature", ] -[[package]] -name = "eip712-signature" -version = "0.1.0" -source = "git+https://github.com/vladbochok/eip712-signature#30b11455e7d613313e8c12d2aad961fd4bf902fe" -dependencies = [ - "ethereum-types", - "parity-crypto", - "thiserror", -] - [[package]] name = "either" version = "1.8.1" @@ -1496,7 +1492,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bindgen", "crossbeam 0.8.2", @@ -1509,7 +1505,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -1579,7 +1575,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "headers-core", "http", @@ -2373,7 +2369,7 @@ version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -2874,7 +2870,7 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" dependencies = [ "api", "bincode", @@ -3136,7 +3132,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -3313,7 +3309,7 @@ version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -3458,7 +3454,7 @@ version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -3888,13 +3884,12 @@ dependencies = [ [[package]] name = "sync_vm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" dependencies = [ "arrayvec 0.7.2", "cs_derive", "derivative", - "eip712-signature", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", "hex", "itertools", @@ -4834,25 +4829,21 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" dependencies = [ - "blake2 0.10.6", - "k256", "lazy_static", "num 0.4.0", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" dependencies = [ "env_logger 0.9.3", "hex", @@ -4871,22 +4862,24 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" 
-version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags", + "bitflags 2.3.1", + "blake2 0.10.6", "ethereum-types", + "k256", "lazy_static", "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] name = "zkevm_test_harness" -version = "1.3.1" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" +version = "1.3.2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" dependencies = [ "bincode", - "blake2 0.10.6", "circuit_testing", "codegen 0.2.0", "crossbeam 0.8.2", @@ -4899,8 +4892,6 @@ dependencies = [ "rayon", "serde", "serde_json", - "sha2 0.10.6", - "sha3 0.10.6", "smallvec", "structopt", "sync_vm", @@ -4930,6 +4921,7 @@ dependencies = [ "serde_json", "url", "zksync_basic_types", + "zksync_contracts", "zksync_utils", ] @@ -5011,11 +5003,13 @@ dependencies = [ "envy", "futures", "hex", + "itertools", "num 0.3.1", "reqwest", "serde", "thiserror", "tokio 1.26.0", + "vlog", "zk_evm", "zksync_basic_types", ] diff --git a/core/bin/setup_key_generator_and_server/Cargo.toml b/core/bin/setup_key_generator_and_server/Cargo.toml index 0e714b389f16..6c4340160f46 100644 --- a/core/bin/setup_key_generator_and_server/Cargo.toml +++ b/core/bin/setup_key_generator_and_server/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -23,9 +23,10 @@ vlog = { path = "../../lib/vlog", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } circuit_testing = {git = "https://github.com/matter-labs/era-circuit_testing.git", branch = "main"} -api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "main", features=["gpu"], default-features=false} -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "main", features=["gpu"], default-features=false} -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "main"} +api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} + structopt = "0.3.26" itertools = "0.10.5" diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index ea7d4ad8d95d..b7c5c5c91042 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -129,7 +129,8 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.raw( vec![ - "", + " ", + " ", ] .join("\n"), ); diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 592761a67116..9c27634c1ed5 100644 --- 
a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -11,7 +11,7 @@ use vm::{ BootloaderJobType, DerivedBlockContext, TxExecutionMode, }, zk_evm::{aux_structures::Timestamp, zkevm_opcode_defs::BOOTLOADER_HEAP_PAGE}, - OracleTools, + HistoryEnabled, OracleTools, }; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, BaseSystemContracts, @@ -157,7 +157,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let bootloader_balane_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); storage_ptr.set_value(&bootloader_balane_key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let bytecode = read_bootloader_test_code("transfer_test"); let hash = hash_bytecode(&bytecode); @@ -272,7 +272,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( storage_ptr.set_value(&tx_gas_price_key, u256_to_h256(U256([1, 0, 0, 0]))); } - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -293,7 +293,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( None, ); let tx_execution_result = vm - .execute_next_tx(u32::MAX) + .execute_next_tx(u32::MAX, false) .expect("Bootloader failed while processing transaction"); total_gas_refunded += tx_execution_result.gas_refunded; @@ -320,6 +320,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( 0, // The number of contracts deployed is irrelevant for our needs result.full_result.contracts_used, result.full_result.cycles_used, + result.full_result.computational_gas_used, ); VmSpentResourcesResult { diff --git a/core/bin/verification_key_generator_and_server/README.md b/core/bin/verification_key_generator_and_server/README.md new file mode 100644 index 000000000000..a3f9d6f7c768 --- /dev/null +++ b/core/bin/verification_key_generator_and_server/README.md @@ -0,0 +1,39 @@ +# Verification keys + +We currently have around 20 different circuits like: Scheduler, Leaf, KeccakPrecompile etc. (for the full list - look at +CircuitType enum in sync_vm repo). + +Each such circuit requires a separate verification key. + +This crate fulfills 2 roles: + +- it has the binaries that can generate the updated versions of the keys (for example if VM code changes) +- it provides the libraries that can be used by other components that need to use these keys (for example provers) - + behaving like a key server. + +Moreover, all these keys are submitted as code within the repo in `verification_XX_key.json` files. + +## zksync_verification_key_server + +This is the library that can be used by other components to fetch the verification key for a given circuit (using +`get_vk_for_circuit_type` function). + +## zksync_verification_key_generator + +The main binary that generates verification keys for given circuits. Most of the heavy lifting is done by the +`create_vk_for_padding_size_log_2` method from circuit_testing repo. + +The results are written to the `verification_XX_key.json` files in the current repository. + +## zksync_json_to_binary_vk_converter + +Converts the local json verification keys into the binary format (and stores them in the output directory).
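The `zksync_verification_key_server` library described in the README above is also consumed elsewhere in this PR (for example by the prover's artifact provider). As a minimal usage sketch, assuming that `get_vk_for_circuit_type` accepts the numeric circuit type as a `u8` and returns the key deserialized from the matching `verification_XX_key.json` file, a caller might look like this:

```rust
// Hypothetical sketch, not part of this diff: fetch the verification key for
// one circuit type via the zksync_verification_key_server library. Assumes
// the function takes the numeric circuit type (u8) and panics if the matching
// verification_XX_key.json file is missing.
use zksync_verification_key_server::get_vk_for_circuit_type;

fn main() {
    let circuit_type: u8 = 3; // one of the ~20 circuit types from sync_vm
    let vk = get_vk_for_circuit_type(circuit_type);
    // The returned key would then be handed to a prover or verifier component.
    drop(vk);
}
```

The same crate also exposes `save_vk_for_circuit_type`, which the generator binary imports for writing regenerated keys back out.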
+ +## zksync_commitment_generator + +This tool takes the 3 commitments (one for all the basic circuits, one for node and one for leaf), computed based on the +current verification keys - and updates the contract.toml config file (which is located in etc/env/base/contracts.toml). + +These commitments are later used in one of the circuit breakers - to compare their values to the commitments that L1 +contract holds (so that we can 'panic' properly - if we notice that our server's commitments differ from the L1 +contracts - which would result in failed L1 transactions). diff --git a/core/bin/verification_key_generator_and_server/data/verification_0_key.json b/core/bin/verification_key_generator_and_server/data/verification_0_key.json index f58dd8b77c96..c3262193a4fd 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_0_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_0_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 6996639307385550831, - 17319912920749338549, - 13743098072687682109, - 647002596780190748 + 14745348174000482855, + 2839037062185937123, + 3369862715588854899, + 1495909583940713128 ], "y": [ - 14937211687798818996, - 3145678279503192900, - 2393479722380641473, - 1832345501773476397 + 6859454683840363585, + 11340551061368171664, + 9528805406487149561, + 3414144677220223705 ], "infinity": false }, { "x": [ - 17598614517615535171, - 848181507441828018, - 5850486576278841220, - 32839070190916981 + 9215749870136224396, + 18418669114332753377, + 13140219601461030180, + 2381098845928447331 ], "y": [ - 15638119861902952636, - 15990070541865097016, - 9938241278726680530, - 3253668464465009821 + 8834765081837029169, + 4424842234296363904, + 13294547557836067005, + 414624398145171890 ], "infinity": false }, { "x": [ - 15820142264037469263, - 8568150507675978934, - 15184738710625145743, - 2918079076708036341 + 2148575411987453084, + 16730180692461995258, + 12423475767707134837, + 3014264170083149730 ], "y": [ - 11416456459976315626, - 3402571070523181186, - 10711656809635850215, - 3458605442614241783 + 10870860158804422503, + 14060279526953529989, + 2266257082861680293, + 22356173050560284 ], "infinity": false }, { "x": [ - 3247185907660573884, - 8680893512058284381, - 15293663808369282826, - 254464349321261856 + 17803008042411335770, + 5713064950476621403, + 17979342410816871746, + 491265656076548841 ], "y": [ - 13343058922756376449, - 12317703331224821391, - 11806868346310958492, - 2684579758117701042 + 9823492080506672630, + 3637386621225409615, + 8776978043600973097, + 2514196809208915768 ], "infinity": false }, { "x": [ - 13800337384513968192, - 7175236405886576174, - 13570768133887085852, - 387814232289887216 + 3768479078383323179, + 16153057542709544671, + 10578964798085613273, + 2831188075764800753 ], "y": [ - 9885901018229566019, - 523828011130471196, - 1519861256484738763, - 2952234907395154335 + 2387514805820590694, + 15085489652142686165, + 8141513931186597223, + 1582376980242699819 ], "infinity": false }, { "x": [ - 18087005260147532945, - 17928529254779250966, - 15212090268560149518, - 2873489351192112694 + 5395455814671474247, + 5013790368139874617, + 8671649443504728767, + 839142828943885970 ], "y": [ - 9754679093955511335, - 15228868340103952717, - 7400010337361881225, - 2135269756274591123 + 11231626069154926735, + 5078347962234771017, + 17373886182204596447, + 513647957075879347 ], "infinity": false }, { "x": [ - 3911797286444259863, - 2822641825033566015, - 17971742906231839373, - 
3182860756772797703 + 8940485327950054531, + 9156997542069636576, + 14316753178152000598, + 3357551869664255582 ], "y": [ - 12343345623334385235, - 285662075504430273, - 3999467118287193716, - 2995587335427070590 + 14102490706504125272, + 4494991810930729808, + 15532318871086968466, + 1537365238286274178 ], "infinity": false }, { "x": [ - 1905795732733198067, - 11557924895297067947, - 6594413365276397340, - 840436611656519074 + 13914906478277300859, + 6213896071228541481, + 4364409818367302306, + 659097390118096039 ], "y": [ - 5150542979696082917, - 5795660884865753383, - 17893634013801397437, - 2242493309707647496 + 7328372274594390887, + 2650332638498669615, + 15455628473476960005, + 3119379427019958230 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 8185829286974780081, - 15004561001743944578, - 3280208794976361430, - 2047845739237089281 + 9438200511694036157, + 11094162170960057340, + 9123678872696723713, + 2950597355117190054 ], "y": [ - 13820263889085061406, - 4214367347259390974, - 2949054642305024413, - 1108871040338712662 + 6153972960518016517, + 8045683598100955864, + 13410633858416643489, + 988361678931464913 ], "infinity": false }, { "x": [ - 14580608619995336140, - 14569188170612185050, - 1922583079876338495, - 1008434097725400856 + 805964423710846142, + 13603470797942296854, + 11292123377140077447, + 1455913517812009773 ], "y": [ - 10727948391864029299, - 13894372243681777679, - 15203339060867180632, - 67905455398516005 + 4541622738043214385, + 8186357170000535775, + 4765839113294831637, + 3026863977499737494 ], "infinity": false } @@ -160,78 +160,78 @@ "permutation_commitments": [ { "x": [ - 4136655882081070061, - 10002634152140233885, - 980031450563900168, - 2157352299456443493 + 1851039213129741497, + 11907960788190413713, + 2882727828085561070, + 1451278944954982956 ], "y": [ - 11834666410496287947, - 12048212945911710814, - 12632184988200405344, - 3084449143866571881 + 15245785050592773860, + 1774295027236395480, + 3373069120056880915, + 1080245109458702174 ], "infinity": false }, { "x": [ - 16491067695009214283, - 7420133316471415099, - 6689890872349113887, - 457551328045298948 + 9366052859968548005, + 12275028918364559591, + 2472023927159177225, + 1052535074027277666 ], "y": [ - 7262445873345515147, - 15297330015248906010, - 6131646910537731978, - 1498048652184369950 + 2428574557555628629, + 15067392861858369528, + 16949255188095910778, + 2297925771936569168 ], "infinity": false }, { "x": [ - 8869864687692391690, - 7984673273578638559, - 16574497274063903001, - 110512104436182781 + 17016009610362956206, + 4047659663396753591, + 1832464593155416403, + 2725142957049914767 ], "y": [ - 13333735955399942427, - 3229330830010457349, - 15279257314107395654, - 661229224879455791 + 12447928856414787240, + 3072280375285720285, + 12294239288643819494, + 613511140380288958 ], "infinity": false }, { "x": [ - 2551282574896295260, - 9760668035586690469, - 2137355194036876554, - 997985933947120674 + 6312774245791141720, + 496150993329472460, + 12773767122915456934, + 3404402910494500531 ], "y": [ - 14631704616305988699, - 10049029090982316688, - 18177176779365761171, - 465292341046541191 + 13852578578747731084, + 9030931732410275304, + 17159996848865265705, + 1696956882146098553 ], "infinity": false } ], - "total_lookup_entries_length": 1073528, + "total_lookup_entries_length": 1073530, "lookup_selector_commitment": { "x": [ - 2876161092882109885, - 11236384672929432721, - 3859399786505175224, - 3407285818214399211 + 4441974708940861232, + 
11325614820129407652, + 7273013871150456559, + 2270181644629652201 ], "y": [ - 10435488890608201079, - 17599366864863653972, - 8044023367771821543, - 2530994762013802379 + 3070631142979677922, + 15247189094202672776, + 12651459662740804392, + 1832216259472686694 ], "infinity": false }, @@ -299,16 +299,16 @@ ], "lookup_table_type_commitment": { "x": [ - 4651855694129232729, - 1837151187589998268, - 10423659239329633637, - 1354887747950126245 + 7312875299592476003, + 313526216906044060, + 13914875394436353152, + 3424388477700656316 ], "y": [ - 11281536115673624785, - 9372850764821773580, - 16691907077441992896, - 1840015931540579970 + 2572062173996296044, + 5984767625164919974, + 12005537293370417131, + 616463121946800406 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/data/verification_10_key.json b/core/bin/verification_key_generator_and_server/data/verification_10_key.json index ffccb01e6ec7..ec9d3727bff9 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_10_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_10_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 1201163354318640229, - 2055219705754088415, - 2073513857941343608, - 1963241099003853619 + 4364720487844379181, + 17010766725144227333, + 1022678199111276314, + 1146578362772127376 ], "y": [ - 17799409318346209032, - 4569825428251693578, - 6381298955182636653, - 2931304030683520542 + 10340654727439455072, + 12691578856596245032, + 837883495763401146, + 2135776887902289239 ], "infinity": false }, { "x": [ - 10205334960232870709, - 8738143462199324053, - 10144063519726047206, - 1927681263581782011 + 14564870240038241482, + 16001391704613609683, + 16397364612792898214, + 1316914335235774452 ], "y": [ - 1230087240085406483, - 122158889944391944, - 15489966604787936285, - 1330866847855106152 + 2386942353392090183, + 4642131766714508143, + 16789479723446408276, + 2261353401184907401 ], "infinity": false }, { "x": [ - 15032728768196374721, - 14862087582458614750, - 7160777336710671141, - 1839914978510952544 + 6081056006818109026, + 14051483412950926523, + 8605392534710099348, + 1527183574619010123 ], "y": [ - 9167736299854863275, - 11029351644449385190, - 11704926274847189145, - 260928699177925870 + 3896696527234063839, + 12862398541231039501, + 1005646628007936886, + 3479645512156004366 ], "infinity": false }, { "x": [ - 6154048382441357511, - 6065973151607458483, - 10376828199029605678, - 637872484884018456 + 11266242489999219523, + 8100856016495224488, + 6788749864393617587, + 482299081118345826 ], "y": [ - 14591679681463835063, - 11525093582596803352, - 1709443408871507900, - 3429218151747917770 + 225211373180020785, + 6498635074385582091, + 4274055525472487569, + 2578651815252093838 ], "infinity": false }, { "x": [ - 1815602336806763776, - 13593648969739071827, - 2250784759987498478, - 1383101671644568596 + 10378455392293934375, + 13391940670290769236, + 10463014668466536299, + 472544008986099462 ], "y": [ - 5019124305555384450, - 17601640867098758221, - 9775997368301268404, - 1408209289935626655 + 1502016714118108544, + 14252801754530793876, + 2203844491975584716, + 1116114255465135672 ], "infinity": false }, { "x": [ - 15529551917580920354, - 15991144646297655273, - 9486652439657094814, - 1390602104586342411 + 9703616742074407567, + 9691703077434834222, + 7366620887865105973, + 36165572355418066 ], "y": [ - 3929940190863560223, - 8236698157821025721, - 5646571270823092175, - 1332794415076748898 + 7430304832706471782, + 
5173267152399523091, + 14416699599905226230, + 2681204653630184824 ], "infinity": false }, { "x": [ - 16414056192599840640, - 9354012131211735456, - 9596248347509541885, - 618105062913462260 + 9347312215430913530, + 13606433894103359668, + 14013475178334262360, + 2947181048682744075 ], "y": [ - 14706399904358885528, - 18407654245399576822, - 12492666645205976157, - 1948689159065703317 + 4001199390012145932, + 4622813642635649819, + 16433672063298879053, + 1247842462976799965 ], "infinity": false }, { "x": [ - 13558528973202397344, - 16490213789147112711, - 2545050042457701112, - 2461984285815921195 + 1639425503718708209, + 8242804754724970899, + 11043260258533730377, + 2245145560504199089 ], "y": [ - 1243522511418786191, - 14024991914114903033, - 3240957311806204415, - 1331963310566154043 + 14202551139064230506, + 4307109380979442947, + 13141687433511141087, + 1913204959448290015 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 2355207075120322239, - 154382368370147482, - 17535588518046215656, - 951301722877602968 + 17540836040216578409, + 14577118461028955096, + 2300935836423716880, + 427649651480863044 ], "y": [ - 8143574239198426927, - 15181046710423727756, - 939512270474677381, - 2189391407681219550 + 13066723755606073272, + 17324941433857131282, + 1679499122173566851, + 3298750515604566671 ], "infinity": false }, { "x": [ - 2890553312916912039, - 17705725012328133701, - 2022719624271833539, - 2637957613463216068 + 14709152157752642079, + 13510549649315108277, + 3019440420162710858, + 627188607991231846 ], "y": [ - 5236464205678137999, - 16757283308499630897, - 8200824660959901166, - 2919902965709603522 + 16615706301470133997, + 915024441316023047, + 13798541787831785917, + 3340602253234308653 ], "infinity": false } @@ -160,61 +160,61 @@ "permutation_commitments": [ { "x": [ - 16714273949872413156, - 2992548526703231722, - 12994550586454036656, - 2850293963773054147 + 12626704863583094704, + 3308474372162220296, + 16088806788444947642, + 636430705662147361 ], "y": [ - 20936503166899629, - 6605606497308863677, - 17099277659192707089, - 1380542399287382407 + 17052785040105865748, + 11203395113497209978, + 2939609765212411460, + 3167290643533167611 ], "infinity": false }, { "x": [ - 13795422779792735167, - 5844171721881847978, - 15538273242292518413, - 1058423441485970566 + 3075146465184418179, + 11559452521956513155, + 1656597085428845901, + 1618447062156730856 ], "y": [ - 6786146000425626930, - 16000492908624161349, - 14508879795995142132, - 2058605944332981830 + 2010693621773175313, + 2977509893150409878, + 9431891659616951962, + 1776222288355278384 ], "infinity": false }, { "x": [ - 18200274038480135632, - 16055911430166580378, - 300150758763362260, - 3469399278233580352 + 6408318860212838666, + 9847136022608767026, + 18080834927350013528, + 3306285138140631107 ], "y": [ - 13410997741446184872, - 18426661601342750165, - 13675996097717919382, - 2823744450040807611 + 16064928058583899597, + 461689523483649779, + 13572099112445223829, + 1563453638232968523 ], "infinity": false }, { "x": [ - 2441841452202772744, - 10073497087340947500, - 6034950516307738609, - 2764263384383384439 + 327171445663828020, + 12706053900720413614, + 9237483585964880752, + 1960293149538216528 ], "y": [ - 2932218572245153404, - 6171621796632231602, - 14552010019679184040, - 290197788459327299 + 11030775691809003651, + 11089052388657955457, + 3209890793790993499, + 1198867574642866523 ], "infinity": false } @@ -222,16 +222,16 @@ "total_lookup_entries_length": 5202052, 
"lookup_selector_commitment": { "x": [ - 4679962009764158864, - 12984488634629810805, - 12089544576783397583, - 2452985478928538759 + 781239045644769777, + 14316527640474633593, + 2443643435827373112, + 3049372365263474427 ], "y": [ - 16434102449642732058, - 2996812911497483791, - 3668848517208777011, - 2423647953023799522 + 4073012743593667819, + 16009537994875540924, + 11173412503242869179, + 1513208421597995174 ], "infinity": false }, @@ -299,16 +299,16 @@ ], "lookup_table_type_commitment": { "x": [ - 156247164488818630, - 15253439856813229615, - 8663309459020264968, - 3311090483224496957 + 7603211811706190713, + 2486982239745271096, + 11528266448545919500, + 3080741880407152411 ], "y": [ - 10512458335337838200, - 8831597928021087370, - 4017939162489373323, - 1056856470188779191 + 7967754771633653173, + 6016822892450616749, + 9688696792558711613, + 2682562048141398047 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/data/verification_3_key.json b/core/bin/verification_key_generator_and_server/data/verification_3_key.json index dcf6f058ba87..d6435ac58dcc 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_3_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_3_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 10550376187145162963, - 12863286379272137663, - 5328709993963115843, - 2172775919728224150 + 16132018988898810280, + 14629326668513402207, + 13927012963228438244, + 438836257494388653 ], "y": [ - 16641463475462267374, - 9954127684318722109, - 17488757377000951923, - 897495197662393128 + 9208149293484976939, + 6928926474381817269, + 9049839661709678601, + 3159086914561291422 ], "infinity": false }, { "x": [ - 6563322001445008336, - 797353186481493880, - 13315537264768069768, - 1248024693457931662 + 8167991757189792621, + 12144200201207014477, + 11734943669898586373, + 2193141783069775476 ], "y": [ - 17871396538659348629, - 11120452368184545101, - 4860719086907382388, - 545958236928374726 + 17360142870886900923, + 13115961790852995117, + 18410766760283752426, + 58598073722965045 ], "infinity": false }, { "x": [ - 292978148064211492, - 14411686124929797817, - 4896230459039474260, - 3325585646245466745 + 6146007756603285924, + 6323811950493532548, + 13106922048360852624, + 466032134849380491 ], "y": [ - 6546733926726165682, - 13661029324178790447, - 8144639210545978915, - 1916046104003446938 + 18129312257576062028, + 1862682333985860999, + 11375866511232479537, + 1993295577101228431 ], "infinity": false }, { "x": [ - 522387150147797243, - 8068708650984191567, - 9541497069690935578, - 1760680995799985067 + 2809427848829662407, + 12284258556184215755, + 7636306187033076241, + 2114748180158946072 ], "y": [ - 3038250252977779269, - 11127852903590962676, - 17196318053761803653, - 2056822949845331380 + 14938290724462753010, + 4469670089738748357, + 4333038821107617112, + 3325252483617185630 ], "infinity": false }, { "x": [ - 13013233153912437234, - 3481939096748005263, - 11703430308701963632, - 699965740798448577 + 2811571661472266293, + 10805076128876270563, + 13466352093104839476, + 2679418577316861424 ], "y": [ - 2376338211072159395, - 3649208867362862370, - 14621597111684436147, - 2544346533921699116 + 6289932497598671988, + 10287193347450766117, + 1002213271037575380, + 2919063136646142593 ], "infinity": false }, { "x": [ - 17820130262026588600, - 12964227826098503201, - 12061824382203771777, - 1319872583253268021 + 18040905380309423259, + 2992050369681309630, + 7602002287396364748, 
+ 610279260467961564 ], "y": [ - 5311405529601443696, - 4018470607034478338, - 12467574722491214286, - 1680065035418478766 + 13064134520386336340, + 16129927882519803745, + 2367043478173028808, + 756367716463615162 ], "infinity": false }, { "x": [ - 16492013190113062691, - 4078199263259563511, - 5012129511641877781, - 41349067130114428 + 11205248788812451673, + 12805888223872356565, + 3192506617534475900, + 1788055857068087500 ], "y": [ - 1114102693178536110, - 2503559827484609194, - 17580206671275585606, - 1867813293022735930 + 14249524255052314472, + 2804915545701549755, + 8453552885321586295, + 1517275418947033059 ], "infinity": false }, { "x": [ - 16110849929343439596, - 6211162944442225023, - 4176530808959958823, - 776245536604962044 + 3370213179722093102, + 15232829601741289326, + 15264565963863545427, + 3399686965437331565 ], "y": [ - 16924432738061870794, - 12887029707706499486, - 4536220723940362359, - 3066276249014284578 + 695972907603137570, + 1065210392506346562, + 13377574163460762697, + 1438903077274895969 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 1387561443159160261, - 477775943378385877, - 18289682494460818312, - 1225127103248480226 + 2916037360975110368, + 2115449174392144212, + 14608640754181148556, + 971169600851407725 ], "y": [ - 13861633555716877419, - 11021086605287795744, - 16936179868445624555, - 1675422904480642708 + 8394386574107401239, + 11833730770085674615, + 2274226308564527013, + 757668483662158276 ], "infinity": false }, { "x": [ - 15468643478719493386, - 16296293513431599325, - 16896241103993257585, - 2527892380069637821 + 15790049926918974381, + 1498916798022404031, + 7512683194829329777, + 130629875891522412 ], "y": [ - 6924618732580187946, - 4145636937666672779, - 7746613394270955608, - 3066347309052150921 + 10570165756172650947, + 16024260722575267127, + 10403393574807352915, + 2193441270275759098 ], "infinity": false } @@ -160,78 +160,78 @@ "permutation_commitments": [ { "x": [ - 14066357332293623201, - 5590408864608988441, - 13212552532626677878, - 1266149383570298600 + 16937140281571163722, + 8528685593141239391, + 14604264781363495282, + 137274269481496519 ], "y": [ - 8869085471956584718, - 5205295041620019258, - 3809406423704825921, - 1179747793942763876 + 12832201597709954736, + 14731869942190515158, + 6098275778634872340, + 1894980855192645736 ], "infinity": false }, { "x": [ - 14164174487380098248, - 399714379143206482, - 17932555173948549907, - 1510320565295811683 + 9506243675355227355, + 4921543690455935123, + 4864521994209504368, + 1194838968611913113 ], "y": [ - 16262108781366800885, - 11903929495245543058, - 2030047105830251389, - 3175594667863213623 + 3514343005912152815, + 4590983463867380289, + 4810604720730504186, + 2370963330363647136 ], "infinity": false }, { "x": [ - 12424381171397409053, - 9816009036263118486, - 123424474575366759, - 2076055869458958907 + 17229604967143530892, + 10436078466088746474, + 8131435850212543426, + 62279533258920234 ], "y": [ - 9044471714502471176, - 3761425429363202673, - 2034795243747202222, - 2173356352931419454 + 13233331203150147346, + 1683012122586045301, + 12023735178019542936, + 209604889173254535 ], "infinity": false }, { "x": [ - 15867531630171378979, - 18364941470214608297, - 14848155053348599697, - 1692409159029904984 + 2317615578050559984, + 3409863650741942555, + 2800913349454859982, + 1145393423472998888 ], "y": [ - 15366054888699024816, - 17348713584488930215, - 15195830143979672152, - 572727508810720113 + 3313805684145217681, + 
8698235642305641581, + 9555080423733977624, + 1868569776379940006 ], "infinity": false } ], - "total_lookup_entries_length": 15088282, + "total_lookup_entries_length": 15224202, "lookup_selector_commitment": { "x": [ - 862345184327547040, - 7262652051250145030, - 10964591811473823871, - 1435380748710871870 + 8259805827727145280, + 16824074282046227807, + 3121477555564412954, + 1865943569871175431 ], "y": [ - 10151301439783108881, - 1188186211158585113, - 8160902585771466656, - 833565525567445435 + 16644549437179353127, + 14517598437143192812, + 10007139167811704538, + 1155056757464351841 ], "infinity": false }, @@ -253,16 +253,16 @@ }, { "x": [ - 11928160479317928747, - 15728917754258005897, - 10320536119352799654, - 270214245519994465 + 11118240121224901946, + 9394562257959111170, + 9026436993514314918, + 1751747619588842429 ], "y": [ - 10541702140730698371, - 16733093124167020021, - 11131412253235036732, - 300062717716678428 + 6039590802345873394, + 17531716309156986038, + 1711770599161550805, + 1941094644175870288 ], "infinity": false }, @@ -299,16 +299,16 @@ ], "lookup_table_type_commitment": { "x": [ - 12316913295088139285, - 11480691069159889893, - 7996535489614067794, - 809390717012814237 + 15847017871013531611, + 15599972677346614519, + 7829590182636204190, + 1175429517043722092 ], "y": [ - 6230277635134941386, - 3986531911879547545, - 3909527337181494200, - 3269190001359038160 + 3566505229345713693, + 2712054860970341853, + 7992126959188622741, + 416242544149800237 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/data/verification_4_key.json b/core/bin/verification_key_generator_and_server/data/verification_4_key.json index 1fca402a57d1..8d42dcd66a75 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_4_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_4_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 5624259952235159639, - 12493453537753524096, - 5162742944723295422, - 874801217568058344 + 15923176050075197, + 8963905519117333456, + 5333091548039957996, + 1660697180439834807 ], "y": [ - 9255161020512573415, - 363459688878681289, - 820936643727254318, - 2429296485867858134 + 13105864494044341635, + 10079874572012628853, + 4164109084931753781, + 1860950003357484648 ], "infinity": false }, { "x": [ - 2832540416149381652, - 15430317050493808950, - 17179036781265260663, - 186820416950920400 + 8216018177730810417, + 13660800917029254431, + 2933384097067755755, + 2823425599268575868 ], "y": [ - 1595111043068965121, - 12787589346811970515, - 16036754970398854466, - 1936430940635933371 + 8768863192718196559, + 10146282684570870426, + 8275806247588563419, + 605489936306033583 ], "infinity": false }, { "x": [ - 8954537073230160476, - 17141264894256989510, - 6295027223169328386, - 945694272488560390 + 4277344855257545209, + 11172040917478096607, + 4489086903928758598, + 289283798032159440 ], "y": [ - 12946393132907201326, - 15831815351998984402, - 11467477440888834372, - 2122439063028644692 + 10444137083253378550, + 12133212848977612596, + 6748791972701343485, + 286274227999569844 ], "infinity": false }, { "x": [ - 16668856378658574030, - 12464185126307046024, - 4782167605155661077, - 2925818815088278657 + 8861797510071553254, + 12734094237204882518, + 13692967202881086499, + 641906135411222522 ], "y": [ - 14254029004937284994, - 16838260511439178142, - 18330455412567106782, - 907257260948957347 + 6831762763487302461, + 11965405347371646114, + 6218256502970252800, + 
3201462388136754725 ], "infinity": false }, { "x": [ - 3757073371674290600, - 14749158543939658543, - 500288168398934349, - 899019556764130326 + 12385743015818134054, + 16282219738575446638, + 3256359841301423419, + 505673042938576760 ], "y": [ - 2551967063854357353, - 11943083947319003127, - 3713483551270573853, - 3048346390727492861 + 6744956686738207932, + 8994291190634790001, + 16789606231722015883, + 2027930268272962928 ], "infinity": false }, { "x": [ - 9681195336192488789, - 4385315926758329080, - 11387599749194291462, - 2590055918991698961 + 13671822069226357541, + 818021157447551159, + 10542481209144358852, + 2459295197762128786 ], "y": [ - 9441467182736837048, - 14806656083518409337, - 6289102250953692061, - 2535387000517398099 + 1072649761929447549, + 6089126583512618706, + 1178131210084507361, + 1066836948212725576 ], "infinity": false }, { "x": [ - 13507468900709913557, - 15716156111774497427, - 816956999229367707, - 1522047434717677609 + 16878956366815094090, + 364977461173568122, + 5439594588743996145, + 1265442855735725449 ], "y": [ - 3702769098715132058, - 3982451445376700785, - 6723421039598308657, - 2251542644425584421 + 11461704536083653156, + 660278441271820299, + 4314245569905306892, + 1438663846765259508 ], "infinity": false }, { "x": [ - 1475691146597603932, - 17022584496562609257, - 17821269401726564016, - 802662685627574735 + 9038539654045396650, + 539827912679485452, + 15399544523862100757, + 1256406598444490417 ], "y": [ - 10096079063677634763, - 12234172521086631920, - 297465434937655535, - 3243745710298231123 + 5422113905848106255, + 4943961807853536385, + 10022409325033689104, + 3200702511424842211 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 17529827936208631386, - 9261627982909469647, - 961082867947678862, - 960578719286164751 + 7750990741566547331, + 12040155777441846781, + 3000981333322867315, + 2393292192734976436 ], "y": [ - 8405963553525505153, - 907040572117159306, - 14956302475326239995, - 547263587163632990 + 3394853839941291504, + 944019051205640111, + 1104911864338577098, + 2127308956089601096 ], "infinity": false }, { "x": [ - 16590598224397602624, - 9925399142289127105, - 3290399501509259487, - 2600139476199697777 + 4735140124663926465, + 16935779121597983173, + 17111626619540374574, + 2327973550601526140 ], "y": [ - 11547014733601362211, - 8210713950853401970, - 18225836752365218802, - 3416518080459492792 + 8990848735371189388, + 4589751206662798166, + 7575424772436241307, + 2798852347400154642 ], "infinity": false } @@ -160,78 +160,78 @@ "permutation_commitments": [ { "x": [ - 1361004974540175066, - 16274313641371783602, - 6174733117825004502, - 2094853940073638978 + 4765077060699177749, + 15235935045874519477, + 2022237788491579392, + 354385727984957703 ], "y": [ - 562696341739915410, - 14890673686457558264, - 3661460202790599374, - 2475743175658913489 + 11620113321350620961, + 2521830680983779826, + 14047226057605943635, + 2718701882953208503 ], "infinity": false }, { "x": [ - 17517940283060625271, - 5779867718792326928, - 17745057071421714730, - 1985060149839739251 + 12967015398643083015, + 1100660813730542482, + 7835181433213557652, + 803165211156388599 ], "y": [ - 9540288339316398759, - 2058552121996607541, - 7871901128942825027, - 2699744308553389629 + 8557385569712401227, + 535900682745452035, + 16083571717847325979, + 396765644246918860 ], "infinity": false }, { "x": [ - 10135642768859521260, - 10297677444020519573, - 9219235108573499828, - 2065187692845085359 + 6868107733370365435, + 
17106601841261210672, + 12219407605084986215, + 2345246684976405066 ], "y": [ - 8372351253470713532, - 6842701361290620477, - 11926122104604392352, - 1882199601947395362 + 17532412968783851743, + 9996315626158111485, + 17970945522106166231, + 1003764081419207606 ], "infinity": false }, { "x": [ - 7787083258082044412, - 8798544784294490279, - 15252059285839385340, - 209159930416492510 + 7011201477832405407, + 8818123127103997131, + 2979445003396953339, + 318603240233076406 ], "y": [ - 9464645197696091031, - 12735510170331867214, - 10126980317766617408, - 1956203655559965748 + 11712108043964996282, + 3474989587891133574, + 3983451673298542860, + 1181581919257021598 ], "infinity": false } ], - "total_lookup_entries_length": 8484858, + "total_lookup_entries_length": 8484642, "lookup_selector_commitment": { "x": [ - 709495900213098376, - 5439249685673083978, - 10044314386307915207, - 868534819499649206 + 27459247093738343, + 1785927757103538268, + 14972116880195568621, + 1034224917068963325 ], "y": [ - 7744104606837258411, - 9395404917324005459, - 5711899910359603748, - 3413594411142959024 + 17453858127001596558, + 6200103235089742197, + 16245568162666829501, + 651193715230511441 ], "infinity": false }, @@ -299,16 +299,16 @@ ], "lookup_table_type_commitment": { "x": [ - 6342996221389543983, - 2956974825746967379, - 5313102419815794231, - 2142845372908172595 + 14868101692362122308, + 8135288013508071846, + 9460482611527381887, + 512823635961282598 ], "y": [ - 11930073583427772667, - 3300840902988996263, - 13848383192378869374, - 1678905557563667573 + 8358211286664762188, + 3532634521932288534, + 5862145521507736138, + 1807935137626658536 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/src/lib.rs b/core/bin/verification_key_generator_and_server/src/lib.rs index 66fb37dcaf4c..ee888cdb2cf8 100644 --- a/core/bin/verification_key_generator_and_server/src/lib.rs +++ b/core/bin/verification_key_generator_and_server/src/lib.rs @@ -102,7 +102,7 @@ pub fn get_circuits_for_vk() -> Vec> fn ensure_setup_key_exist() { if !Path::new("setup_2^26.key").exists() { - panic!("File setup_2^26.key is required to be present in current directory for verification keys generation. \ndownload from https://storage.googleapis.com/universal-setup/setup_2^26.key"); + panic!("File setup_2^26.key is required to be present in current directory for verification keys generation. \ndownload from https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key"); } } fn get_file_path(circuit_type: u8) -> String { diff --git a/core/bin/verification_key_generator_and_server/src/main.rs b/core/bin/verification_key_generator_and_server/src/main.rs index a05c112ec528..e556bfd24f59 100644 --- a/core/bin/verification_key_generator_and_server/src/main.rs +++ b/core/bin/verification_key_generator_and_server/src/main.rs @@ -1,21 +1,20 @@ -use serde_json::Value; use std::collections::HashSet; use std::env; -use std::fs::File; -use std::io::{BufReader, Read}; -use std::iter::FromIterator; use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::cs::PlonkCsWidth4WithNextStepAndCustomGatesParams; use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; use zksync_verification_key_server::{get_circuits_for_vk, save_vk_for_circuit_type}; +/// Creates verification keys for the given circuit. 
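+/// Without CLI arguments, keys for all circuit types in the `3..17` range are generated;
+/// passing a circuit type as the first argument restricts generation to that single circuit.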
fn main() { let args: Vec = env::args().collect(); - let mut circuit_types: HashSet = (3..17).collect(); - if args.len() > 1 { - circuit_types = HashSet::from_iter([get_and_ensure_valid_circuit_type(args[1].clone())]); - } + + let circuit_types: HashSet = if args.len() > 1 { + [get_and_ensure_valid_circuit_type(args[1].clone())].into() + } else { + (3..17).collect() + }; vlog::info!("Starting verification key generation!"); get_circuits_for_vk() .into_iter() @@ -44,39 +43,3 @@ fn generate_verification_key(circuit: ZkSyncCircuit = serde_json::from_str(&data).expect("malformed JSON"); - for mut item in json { - let kv = item.as_object_mut().unwrap(); - _build_and_save_verification_key(kv); - } -} - -fn _build_and_save_verification_key(kv: &mut serde_json::Map) { - let key: &str = kv - .get("key") - .expect("key must be present in json") - .as_str() - .expect("key must be of type string"); - let circuit_type: u8 = key - .chars() - .filter(|c| c.is_ascii_digit()) - .collect::() - .parse::() - .unwrap(); - let value: String = kv - .get("value") - .expect("value must be present in json") - .as_str() - .expect("value must be of type string") - .replace("E'\\\\x", "") - .replace('\'', ""); - let bytes = hex::decode(value).expect("Invalid hex string for verification key"); - let vk = bincode::deserialize_from(BufReader::new(bytes.as_slice())).unwrap(); - vlog::info!("Extracted circuit_type: {:?} vk : {:?}", circuit_type, vk); - save_vk_for_circuit_type(circuit_type, vk); -} diff --git a/core/bin/witness_generator/Cargo.toml b/core/bin/witness_generator/Cargo.toml new file mode 100644 index 000000000000..be022dfdf223 --- /dev/null +++ b/core/bin/witness_generator/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "zksync_witness_generator" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. 
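+# The `zksync_*`, `vm`, `vlog` and `prometheus_exporter` dependencies below are path references into this workspace;
+# the remaining crates are pulled from crates.io.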
+ +[dependencies] +zksync_dal = { path = "../../lib/dal", version = "1.0" } +zksync_config = { path = "../../lib/config", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } +vm = { path = "../../lib/vm", version = "0.1.0" } +zksync_object_store = { path = "../../lib/object_store", version = "1.0" } +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_state = { path = "../../lib/state", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } +zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } +zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } +zksync_db_storage_provider = { path = "../../lib/db_storage_provider", version = "1.0" } + +tokio = { version = "1", features = ["time"] } +futures = { version = "0.3", features = ["compat"] } +metrics = "0.20" +serde = { version = "1.0", features = ["derive"] } +async-trait = "0.1" +rand = "0.8" +structopt = "0.3.26" + +[dev-dependencies] +bincode = "1" +const-decoder = "0.3.0" diff --git a/core/bin/witness_generator/README.md b/core/bin/witness_generator/README.md new file mode 100644 index 000000000000..00a805e0b7e5 --- /dev/null +++ b/core/bin/witness_generator/README.md @@ -0,0 +1,52 @@ +# WitnessGenerator + +Please read this +[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99) +for rationale of this binary, alongside the existing one in zk-core. + +The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof +aggregation. That is, every aggregation round needs two sets of input: + +- computed proofs from the previous round +- some artifacts that the witness generator of previous round(s) returns. 
There are four rounds of proofs for every
+  block, each of which starts with an invocation of `{Round}WitnessGenerator` with a corresponding
+  `WitnessGeneratorJobType`:
+
+## BasicCircuitsWitnessGenerator
+
+- generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits)
+- input table: `basic_circuit_witness_jobs`
+- artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and
+  `scheduler_aggregation_jobs`)
+- value in `aggregation_round` field of `prover_jobs` table: 0
+
+## LeafAggregationWitnessGenerator
+
+- generates leaf aggregation circuits (up to 48 circuits of type `LeafAggregation`)
+- input table: `leaf_aggregation_jobs`
+- artifact/output table: `node_aggregation_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 1
+
+## NodeAggregationWitnessGenerator
+
+- generates one circuit of type `NodeAggregation`
+- input table: `leaf_aggregation_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 2
+
+## SchedulerWitnessGenerator
+
+- generates one circuit of type `Scheduler`
+- input table: `scheduler_witness_jobs`
+- value in `aggregation_round` field of `prover_jobs` table: 3
+
+One round of prover generation consists of:
+
+- `WitnessGenerator` picks up the next `queued` job in its input table and processes it (invoking the corresponding
+  helper function in the `zkevm_test_harness` repo)
+- it saves the generated circuits to the `prover_jobs` table and the other artifacts to its output table
+- the individual proofs are picked up by the provers, processed, and marked as complete.
+- when the last proof for this round is computed, the prover updates the row in the output table, setting its status to
+  `queued`
+- `WitnessGenerator` picks up such a job and proceeds to the next round
+
+Note that the very first input table (`witness_inputs`) is populated by the tree (as the input artifact for the
+`WitnessGeneratorJobType::BasicCircuits` is the Merkle proofs).
diff --git a/core/bin/witness_generator/src/basic_circuits.rs b/core/bin/witness_generator/src/basic_circuits.rs
new file mode 100644
index 000000000000..079497465008
--- /dev/null
+++ b/core/bin/witness_generator/src/basic_circuits.rs
@@ -0,0 +1,511 @@
+use std::cell::RefCell;
+use std::collections::hash_map::DefaultHasher;
+use std::collections::{HashMap, HashSet};
+use std::hash::{Hash, Hasher};
+use std::rc::Rc;
+use std::sync::Arc;
+use std::time::Instant;
+
+use async_trait::async_trait;
+use rand::Rng;
+use serde::{Deserialize, Serialize};
+
+use vm::zk_evm::ethereum_types::H256;
+use vm::{memory::SimpleMemory, HistoryDisabled, StorageOracle, MAX_CYCLES_FOR_TX};
+use zksync_config::configs::WitnessGeneratorConfig;
+use zksync_config::constants::BOOTLOADER_ADDRESS;
+use zksync_dal::ConnectionPool;
+use zksync_db_storage_provider::DbStorageProvider;
+use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject};
+use zksync_queued_job_processor::JobProcessor;
+use zksync_state::storage_view::StorageView;
+use zksync_types::zkevm_test_harness::toolset::GeometryConfig;
+use zksync_types::{
+    circuit::GEOMETRY_CONFIG,
+    proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob},
+    zkevm_test_harness::{
+        abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
+        bellman::bn256::Bn256,
+        witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs},
+        witness::oracle::VmWitnessOracle,
+        SchedulerCircuitInstanceWitness,
+    },
+    Address, L1BatchNumber, U256,
+};
+use
zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; + +use crate::precalculated::PrecalculatedMerklePathsProvider; +use crate::utils::{ + expand_bootloader_contents, save_prover_input_artifacts, track_witness_generation_stage, +}; + +pub struct BasicCircuitArtifacts { + basic_circuits: BlockBasicCircuits, + basic_circuits_inputs: BlockBasicCircuitsPublicInputs, + scheduler_witness: SchedulerCircuitInstanceWitness, + circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + basic_circuits_url: String, + basic_circuits_inputs_url: String, + scheduler_witness_url: String, + circuit_types_and_urls: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct BasicWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareBasicCircuitsJob, +} + +#[derive(Debug)] +pub struct BasicWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Arc, +} + +impl BasicWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store().into(), + } + } + + fn process_job_sync( + object_store: &dyn ObjectStore, + connection_pool: ConnectionPool, + basic_job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> Option { + let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); + let BasicWitnessGeneratorJob { block_number, job } = basic_job; + + if let Some(blocks_proving_percentage) = config.blocks_proving_percentage { + // Generate random number in (0; 100). + let threshold = rand::thread_rng().gen_range(1..100); + // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. + // In this case job should be skipped. + if threshold > blocks_proving_percentage { + metrics::counter!("server.witness_generator.skipped_blocks", 1); + vlog::info!( + "Skipping witness generation for block {}, blocks_proving_percentage: {}", + block_number.0, + blocks_proving_percentage + ); + let mut storage = connection_pool.access_storage_blocking(); + storage + .witness_generator_dal() + .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits); + return None; + } + } + + metrics::counter!("server.witness_generator.sampled_blocks", 1); + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::BasicCircuits, + block_number.0 + ); + + Some(process_basic_circuits_job( + object_store, + config, + connection_pool, + started_at, + block_number, + job, + )) + } + + fn get_artifacts(&self, block_number: L1BatchNumber) -> BasicWitnessGeneratorJob { + let job = self.object_store.get(block_number).unwrap(); + BasicWitnessGeneratorJob { block_number, job } + } + + fn save_artifacts( + &self, + block_number: L1BatchNumber, + artifacts: BasicCircuitArtifacts, + ) -> BlobUrls { + let basic_circuits_url = self + .object_store + .put(block_number, &artifacts.basic_circuits) + .unwrap(); + let basic_circuits_inputs_url = self + .object_store + .put(block_number, &artifacts.basic_circuits_inputs) + .unwrap(); + let scheduler_witness_url = self + .object_store + .put(block_number, &artifacts.scheduler_witness) + .unwrap(); + + let circuit_types_and_urls = save_prover_input_artifacts( + block_number, + &artifacts.circuits, + &*self.object_store, + AggregationRound::BasicCircuits, + ); + BlobUrls { + basic_circuits_url, + basic_circuits_inputs_url, + scheduler_witness_url, + circuit_types_and_urls, + } + } +} + +#[async_trait] +impl JobProcessor for BasicWitnessGenerator { + type Job = 
BasicWitnessGeneratorJob; + type JobId = L1BatchNumber; + // The artifact is optional to support skipping blocks when sampling is enabled. + type JobArtifacts = Option; + + const SERVICE_NAME: &'static str = "basic_circuit_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_basic_circuit_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = self.get_artifacts(metadata.block_number); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::BasicCircuits, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + connection_pool: ConnectionPool, + job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + let object_store = Arc::clone(&self.object_store); + tokio::task::spawn_blocking(move || { + Self::process_job_sync(&*object_store, connection_pool, job, started_at) + }) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + optional_artifacts: Option, + ) { + match optional_artifacts { + None => (), + Some(artifacts) => { + let blob_urls = self.save_artifacts(job_id, artifacts); + update_database(connection_pool, started_at, job_id, blob_urls); + } + } + } +} + +fn process_basic_circuits_job( + object_store: &dyn ObjectStore, + config: WitnessGeneratorConfig, + connection_pool: ConnectionPool, + started_at: Instant, + block_number: L1BatchNumber, + job: PrepareBasicCircuitsJob, +) -> BasicCircuitArtifacts { + let witness_gen_input = + build_basic_circuits_witness_generator_input(connection_pool.clone(), job, block_number); + let (basic_circuits, basic_circuits_inputs, scheduler_witness) = + generate_witness(object_store, config, connection_pool, witness_gen_input); + let circuits = basic_circuits.clone().into_flattened_set(); + + vlog::info!( + "Witness generation for block {} is complete in {:?}. 
Number of circuits: {}", + block_number.0, + started_at.elapsed(), + circuits.len() + ); + + BasicCircuitArtifacts { + basic_circuits, + basic_circuits_inputs, + scheduler_witness, + circuits, + } +} + +fn update_database( + connection_pool: ConnectionPool, + started_at: Instant, + block_number: L1BatchNumber, + blob_urls: BlobUrls, +) { + let mut connection = connection_pool.access_storage_blocking(); + let mut transaction = connection.start_transaction_blocking(); + + transaction.witness_generator_dal().create_aggregation_jobs( + block_number, + &blob_urls.basic_circuits_url, + &blob_urls.basic_circuits_inputs_url, + blob_urls.circuit_types_and_urls.len(), + &blob_urls.scheduler_witness_url, + ); + transaction.prover_dal().insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::BasicCircuits, + ); + transaction + .witness_generator_dal() + .mark_witness_job_as_successful( + block_number, + AggregationRound::BasicCircuits, + started_at.elapsed(), + ); + + transaction.commit_blocking(); + track_witness_generation_stage(started_at, AggregationRound::BasicCircuits); +} + +// If making changes to this method, consider moving this logic to the DAL layer and make +// `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`. +fn build_basic_circuits_witness_generator_input( + connection_pool: ConnectionPool, + witness_merkle_input: PrepareBasicCircuitsJob, + block_number: L1BatchNumber, +) -> BasicCircuitWitnessGeneratorInput { + let mut connection = connection_pool.access_storage_blocking(); + let block_header = connection + .blocks_dal() + .get_block_header(block_number) + .unwrap(); + let previous_block_header = connection + .blocks_dal() + .get_block_header(block_number - 1) + .unwrap(); + let previous_block_hash = connection + .blocks_dal() + .get_block_state_root(block_number - 1) + .expect("cannot generate witness before the root hash is computed"); + BasicCircuitWitnessGeneratorInput { + block_number, + previous_block_timestamp: previous_block_header.timestamp, + previous_block_hash, + block_timestamp: block_header.timestamp, + used_bytecodes_hashes: block_header.used_contract_hashes, + initial_heap_content: block_header.initial_bootloader_contents, + merkle_paths_input: witness_merkle_input, + } +} + +fn generate_witness( + object_store: &dyn ObjectStore, + config: WitnessGeneratorConfig, + connection_pool: ConnectionPool, + input: BasicCircuitWitnessGeneratorInput, +) -> ( + BlockBasicCircuits, + BlockBasicCircuitsPublicInputs, + SchedulerCircuitInstanceWitness, +) { + let mut connection = connection_pool.access_storage_blocking(); + let header = connection + .blocks_dal() + .get_block_header(input.block_number) + .unwrap(); + let bootloader_code_bytes = connection + .storage_dal() + .get_factory_dep(header.base_system_contracts_hashes.bootloader) + .expect("Bootloader bytecode should exist"); + let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); + let account_bytecode_bytes = connection + .storage_dal() + .get_factory_dep(header.base_system_contracts_hashes.default_aa) + .expect("Default aa bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); + let bootloader_contents = expand_bootloader_contents(input.initial_heap_content); + let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa); + + let hashes: HashSet = input + .used_bytecodes_hashes + .iter() + // SMA-1555: remove this hack once updated to the latest version of zkevm_test_harness + 
.filter(|&&hash| hash != h256_to_u256(header.base_system_contracts_hashes.bootloader)) + .map(|hash| u256_to_h256(*hash)) + .collect(); + + let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes); + if input.used_bytecodes_hashes.contains(&account_code_hash) { + used_bytecodes.insert(account_code_hash, account_bytecode); + } + let factory_dep_bytecode_hashes: HashSet = used_bytecodes + .clone() + .keys() + .map(|&hash| u256_to_h256(hash)) + .collect(); + let missing_deps: HashSet<_> = hashes + .difference(&factory_dep_bytecode_hashes) + .cloned() + .collect(); + if !missing_deps.is_empty() { + vlog::error!("{:?} factory deps are not found in DB", missing_deps); + } + + // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. + // Probably, we should make it work with L1 batch numbers too. + let (_, last_miniblock_number) = connection + .blocks_dal() + .get_miniblock_range_of_l1_batch(input.block_number - 1) + .expect("L1 batch should contain at least one miniblock"); + let db_storage_provider = DbStorageProvider::new(connection, last_miniblock_number, true); + let mut tree = PrecalculatedMerklePathsProvider::new( + input.merkle_paths_input, + input.previous_block_hash.0, + ); + + let storage_ptr: &mut dyn vm::storage::Storage = &mut StorageView::new(db_storage_provider); + let storage_oracle = StorageOracle::::new(Rc::new(RefCell::new(storage_ptr))); + let memory = SimpleMemory::::default(); + let mut hasher = DefaultHasher::new(); + GEOMETRY_CONFIG.hash(&mut hasher); + vlog::info!( + "generating witness for block {} using geometry config hash: {}", + input.block_number.0, + hasher.finish() + ); + if config + .dump_arguments_for_blocks + .contains(&input.block_number.0) + { + save_run_with_fixed_params_args_to_gcs( + object_store, + input.block_number.0, + last_miniblock_number.0, + Address::zero(), + BOOTLOADER_ADDRESS, + bootloader_code.clone(), + bootloader_contents.clone(), + false, + account_code_hash, + used_bytecodes.clone(), + Vec::default(), + MAX_CYCLES_FOR_TX as usize, + GEOMETRY_CONFIG, + tree.clone(), + ); + } + + zksync_types::zkevm_test_harness::external_calls::run_with_fixed_params( + Address::zero(), + BOOTLOADER_ADDRESS, + bootloader_code, + bootloader_contents, + false, + account_code_hash, + used_bytecodes, + Vec::default(), + MAX_CYCLES_FOR_TX as usize, + GEOMETRY_CONFIG, + storage_oracle, + memory, + &mut tree, + ) +} + +#[allow(clippy::too_many_arguments)] +fn save_run_with_fixed_params_args_to_gcs( + object_store: &dyn ObjectStore, + l1_batch_number: u32, + last_miniblock_number: u32, + caller: Address, + entry_point_address: Address, + entry_point_code: Vec<[u8; 32]>, + initial_heap_content: Vec, + zk_porter_is_available: bool, + default_aa_code_hash: U256, + used_bytecodes: HashMap>, + ram_verification_queries: Vec<(u32, U256)>, + cycle_limit: usize, + geometry: GeometryConfig, + tree: PrecalculatedMerklePathsProvider, +) { + let run_with_fixed_params_input = RunWithFixedParamsInput { + l1_batch_number, + last_miniblock_number, + caller, + entry_point_address, + entry_point_code, + initial_heap_content, + zk_porter_is_available, + default_aa_code_hash, + used_bytecodes, + ram_verification_queries, + cycle_limit, + geometry, + tree, + }; + object_store + .put(L1BatchNumber(l1_batch_number), &run_with_fixed_params_input) + .unwrap(); +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct RunWithFixedParamsInput { + l1_batch_number: u32, + last_miniblock_number: u32, + caller: Address, + 
entry_point_address: Address, + entry_point_code: Vec<[u8; 32]>, + initial_heap_content: Vec, + zk_porter_is_available: bool, + default_aa_code_hash: U256, + used_bytecodes: HashMap>, + ram_verification_queries: Vec<(u32, U256)>, + cycle_limit: usize, + geometry: GeometryConfig, + tree: PrecalculatedMerklePathsProvider, +} + +impl StoredObject for RunWithFixedParamsInput { + const BUCKET: Bucket = Bucket::WitnessInput; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("run_with_fixed_params_input_{}.bin", key) + } + + zksync_object_store::serialize_using_bincode!(); +} diff --git a/core/bin/witness_generator/src/leaf_aggregation.rs b/core/bin/witness_generator/src/leaf_aggregation.rs new file mode 100644 index 000000000000..8d4ca0391909 --- /dev/null +++ b/core/bin/witness_generator/src/leaf_aggregation.rs @@ -0,0 +1,291 @@ +use std::collections::HashMap; +use std::time::Instant; + +use async_trait::async_trait; + +use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; +use zksync_config::configs::WitnessGeneratorConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{ + circuit::LEAF_SPLITTING_FACTOR, + proofs::{AggregationRound, PrepareLeafAggregationCircuitsJob, WitnessGeneratorJobMetadata}, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, + bellman::plonk::better_better_cs::setup::VerificationKey, + encodings::recursion_request::RecursionRequest, encodings::QueueSimulator, witness, + witness::oracle::VmWitnessOracle, LeafAggregationOutputDataWitness, + }, + L1BatchNumber, +}; +use zksync_verification_key_server::{ + get_ordered_vks_for_basic_circuits, get_vks_for_basic_circuits, get_vks_for_commitment, +}; + +pub struct LeafAggregationArtifacts { + leaf_layer_subqueues: Vec, 2, 2>>, + aggregation_outputs: Vec>, + leaf_circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + leaf_layer_subqueues_url: String, + aggregation_outputs_url: String, + circuit_types_and_urls: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct LeafAggregationWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareLeafAggregationCircuitsJob, +} + +#[derive(Debug)] +pub struct LeafAggregationWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl LeafAggregationWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + leaf_job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> LeafAggregationArtifacts { + let LeafAggregationWitnessGeneratorJob { block_number, job } = leaf_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::LeafAggregation, + block_number.0 + ); + process_leaf_aggregation_job(started_at, block_number, job) + } + + fn get_artifacts( + &self, + metadata: WitnessGeneratorJobMetadata, + ) -> LeafAggregationWitnessGeneratorJob { + let basic_circuits = self.object_store.get(metadata.block_number).unwrap(); + let basic_circuits_inputs = self.object_store.get(metadata.block_number).unwrap(); + + LeafAggregationWitnessGeneratorJob { + block_number: metadata.block_number, + job: PrepareLeafAggregationCircuitsJob { + basic_circuits_inputs, + basic_circuits_proofs: metadata.proofs, + basic_circuits, + }, 
+ } + } + + fn save_artifacts( + &self, + block_number: L1BatchNumber, + artifacts: LeafAggregationArtifacts, + ) -> BlobUrls { + let leaf_layer_subqueues_url = self + .object_store + .put(block_number, &artifacts.leaf_layer_subqueues) + .unwrap(); + let aggregation_outputs_url = self + .object_store + .put(block_number, &artifacts.aggregation_outputs) + .unwrap(); + let circuit_types_and_urls = save_prover_input_artifacts( + block_number, + &artifacts.leaf_circuits, + &*self.object_store, + AggregationRound::LeafAggregation, + ); + BlobUrls { + leaf_layer_subqueues_url, + aggregation_outputs_url, + circuit_types_and_urls, + } + } +} + +#[async_trait] +impl JobProcessor for LeafAggregationWitnessGenerator { + type Job = LeafAggregationWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = LeafAggregationArtifacts; + + const SERVICE_NAME: &'static str = "leaf_aggregation_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_leaf_aggregation_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = self.get_artifacts(metadata); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::LeafAggregation, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: LeafAggregationArtifacts, + ) { + let leaf_circuits_len = artifacts.leaf_circuits.len(); + let blob_urls = self.save_artifacts(job_id, artifacts); + update_database( + connection_pool, + started_at, + job_id, + leaf_circuits_len, + blob_urls, + ); + } +} + +fn process_leaf_aggregation_job( + started_at: Instant, + block_number: L1BatchNumber, + job: PrepareLeafAggregationCircuitsJob, +) -> LeafAggregationArtifacts { + let stage_started_at = Instant::now(); + + let verification_keys: HashMap< + u8, + VerificationKey>>, + > = get_vks_for_basic_circuits(); + + vlog::info!( + "Verification keys loaded in {:?}", + stage_started_at.elapsed() + ); + + // we need the list of vks that matches the list of job.basic_circuit_proofs + let vks_for_aggregation: Vec< + VerificationKey>>, + > = get_ordered_vks_for_basic_circuits(&job.basic_circuits, &verification_keys); + + let (all_vk_committments, set_committment, g2_points) = + witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( + verification_keys, + )); + + vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); + + let stage_started_at = Instant::now(); + + let (leaf_layer_subqueues, aggregation_outputs, leaf_circuits) = + 
witness::recursive_aggregation::prepare_leaf_aggregations( + job.basic_circuits, + job.basic_circuits_inputs, + job.basic_circuits_proofs, + vks_for_aggregation, + LEAF_SPLITTING_FACTOR, + all_vk_committments, + set_committment, + g2_points, + ); + + vlog::info!( + "prepare_leaf_aggregations took {:?}", + stage_started_at.elapsed() + ); + vlog::info!( + "Leaf witness generation for block {} is complete in {:?}. Number of circuits: {}", + block_number.0, + started_at.elapsed(), + leaf_circuits.len() + ); + + LeafAggregationArtifacts { + leaf_layer_subqueues, + aggregation_outputs, + leaf_circuits, + } +} + +fn update_database( + connection_pool: ConnectionPool, + started_at: Instant, + block_number: L1BatchNumber, + leaf_circuits_len: usize, + blob_urls: BlobUrls, +) { + let mut connection = connection_pool.access_storage_blocking(); + let mut transaction = connection.start_transaction_blocking(); + + // inserts artifacts into the node_aggregation_witness_jobs table + // and advances it to waiting_for_proofs status + transaction + .witness_generator_dal() + .save_leaf_aggregation_artifacts( + block_number, + leaf_circuits_len, + &blob_urls.leaf_layer_subqueues_url, + &blob_urls.aggregation_outputs_url, + ); + transaction.prover_dal().insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::LeafAggregation, + ); + transaction + .witness_generator_dal() + .mark_witness_job_as_successful( + block_number, + AggregationRound::LeafAggregation, + started_at.elapsed(), + ); + + transaction.commit_blocking(); + track_witness_generation_stage(started_at, AggregationRound::LeafAggregation); +} diff --git a/core/bin/witness_generator/src/main.rs b/core/bin/witness_generator/src/main.rs new file mode 100644 index 000000000000..a6a6622be4a5 --- /dev/null +++ b/core/bin/witness_generator/src/main.rs @@ -0,0 +1,100 @@ +use std::time::Instant; + +use futures::StreamExt; +use prometheus_exporter::run_prometheus_exporter; +use zksync_config::configs::WitnessGeneratorConfig; +use zksync_config::ZkSyncConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStoreFactory; +use zksync_prover_utils::{get_stop_signal_receiver, wait_for_tasks}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::proofs::AggregationRound; + +use crate::basic_circuits::BasicWitnessGenerator; +use crate::leaf_aggregation::LeafAggregationWitnessGenerator; +use crate::node_aggregation::NodeAggregationWitnessGenerator; +use crate::scheduler::SchedulerWitnessGenerator; +use structopt::StructOpt; + +mod basic_circuits; +mod leaf_aggregation; +mod node_aggregation; +mod precalculated; +mod scheduler; +mod utils; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "Run witness generator for different aggregation round", + about = "Component for generating witness" +)] +struct Opt { + /// Number of times witness generator should be run. + #[structopt(short = "b", long = "batch_size")] + batch_size: Option, + /// aggregation round for the witness generator. 
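+    /// One of the `AggregationRound` variants: BasicCircuits, LeafAggregation, NodeAggregation or Scheduler.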
+ #[structopt(short = "r", long = "round")] + round: AggregationRound, +} + +#[tokio::main] +async fn main() { + let opt = Opt::from_args(); + let _sentry_guard = vlog::init(); + let connection_pool = ConnectionPool::new(None, true); + let zksync_config = ZkSyncConfig::from_env(); + let (stop_sender, stop_receiver) = tokio::sync::watch::channel::(false); + let started_at = Instant::now(); + vlog::info!( + "initializing the {:?} witness generator, batch size: {:?}", + opt.round, + opt.batch_size + ); + let use_push_gateway = opt.batch_size.is_some(); + + let config = WitnessGeneratorConfig::from_env(); + let store_factory = ObjectStoreFactory::from_env(); + let witness_generator_task = match opt.round { + AggregationRound::BasicCircuits => { + let generator = BasicWitnessGenerator::new(config, &store_factory); + generator.run(connection_pool, stop_receiver, opt.batch_size) + } + AggregationRound::LeafAggregation => { + let generator = LeafAggregationWitnessGenerator::new(config, &store_factory); + generator.run(connection_pool, stop_receiver, opt.batch_size) + } + AggregationRound::NodeAggregation => { + let generator = NodeAggregationWitnessGenerator::new(config, &store_factory); + generator.run(connection_pool, stop_receiver, opt.batch_size) + } + AggregationRound::Scheduler => { + let generator = SchedulerWitnessGenerator::new(config, &store_factory); + generator.run(connection_pool, stop_receiver, opt.batch_size) + } + }; + + let witness_generator_task = tokio::spawn(witness_generator_task); + vlog::info!( + "initialized {:?} witness generator in {:?}", + opt.round, + started_at.elapsed() + ); + metrics::gauge!( + "server.init.latency", + started_at.elapsed(), + "stage" => format!("witness_generator_{:?}", opt.round) + ); + let tasks = vec![ + run_prometheus_exporter(zksync_config.api.prometheus, use_push_gateway), + witness_generator_task, + ]; + + let mut stop_signal_receiver = get_stop_signal_receiver(); + tokio::select! 
{ + _ = wait_for_tasks(tasks) => {}, + _ = stop_signal_receiver.next() => { + vlog::info!("Stop signal received, shutting down"); + }, + } + let _ = stop_sender.send(true); +} diff --git a/core/bin/witness_generator/src/node_aggregation.rs b/core/bin/witness_generator/src/node_aggregation.rs new file mode 100644 index 000000000000..3179db434943 --- /dev/null +++ b/core/bin/witness_generator/src/node_aggregation.rs @@ -0,0 +1,333 @@ +use std::collections::HashMap; +use std::env; +use std::time::Instant; + +use async_trait::async_trait; + +use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; +use zksync_config::configs::WitnessGeneratorConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{ + circuit::{ + LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, + }, + proofs::{AggregationRound, PrepareNodeAggregationCircuitJob, WitnessGeneratorJobMetadata}, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::bn256::Bn256, + bellman::plonk::better_better_cs::setup::VerificationKey, + ff::to_hex, + witness::{ + self, + oracle::VmWitnessOracle, + recursive_aggregation::{erase_vk_type, padding_aggregations}, + }, + NodeAggregationOutputDataWitness, + }, + L1BatchNumber, +}; +use zksync_verification_key_server::{ + get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, +}; + +pub struct NodeAggregationArtifacts { + final_node_aggregation: NodeAggregationOutputDataWitness, + node_circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + node_aggregations_url: String, + circuit_types_and_urls: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct NodeAggregationWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareNodeAggregationCircuitJob, +} + +#[derive(Debug)] +pub struct NodeAggregationWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl NodeAggregationWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + node_job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> NodeAggregationArtifacts { + let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); + let NodeAggregationWitnessGeneratorJob { block_number, job } = node_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::NodeAggregation, + block_number.0 + ); + process_node_aggregation_job(config, started_at, block_number, job) + } + + fn get_artifacts( + &self, + metadata: WitnessGeneratorJobMetadata, + ) -> NodeAggregationWitnessGeneratorJob { + let leaf_layer_subqueues = self + .object_store + .get(metadata.block_number) + .expect("leaf_layer_subqueues not found in queued `node_aggregation_witness_jobs` job"); + let aggregation_outputs = self + .object_store + .get(metadata.block_number) + .expect("aggregation_outputs not found in queued `node_aggregation_witness_jobs` job"); + + NodeAggregationWitnessGeneratorJob { + block_number: metadata.block_number, + job: PrepareNodeAggregationCircuitJob { + previous_level_proofs: metadata.proofs, + previous_level_leafs_aggregations: aggregation_outputs, + previous_sequence: leaf_layer_subqueues, + }, + } + } + + fn save_artifacts( + &self, + block_number: L1BatchNumber, + 
artifacts: NodeAggregationArtifacts, + ) -> BlobUrls { + let node_aggregations_url = self + .object_store + .put(block_number, &artifacts.final_node_aggregation) + .unwrap(); + let circuit_types_and_urls = save_prover_input_artifacts( + block_number, + &artifacts.node_circuits, + &*self.object_store, + AggregationRound::NodeAggregation, + ); + BlobUrls { + node_aggregations_url, + circuit_types_and_urls, + } + } +} + +#[async_trait] +impl JobProcessor for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = NodeAggregationArtifacts; + + const SERVICE_NAME: &'static str = "node_aggregation_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_node_aggregation_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = self.get_artifacts(metadata); + return Some((job.block_number, job)); + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::NodeAggregation, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: NodeAggregationArtifacts, + ) { + let blob_urls = self.save_artifacts(job_id, artifacts); + update_database(connection_pool, started_at, job_id, blob_urls); + } +} + +pub fn process_node_aggregation_job( + config: WitnessGeneratorConfig, + started_at: Instant, + block_number: L1BatchNumber, + job: PrepareNodeAggregationCircuitJob, +) -> NodeAggregationArtifacts { + let stage_started_at = Instant::now(); + zksync_prover_utils::ensure_initial_setup_keys_present( + &config.initial_setup_key_path, + &config.key_download_url, + ); + env::set_var("CRS_FILE", config.initial_setup_key_path); + vlog::info!("Keys loaded in {:?}", stage_started_at.elapsed()); + let stage_started_at = Instant::now(); + + let verification_keys: HashMap< + u8, + VerificationKey>>, + > = get_vks_for_basic_circuits(); + + let padding_aggregations = padding_aggregations(NODE_SPLITTING_FACTOR); + + let (_, set_committment, g2_points) = + witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( + verification_keys, + )); + + let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); + + let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); + + let (_, leaf_aggregation_vk_committment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + leaf_aggregation_vk.clone(), + )); + + let (_, node_aggregation_vk_committment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + 
node_aggregation_vk, + )); + + vlog::info!( + "commitments: basic set: {:?}, leaf: {:?}, node: {:?}", + to_hex(&set_committment), + to_hex(&leaf_aggregation_vk_committment), + to_hex(&node_aggregation_vk_committment) + ); + vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); + + // fs::write("previous_level_proofs.bincode", bincode::serialize(&job.previous_level_proofs).unwrap()).unwrap(); + // fs::write("leaf_aggregation_vk.bincode", bincode::serialize(&leaf_aggregation_vk).unwrap()).unwrap(); + // fs::write("previous_level_leafs_aggregations.bincode", bincode::serialize(&job.previous_level_leafs_aggregations).unwrap()).unwrap(); + // fs::write("previous_sequence.bincode", bincode::serialize(&job.previous_sequence).unwrap()).unwrap(); + // fs::write("padding_aggregations.bincode", bincode::serialize(&padding_aggregations).unwrap()).unwrap(); + // fs::write("set_committment.bincode", bincode::serialize(&set_committment).unwrap()).unwrap(); + // fs::write("node_aggregation_vk_committment.bincode", bincode::serialize(&node_aggregation_vk_committment).unwrap()).unwrap(); + // fs::write("leaf_aggregation_vk_committment.bincode", bincode::serialize(&leaf_aggregation_vk_committment).unwrap()).unwrap(); + // fs::write("g2_points.bincode", bincode::serialize(&g2_points).unwrap()).unwrap(); + + let stage_started_at = Instant::now(); + let (_, final_node_aggregations, node_circuits) = + zksync_types::zkevm_test_harness::witness::recursive_aggregation::prepare_node_aggregations( + job.previous_level_proofs, + leaf_aggregation_vk, + true, + 0, + job.previous_level_leafs_aggregations, + Vec::default(), + job.previous_sequence, + LEAF_SPLITTING_FACTOR, + NODE_SPLITTING_FACTOR, + padding_aggregations, + set_committment, + node_aggregation_vk_committment, + leaf_aggregation_vk_committment, + g2_points, + ); + + vlog::info!( + "prepare_node_aggregations took {:?}", + stage_started_at.elapsed() + ); + + assert_eq!( + node_circuits.len(), + 1, + "prepare_node_aggregations returned more than one circuit" + ); + assert_eq!( + final_node_aggregations.len(), + 1, + "prepare_node_aggregations returned more than one node aggregation" + ); + + vlog::info!( + "Node witness generation for block {} is complete in {:?}. 
Number of circuits: {}", + block_number.0, + started_at.elapsed(), + node_circuits.len() + ); + + NodeAggregationArtifacts { + final_node_aggregation: final_node_aggregations.into_iter().next().unwrap(), + node_circuits, + } +} + +fn update_database( + connection_pool: ConnectionPool, + started_at: Instant, + block_number: L1BatchNumber, + blob_urls: BlobUrls, +) { + let mut connection = connection_pool.access_storage_blocking(); + let mut transaction = connection.start_transaction_blocking(); + + // inserts artifacts into the scheduler_witness_jobs table + // and advances it to waiting_for_proofs status + transaction + .witness_generator_dal() + .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url); + transaction.prover_dal().insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::NodeAggregation, + ); + transaction + .witness_generator_dal() + .mark_witness_job_as_successful( + block_number, + AggregationRound::NodeAggregation, + started_at.elapsed(), + ); + + transaction.commit_blocking(); + track_witness_generation_stage(started_at, AggregationRound::NodeAggregation); +} diff --git a/core/bin/witness_generator/src/precalculated/mod.rs b/core/bin/witness_generator/src/precalculated/mod.rs new file mode 100644 index 000000000000..cc3cc24376fb --- /dev/null +++ b/core/bin/witness_generator/src/precalculated/mod.rs @@ -0,0 +1,264 @@ +use serde::{Deserialize, Serialize}; + +use std::collections::VecDeque; + +use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_types::zkevm_test_harness::blake2::Blake2s256; +use zksync_types::zkevm_test_harness::witness::tree::BinaryHasher; +use zksync_types::zkevm_test_harness::witness::tree::{ + BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, +}; + +#[cfg(test)] +mod tests; + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct PrecalculatedMerklePathsProvider { + // We keep the root hash of the last processed leaf, as it is needed by the the witness generator. + root_hash: [u8; 32], + // The list of expected leaves to be interacted with. + pending_leaves: VecDeque, + // The index that would be assigned to the next new leaf. + next_enumeration_index: u64, + // For every Storage Write Log we expect two invocations: `get_leaf` and `insert_leaf`. + // We set this flag to `true` after the initial `get_leaf` is invoked. + is_get_leaf_invoked: bool, +} + +impl PrecalculatedMerklePathsProvider { + pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self { + let next_enumeration_index = input.next_enumeration_index(); + vlog::debug!( + "Initializing PrecalculatedMerklePathsProvider. 
Initial root_hash: {:?}, \ + initial next_enumeration_index: {:?}", + root_hash, + next_enumeration_index + ); + + Self { + root_hash, + pending_leaves: input.into_merkle_paths().collect(), + next_enumeration_index, + is_get_leaf_invoked: false, + } + } +} + +impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> + for PrecalculatedMerklePathsProvider +{ + fn empty() -> Self { + unreachable!("`empty` must not be invoked by the witness generator code"); + } + + fn next_enumeration_index(&self) -> u64 { + self.next_enumeration_index + } + + fn set_next_enumeration_index(&mut self, _value: u64) { + unreachable!( + "`set_next_enumeration_index` must not be invoked by the witness generator code" + ); + } + + fn root(&self) -> [u8; 32] { + self.root_hash + } + + fn get_leaf(&mut self, index: &[u8; 32]) -> LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf> { + vlog::trace!( + "Invoked get_leaf({:?}). pending leaves size: {:?}. current root: {:?}", + index, + self.pending_leaves.len(), + self.root() + ); + assert!( + !self.is_get_leaf_invoked, + "`get_leaf()` invoked more than once or get_leaf is invoked when insert_leaf was expected" + ); + let next = self.pending_leaves.front().unwrap_or_else(|| { + panic!( + "invoked `get_leaf({:?})` with empty `pending_leaves`", + index + ) + }); + self.root_hash = next.root_hash; + + assert_eq!( + &next.leaf_hashed_key_array(), + index, + "`get_leaf` hashed key mismatch" + ); + + let mut res = LeafQuery { + leaf: ZkSyncStorageLeaf { + index: next.leaf_enumeration_index, + value: next.value_read, + }, + first_write: next.first_write, + index: *index, + merkle_path: next.clone().into_merkle_paths_array(), + }; + + if next.is_write { + // If it is a write, the next invocation will be `insert_leaf` with the very same parameters + self.is_get_leaf_invoked = true; + if res.first_write { + res.leaf.index = 0; + } + } else { + // If it is a read, the next invocation will relate to the next `pending_leaf` + self.pending_leaves.pop_front(); + }; + + res + } + + fn insert_leaf( + &mut self, + index: &[u8; 32], + leaf: ZkSyncStorageLeaf, + ) -> LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf> { + vlog::trace!( + "Invoked insert_leaf({:?}). pending leaves size: {:?}. current root: {:?}", + index, + self.pending_leaves.len(), + self.root() + ); + + assert!( + self.is_get_leaf_invoked, + "`get_leaf()` is expected to be invoked before `insert_leaf()`" + ); + let next = self.pending_leaves.pop_front().unwrap(); + self.root_hash = next.root_hash; + + assert!( + next.is_write, + "invoked `insert_leaf({:?})`, but get_leaf() expected", + index + ); + + assert_eq!( + &next.leaf_hashed_key_array(), + index, + "insert_leaf hashed key mismatch", + ); + + assert_eq!( + &next.value_written, &leaf.value, + "insert_leaf enumeration index mismatch", + ); + + // reset is_get_leaf_invoked for the next get/insert invocation + self.is_get_leaf_invoked = false; + + // if this insert was in fact the very first insert, it should bump the `next_enumeration_index` + self.next_enumeration_index = self + .next_enumeration_index + .max(next.leaf_enumeration_index + 1); + + LeafQuery { + leaf: ZkSyncStorageLeaf { + index: next.leaf_enumeration_index, + value: next.value_written, + }, + first_write: next.first_write, + index: *index, + merkle_path: next.into_merkle_paths_array(), + } + } + + // Method to segregate the given leafs into 2 types: + // * leafs that are updated for first time + // * leafs that are not updated for the first time. 
+    // The consumer of this method must ensure that the passed `indexes` and `leafs` iterators have the same length,
+    // and that the Merkle paths specified during initialization contain the same number of write
+    // leaf nodes as the leafs passed as arguments.
+    fn filter_renumerate<'a>(
+        &self,
+        mut indexes: impl Iterator<Item = &'a [u8; 32]>,
+        mut leafs: impl Iterator<Item = ZkSyncStorageLeaf>,
+    ) -> (
+        u64,
+        Vec<([u8; 32], ZkSyncStorageLeaf)>,
+        Vec<ZkSyncStorageLeaf>,
+    ) {
+        vlog::trace!(
+            "invoked filter_renumerate(), pending leaves size: {:?}",
+            self.pending_leaves.len()
+        );
+        let mut first_writes = vec![];
+        let mut updates = vec![];
+
+        let mut write_pending_leaves = self.pending_leaves.iter().filter(|log| log.is_write);
+        let write_pending_leaves = &mut write_pending_leaves;
+        for ((pending_leaf, idx), mut leaf) in
+            write_pending_leaves.zip(&mut indexes).zip(&mut leafs)
+        {
+            leaf.set_index(pending_leaf.leaf_enumeration_index);
+            if pending_leaf.first_write {
+                first_writes.push((*idx, leaf));
+            } else {
+                updates.push(leaf);
+            }
+        }
+
+        let length = first_writes.len() + updates.len();
+        assert!(
+            write_pending_leaves.next().is_none(),
+            "pending leaves: len({}) must be of same length as leafs and indexes: len({})",
+            write_pending_leaves.count() + 1 + length,
+            // ^ 1 is added because of `next()` getting called in the assertion condition
+            length
+        );
+        assert!(
+            indexes.next().is_none(),
+            "indexes must be of same length as leafs and pending leaves: len({})",
+            length
+        );
+        assert!(
+            leafs.next().is_none(),
+            "leafs must be of same length as indexes and pending leaves: len({})",
+            length
+        );
+        (self.next_enumeration_index, first_writes, updates)
+    }
+
+    fn verify_inclusion(
+        root: &[u8; 32],
+        query: &LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf>,
+    ) -> bool {
+        // copied from zkevm_test_harness/src/witness/tree/mod.rs with minor changes
+        vlog::trace!(
+            "invoked verify_inclusion. Index: {:?}, root: {:?})",
+            query.index,
+            root
+        );
+
+        let mut leaf_bytes = [0_u8; 8 + 32];
+        leaf_bytes[8..].copy_from_slice(query.leaf.value());
+        let leaf_index_bytes = query.leaf.current_index().to_be_bytes();
+        leaf_bytes[0..8].copy_from_slice(&leaf_index_bytes);
+        let leaf_hash = Blake2s256::leaf_hash(&leaf_bytes);
+
+        let mut current_hash = leaf_hash;
+        for level in 0..256 {
+            let (lhs, rhs) = if is_right_side_node(&query.index, level) {
+                (&query.merkle_path[level], &current_hash)
+            } else {
+                (&current_hash, &query.merkle_path[level])
+            };
+            current_hash = Blake2s256::node_hash(level, lhs, rhs);
+        }
+        *root == current_hash
+    }
+}
+
+fn is_right_side_node(index: &[u8], depth: usize) -> bool {
+    debug_assert!(depth < index.len() * 8);
+    let byte_idx = depth / 8;
+    let bit_idx = depth % 8;
+
+    index[byte_idx] & (1u8 << bit_idx) != 0
+}
diff --git a/core/bin/witness_generator/src/precalculated/tests.rs b/core/bin/witness_generator/src/precalculated/tests.rs
new file mode 100644
index 000000000000..3d250097900e
--- /dev/null
+++ b/core/bin/witness_generator/src/precalculated/tests.rs
@@ -0,0 +1,221 @@
+use const_decoder::Decoder::Hex;
+
+use std::iter;
+
+use super::PrecalculatedMerklePathsProvider;
+use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_types::zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf};
+use zksync_types::U256;
+
+// Sample `StorageLogMetadata` entries. Since we cannot allocate in constants, we store
+// the only Merkle path hash separately.
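+// `create_provider` later injects each stored hash as the single element of the corresponding log's `merkle_paths`.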
+const LOGS_AND_PATHS: [(StorageLogMetadata, [u8; 32]); 3] = [ + generate_storage_log_metadata( + b"DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + b"12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", + false, + false, + 1, + ), + generate_storage_log_metadata( + b"BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + b"D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", + true, + true, + 2, + ), + generate_storage_log_metadata( + b"77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + b"7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", + true, + true, + 3, + ), +]; + +const LEAFS: [ZkSyncStorageLeaf; 2] = [ + generate_leaf( + 1, + b"AD558076F725ED8B5E5B42920422E9BEAD558076F725ED8B5E5B42920422E9BE", + ), + generate_leaf( + 1, + b"98A0EADBD6118391B744252DA348873C98A0EADBD6118391B744252DA348873C", + ), +]; + +const INDICES: [[u8; 32]; 2] = [ + Hex.decode(b"5534D106E0B590953AC0FC7D65CA3B2E5534D106E0B590953AC0FC7D65CA3B2E"), + Hex.decode(b"00309D72EF0AD9786DA9044109E1704B00309D72EF0AD9786DA9044109E1704B"), +]; + +const fn generate_leaf(index: u64, value: &[u8]) -> ZkSyncStorageLeaf { + ZkSyncStorageLeaf { + index, + value: Hex.decode(value), + } +} + +const fn generate_storage_log_metadata( + root_hash: &[u8], + merkle_path: &[u8], + is_write: bool, + first_write: bool, + leaf_enumeration_index: u64, +) -> (StorageLogMetadata, [u8; 32]) { + let log = StorageLogMetadata { + root_hash: Hex.decode(root_hash), + is_write, + first_write, + merkle_paths: Vec::new(), + leaf_hashed_key: U256::zero(), + leaf_enumeration_index, + value_written: [0; 32], + value_read: [0; 32], + }; + (log, Hex.decode(merkle_path)) +} + +fn create_provider() -> PrecalculatedMerklePathsProvider { + let mut job = PrepareBasicCircuitsJob::new(4); + for (mut log, merkle_path) in LOGS_AND_PATHS { + log.merkle_paths = vec![merkle_path]; + job.push_merkle_path(log); + } + PrecalculatedMerklePathsProvider::new(job, [0_u8; 32]) +} + +#[test] +fn test_filter_renumerate_all_first_writes() { + let mut provider = create_provider(); + for log in &mut provider.pending_leaves { + log.first_write = true; + } + + let (_, first_writes, updates) = + provider.filter_renumerate(INDICES.iter(), LEAFS.iter().copied()); + assert_eq!(2, first_writes.len()); + let (first_write_index, first_write_leaf) = first_writes[0]; + assert_eq!(first_write_index, INDICES[0]); + assert_eq!(first_write_leaf.value, LEAFS[0].value); + assert_eq!(0, updates.len()); +} + +#[test] +fn test_filter_renumerate_all_repeated_writes() { + let mut provider = create_provider(); + for log in &mut provider.pending_leaves { + log.first_write = false; + } + + let (_, first_writes, updates) = + provider.filter_renumerate(INDICES.iter(), LEAFS.iter().copied()); + assert_eq!(0, first_writes.len()); + assert_eq!(2, updates.len()); + assert_eq!(updates[0].value, LEAFS[0].value); + assert_eq!(updates[1].value, LEAFS[1].value); +} + +#[test] +fn test_filter_renumerate_repeated_writes_with_first_write() { + let mut provider = create_provider(); + for (i, log) in provider.pending_leaves.iter_mut().enumerate() { + log.first_write = i == 2; + } + + let (_, first_writes, updates) = + provider.filter_renumerate(INDICES.iter(), LEAFS.iter().copied()); + assert_eq!(1, first_writes.len()); + assert_eq!(1, updates.len()); + assert_eq!(3, first_writes[0].1.index); + assert_eq!(2, updates[0].index); +} + +#[test] +#[should_panic(expected = "leafs must be of same length as indexes")] +fn 
test_filter_renumerate_panic_when_leafs_and_indices_are_of_different_length() { + const ANOTHER_LEAF: ZkSyncStorageLeaf = generate_leaf( + 2, + b"72868932BBB002043AF50363EEB65AE172868932BBB002043AF50363EEB65AE1", + ); + + let provider = create_provider(); + let leafs = LEAFS.iter().copied().chain([ANOTHER_LEAF]); + provider.filter_renumerate(INDICES.iter(), leafs); +} + +#[test] +#[should_panic(expected = "indexes must be of same length as leafs and pending leaves")] +fn test_filter_renumerate_panic_when_indices_and_pending_leaves_are_of_different_length() { + const ANOTHER_INDEX: [u8; 32] = + Hex.decode(b"930058748339A83E06F0D1D22937E92A930058748339A83E06F0D1D22937E92A"); + + let provider = create_provider(); + let indices = INDICES.iter().chain([&ANOTHER_INDEX]); + provider.filter_renumerate(indices, LEAFS.iter().copied()); +} + +#[test] +fn vec_and_vec_deque_serializations_are_compatible() { + let logs = create_provider().pending_leaves; + let serialized = bincode::serialize(&logs).unwrap(); + let logs_vec: Vec = bincode::deserialize(&serialized).unwrap(); + assert_eq!(logs, logs_vec); + let serialized_deque = bincode::serialize(&logs_vec).unwrap(); + assert_eq!(serialized_deque, serialized); +} + +#[test] +fn provider_serialization() { + let provider = create_provider(); + + let serialized = bincode::serialize(&provider).unwrap(); + // Check that logs are serialized in the natural order. + let needle = LOGS_AND_PATHS[0].0.root_hash; + let mut windows = serialized.windows(needle.len()); + windows.position(|window| *window == needle).unwrap(); + let needle = LOGS_AND_PATHS[1].0.root_hash; + windows.position(|window| *window == needle).unwrap(); + let needle = LOGS_AND_PATHS[2].0.root_hash; + windows.position(|window| *window == needle).unwrap(); + + let deserialized: PrecalculatedMerklePathsProvider = bincode::deserialize(&serialized).unwrap(); + assert_eq!(deserialized, provider); +} + +#[test] +fn initializing_provider_with_compacted_merkle_paths() { + let mut provider = create_provider(); + for log in &mut provider.pending_leaves { + let empty_merkle_paths = iter::repeat([0; 32]).take(255); + log.merkle_paths.splice(0..0, empty_merkle_paths); + } + + // First log entry: read + let query = provider.get_leaf(&[0; 32]); + assert!(!query.first_write); + assert!(query.merkle_path[0..255] + .iter() + .all(|hash| *hash == [0; 32])); + + // Second log entry: first write + let query = provider.get_leaf(&[0; 32]); + assert!(query.first_write); + assert!(query.merkle_path[0..255] + .iter() + .all(|hash| *hash == [0; 32])); + assert_ne!(query.merkle_path[255], [0; 32]); + + let query = provider.insert_leaf( + &[0; 32], + ZkSyncStorageLeaf { + index: 2, + value: [0; 32], + }, + ); + assert!(query.first_write); + assert!(query.merkle_path[0..255] + .iter() + .all(|hash| *hash == [0; 32])); + assert_ne!(query.merkle_path[255], [0; 32]); +} diff --git a/core/bin/witness_generator/src/scheduler.rs b/core/bin/witness_generator/src/scheduler.rs new file mode 100644 index 000000000000..46a40c7a3d37 --- /dev/null +++ b/core/bin/witness_generator/src/scheduler.rs @@ -0,0 +1,316 @@ +use std::collections::HashMap; +use std::slice; +use std::time::Instant; + +use async_trait::async_trait; + +use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; +use zksync_config::configs::WitnessGeneratorConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{ + circuit::{ + 
LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, + }, + proofs::{AggregationRound, PrepareSchedulerCircuitJob, WitnessGeneratorJobMetadata}, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, + sync_vm::scheduler::BlockApplicationWitness, + witness::{self, oracle::VmWitnessOracle, recursive_aggregation::erase_vk_type}, + }, + L1BatchNumber, +}; +use zksync_verification_key_server::{ + get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, +}; + +pub struct SchedulerArtifacts { + final_aggregation_result: BlockApplicationWitness, + scheduler_circuit: ZkSyncCircuit>, +} + +#[derive(Clone)] +pub struct SchedulerWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareSchedulerCircuitJob, +} + +#[derive(Debug)] +pub struct SchedulerWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl SchedulerWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + scheduler_job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> SchedulerArtifacts { + let SchedulerWitnessGeneratorJob { block_number, job } = scheduler_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::Scheduler, + block_number.0 + ); + process_scheduler_job(started_at, block_number, job) + } + + fn get_artifacts( + &self, + metadata: WitnessGeneratorJobMetadata, + previous_aux_hash: [u8; 32], + previous_meta_hash: [u8; 32], + ) -> SchedulerWitnessGeneratorJob { + let scheduler_witness = self.object_store.get(metadata.block_number).unwrap(); + let final_node_aggregations = self.object_store.get(metadata.block_number).unwrap(); + + SchedulerWitnessGeneratorJob { + block_number: metadata.block_number, + job: PrepareSchedulerCircuitJob { + incomplete_scheduler_witness: scheduler_witness, + final_node_aggregations, + node_final_proof_level_proof: metadata.proofs.into_iter().next().unwrap(), + previous_aux_hash, + previous_meta_hash, + }, + } + } + + fn save_artifacts( + &self, + block_number: L1BatchNumber, + scheduler_circuit: &ZkSyncCircuit>, + ) -> Vec<(&'static str, String)> { + save_prover_input_artifacts( + block_number, + slice::from_ref(scheduler_circuit), + &*self.object_store, + AggregationRound::Scheduler, + ) + } +} + +#[async_trait] +impl JobProcessor for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = SchedulerArtifacts; + + const SERVICE_NAME: &'static str = "scheduler_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_scheduler_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let prev_metadata = connection + .blocks_dal() + .get_block_metadata(metadata.block_number - 1); + let previous_aux_hash = prev_metadata + .as_ref() + .map_or([0u8; 32], |e| e.metadata.aux_data_hash.0); + let previous_meta_hash = + prev_metadata.map_or([0u8; 32], |e| e.metadata.meta_parameters_hash.0); + 
let job = self.get_artifacts(metadata, previous_aux_hash, previous_meta_hash); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::Scheduler, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: SchedulerArtifacts, + ) { + let circuit_types_and_urls = self.save_artifacts(job_id, &artifacts.scheduler_circuit); + update_database( + connection_pool, + started_at, + job_id, + artifacts.final_aggregation_result, + circuit_types_and_urls, + ); + } +} + +pub fn process_scheduler_job( + started_at: Instant, + block_number: L1BatchNumber, + job: PrepareSchedulerCircuitJob, +) -> SchedulerArtifacts { + let stage_started_at = Instant::now(); + + let verification_keys: HashMap< + u8, + VerificationKey>>, + > = get_vks_for_basic_circuits(); + + let (_, set_committment, g2_points) = + witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( + verification_keys, + )); + + vlog::info!( + "Verification keys loaded in {:?}", + stage_started_at.elapsed() + ); + + let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); + + let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); + + let (_, leaf_aggregation_vk_committment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + leaf_aggregation_vk, + )); + + let (_, node_aggregation_vk_committment) = + witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( + node_aggregation_vk.clone(), + )); + + vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); + let stage_started_at = Instant::now(); + + let (scheduler_circuit, final_aggregation_result) = + witness::recursive_aggregation::prepare_scheduler_circuit( + job.incomplete_scheduler_witness, + job.node_final_proof_level_proof, + node_aggregation_vk, + job.final_node_aggregations, + set_committment, + node_aggregation_vk_committment, + leaf_aggregation_vk_committment, + job.previous_aux_hash, + job.previous_meta_hash, + (LEAF_SPLITTING_FACTOR * NODE_SPLITTING_FACTOR) as u32, + g2_points, + ); + + vlog::info!( + "prepare_scheduler_circuit took {:?}", + stage_started_at.elapsed() + ); + + vlog::info!( + "Scheduler generation for block {} is complete in {:?}", + block_number.0, + started_at.elapsed() + ); + + SchedulerArtifacts { + final_aggregation_result, + scheduler_circuit, + } +} + +pub fn update_database( + connection_pool: ConnectionPool, + started_at: Instant, + block_number: L1BatchNumber, + final_aggregation_result: BlockApplicationWitness, + circuit_types_and_urls: Vec<(&'static str, String)>, +) { + let mut connection = connection_pool.access_storage_blocking(); + let mut transaction = connection.start_transaction_blocking(); + let block = transaction + .blocks_dal() + .get_block_metadata(block_number) + .expect("L1 batch should exist"); + + assert_eq!( + 
block.metadata.aux_data_hash.0, final_aggregation_result.aux_data_hash, + "Commitment for aux data is wrong" + ); + + assert_eq!( + block.metadata.pass_through_data_hash.0, final_aggregation_result.passthrough_data_hash, + "Commitment for pass through data is wrong" + ); + + assert_eq!( + block.metadata.meta_parameters_hash.0, final_aggregation_result.meta_data_hash, + "Commitment for metadata is wrong" + ); + + assert_eq!( + block.metadata.commitment.0, final_aggregation_result.block_header_hash, + "Commitment is wrong" + ); + + transaction.prover_dal().insert_prover_jobs( + block_number, + circuit_types_and_urls, + AggregationRound::Scheduler, + ); + + transaction + .witness_generator_dal() + .save_final_aggregation_result( + block_number, + final_aggregation_result.aggregation_result_coords, + ); + + transaction + .witness_generator_dal() + .mark_witness_job_as_successful( + block_number, + AggregationRound::Scheduler, + started_at.elapsed(), + ); + + transaction.commit_blocking(); + track_witness_generation_stage(started_at, AggregationRound::Scheduler); +} diff --git a/core/bin/witness_generator/src/utils.rs b/core/bin/witness_generator/src/utils.rs new file mode 100644 index 000000000000..5c8b3f4f14bf --- /dev/null +++ b/core/bin/witness_generator/src/utils.rs @@ -0,0 +1,61 @@ +use std::time::Instant; +use vm::zk_evm::ethereum_types::U256; +use zksync_config::configs::WitnessGeneratorConfig; +use zksync_object_store::{CircuitKey, ObjectStore}; +use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; +use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; +use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; +use zksync_types::USED_BOOTLOADER_MEMORY_BYTES; +use zksync_types::{proofs::AggregationRound, L1BatchNumber}; + +trait WitnessGenerator { + fn new(config: WitnessGeneratorConfig) -> Self; +} + +pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec { + let mut result: Vec = Vec::new(); + result.resize(USED_BOOTLOADER_MEMORY_BYTES, 0); + + for (offset, value) in packed { + value.to_big_endian(&mut result[(offset * 32)..(offset + 1) * 32]); + } + + result.to_vec() +} + +pub fn save_prover_input_artifacts( + block_number: L1BatchNumber, + circuits: &[ZkSyncCircuit>], + object_store: &dyn ObjectStore, + aggregation_round: AggregationRound, +) -> Vec<(&'static str, String)> { + let types_and_urls = circuits + .iter() + .enumerate() + .map(|(sequence_number, circuit)| { + let circuit_type = circuit.short_description(); + let circuit_key = CircuitKey { + block_number, + sequence_number, + circuit_type, + aggregation_round, + }; + let blob_url = object_store.put(circuit_key, circuit).unwrap(); + (circuit_type, blob_url) + }); + types_and_urls.collect() +} + +pub fn track_witness_generation_stage(started_at: Instant, round: AggregationRound) { + let stage = match round { + AggregationRound::BasicCircuits => "basic_circuits", + AggregationRound::LeafAggregation => "leaf_aggregation", + AggregationRound::NodeAggregation => "node_aggregation", + AggregationRound::Scheduler => "scheduler", + }; + metrics::histogram!( + "server.witness_generator.processing_time", + started_at.elapsed(), + "stage" => format!("wit_gen_{}", stage) + ); +} diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index 3106fd55cc01..dcd6b5903ac6 100644 --- a/core/bin/zksync_core/Cargo.toml +++ b/core/bin/zksync_core/Cargo.toml @@ -26,6 +26,7 @@ zksync_circuit_breaker = { path = "../../lib/circuit_breaker", version 
= "1.0" } vm = { path = "../../lib/vm", version = "0.1.0" } zksync_storage = { path = "../../lib/storage", version = "1.0" } zksync_merkle_tree = { path = "../../lib/merkle_tree", version = "1.0" } +zksync_merkle_tree2 = { path = "../../lib/merkle_tree2", version = "1.0" } zksync_mini_merkle_tree = { path = "../../lib/mini_merkle_tree", version = "1.0" } zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } @@ -33,13 +34,15 @@ zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-feat "server", "client" ] } zksync_object_store = { path = "../../lib/object_store", version = "1.0" } +zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_db_storage_provider = { path = "../../lib/db_storage_provider", version = "1.0" } +clap = { version = "4.2.4", features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" metrics = "0.20" itertools = "0.10.3" -structopt = "0.3.20" once_cell = "1.7" ctrlc = { version = "3.1", features = ["termination"] } bincode = "1" @@ -51,7 +54,7 @@ chrono = { version = "0.4", features = ["serde", "rustc-serialize"] } anyhow = "1.0" thiserror = "1.0" async-trait = "0.1" -async-std = "1.12.0" +bitflags = "1.3.2" # API dependencies jsonrpc-core = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox.rs b/core/bin/zksync_core/src/api_server/execution_sandbox.rs index 6e60b6c0b003..fe5d3e53e464 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox.rs @@ -1,29 +1,26 @@ -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::time::Instant; -use super::tx_sender::SubmitTxError; -use crate::api_server::web3::backend_jsonrpc::error::internal_error; use thiserror::Error; use tracing::{span, Level}; -use vm::oracles::tracer::{ValidationError, ValidationTracerParams}; -use zksync_types::api::BlockId; -use zksync_types::utils::storage_key_for_eth_balance; -use zksync_types::{PUBLISH_BYTECODE_OVERHEAD, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; -use crate::db_storage_provider::DbStorageProvider; +use vm::oracles::tracer::{ValidationError, ValidationTracerParams}; use vm::vm_with_bootloader::{ derive_base_fee_and_gas_per_pubdata, init_vm, push_transaction_to_bootloader_memory, BlockContext, BlockContextMode, BootloaderJobType, DerivedBlockContext, TxExecutionMode, }; use vm::zk_evm::block_properties::BlockProperties; use vm::{ - storage::Storage, utils::ETH_CALL_GAS_LIMIT, TxRevertReason, VmBlockResult, VmExecutionResult, - VmInstance, + storage::Storage, utils::ETH_CALL_GAS_LIMIT, TxRevertReason, VmExecutionResult, VmInstance, }; +use vm::{HistoryDisabled, HistoryMode}; use zksync_config::constants::ZKPORTER_IS_AVAILABLE; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_db_storage_provider::DbStorageProvider; use zksync_state::storage_view::StorageView; +use zksync_types::api::BlockId; +use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ api, event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, @@ -32,13 +29,18 @@ use zksync_types::{ l2::L2Tx, storage_writes_deduplicator::StorageWritesDeduplicator, 
utils::{decompose_full_nonce, nonces_to_full_nonce}, - AccountTreeId, MiniblockNumber, Nonce, Transaction, U256, + AccountTreeId, MiniblockNumber, Nonce, StorageKey, Transaction, H256, U256, }; +use zksync_types::{PUBLISH_BYTECODE_OVERHEAD, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use zksync_utils::bytecode::{bytecode_len_in_bytes, hash_bytecode, CompressedBytecodeInfo}; use zksync_utils::time::millis_since_epoch; use zksync_utils::{h256_to_u256, u256_to_h256}; use zksync_web3_decl::error::Web3Error; +use crate::api_server::web3::backend_jsonrpc::error::internal_error; + +use super::tx_sender::SubmitTxError; + #[derive(Debug, Error)] pub enum SandboxExecutionError { #[error("Account validation failed: {0}")] @@ -54,13 +56,13 @@ pub enum SandboxExecutionError { #[error("Bootloader failure: {0}")] BootloaderFailure(String), #[error("Revert: {0}")] - Revert(String), + Revert(String, Vec), #[error("Failed to pay for the transaction: {0}")] FailedToPayForTransaction(String), #[error("Bootloader-based tx failed")] InnerTxError, #[error( - "Virtual machine entered unexpected state. Please contact developers and provide transaction details \ + "Virtual machine entered unexpected state. Please contact developers and provide transaction details \ that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), @@ -68,6 +70,7 @@ pub enum SandboxExecutionError { Unexecutable(String), } +#[allow(clippy::too_many_arguments)] pub fn execute_tx_eth_call( connection_pool: &ConnectionPool, mut tx: L2Tx, @@ -76,6 +79,8 @@ pub fn execute_tx_eth_call( fair_l2_gas_price: u64, enforced_base_fee: Option, base_system_contract: &BaseSystemContracts, + vm_execution_cache_misses_limit: Option, + trace_call: bool, ) -> Result { let mut storage = connection_pool.access_storage_blocking(); let resolved_block_number = storage @@ -94,7 +99,9 @@ pub fn execute_tx_eth_call( let vm_result = execute_tx_in_sandbox( storage, tx.into(), - TxExecutionMode::EthCall, + TxExecutionMode::EthCall { + missed_storage_invocation_limit: vm_execution_cache_misses_limit.unwrap_or(usize::MAX), + }, AccountTreeId::default(), block_id, resolved_block_number, @@ -106,11 +113,13 @@ pub fn execute_tx_eth_call( fair_l2_gas_price, enforced_base_fee, base_system_contract, + trace_call, + &mut Default::default(), ) .1 .map_err(|err| { let submit_tx_error: SubmitTxError = err.into(); - Web3Error::SubmitTransactionError(submit_tx_error.to_string()) + Web3Error::SubmitTransactionError(submit_tx_error.to_string(), submit_tx_error.data()) })?; Ok(vm_result) } @@ -134,7 +143,8 @@ fn get_pending_state( tx, operator_account, enforced_nonce, - base_system_contracts + base_system_contracts, + storage_read_cache ))] #[allow(clippy::too_many_arguments)] pub fn execute_tx_with_pending_state( @@ -148,6 +158,7 @@ pub fn execute_tx_with_pending_state( fair_l2_gas_price: u64, enforced_base_fee: Option, base_system_contracts: &BaseSystemContracts, + storage_read_cache: &mut HashMap, ) -> ( TransactionExecutionMetrics, Result, @@ -177,6 +188,8 @@ pub fn execute_tx_with_pending_state( fair_l2_gas_price, enforced_base_fee, base_system_contracts, + false, + storage_read_cache, ) } @@ -285,7 +298,8 @@ pub(crate) fn adjust_l1_gas_price_for_tx( tx, operator_account, block_timestamp_s, - base_system_contract + base_system_contract, + storage_read_cache ))] fn execute_tx_in_sandbox( connection: StorageProcessor<'_>, @@ -302,11 +316,12 @@ fn execute_tx_in_sandbox( fair_l2_gas_price: u64, enforced_base_fee: Option, base_system_contract: &BaseSystemContracts, 
+ trace_call: bool, + storage_read_cache: &mut HashMap, ) -> ( TransactionExecutionMetrics, Result, ) { - let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); let total_factory_deps = tx @@ -329,17 +344,17 @@ fn execute_tx_in_sandbox( l1_gas_price, fair_l2_gas_price, enforced_base_fee, + storage_read_cache, |vm, tx| { push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); - let VmBlockResult { - full_result: result, - .. - } = vm.execute_till_block_end(job_type); + let result = if trace_call { + vm.execute_till_block_end_with_call_tracer(job_type) + } else { + vm.execute_till_block_end(job_type) + }; - metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "execution"); span.exit(); - - result + result.full_result }, ); @@ -369,7 +384,8 @@ fn apply_vm_in_sandbox( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, - apply: impl FnOnce(&mut Box>, Transaction) -> T, + storage_read_cache: &mut HashMap, + apply: impl FnOnce(&mut Box>, Transaction) -> T, ) -> T { let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "initialization").entered(); @@ -407,7 +423,9 @@ fn apply_vm_in_sandbox( let db_storage_provider = DbStorageProvider::new(connection, state_block_number, false); - let mut storage_view = StorageView::new(db_storage_provider); + // Moving `storage_read_cache` to `storage_view`. It will be moved back once execution is finished and `storage_view` is not needed. + let mut storage_view = + StorageView::new_with_read_keys(db_storage_provider, std::mem::take(storage_read_cache)); let block_timestamp_ms = match block_id { api::BlockId::Number(api::BlockNumber::Pending) => millis_since_epoch(), @@ -440,7 +458,8 @@ fn apply_vm_in_sandbox( storage_view.set_value(&balance_key, u256_to_h256(current_balance + added_balance)); } - let mut oracle_tools = vm::OracleTools::new(&mut storage_view as &mut dyn Storage); + let mut oracle_tools = + vm::OracleTools::new(&mut storage_view as &mut dyn Storage, HistoryDisabled); let block_properties = BlockProperties { default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash), zkporter_is_available: ZKPORTER_IS_AVAILABLE, @@ -474,7 +493,16 @@ fn apply_vm_in_sandbox( metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "initialization"); span.exit(); + let tx_id = format!( + "{:?}-{} ", + tx.initiator_account(), + tx.nonce().unwrap_or(Nonce(0)) + ); + + let stage_started_at = Instant::now(); let result = apply(&mut vm, tx); + let vm_execution_took = stage_started_at.elapsed(); + metrics::histogram!("api.web3.sandbox", vm_execution_took, "stage" => "execution"); let oracles_sizes = record_vm_memory_metrics(vm); let storage_view_cache = storage_view.get_cache_size(); @@ -487,23 +515,62 @@ fn apply_vm_in_sandbox( (oracles_sizes + storage_view_cache) as f64 ); - metrics::histogram!("runtime_context.storage_interaction", storage_view.storage_invocations as f64, "interaction" => "set_value_storage_invocations"); - metrics::histogram!("runtime_context.storage_interaction", storage_view.new_storage_invocations as f64, "interaction" => "set_value_new_storage_invocations"); - metrics::histogram!("runtime_context.storage_interaction", storage_view.get_value_storage_invocations as f64, "interaction" => "set_value_get_value_storage_invocations"); - metrics::histogram!("runtime_context.storage_interaction", storage_view.set_value_storage_invocations as f64, "interaction" => 
"set_value_set_value_storage_invocations"); + let total_storage_invocations = + storage_view.get_value_storage_invocations + storage_view.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_view.time_spent_on_get_value + storage_view.time_spent_on_set_value; + + metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.storage_invocations_missed as f64, "interaction" => "missed"); + metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.get_value_storage_invocations as f64, "interaction" => "get_value"); + metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.set_value_storage_invocations as f64, "interaction" => "set_value"); + metrics::histogram!("runtime_context.storage_interaction.amount", (total_storage_invocations) as f64, "interaction" => "total"); + + metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_storage_missed, "interaction" => "missed"); + metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_get_value, "interaction" => "get_value"); + metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_set_value, "interaction" => "set_value"); + metrics::histogram!("runtime_context.storage_interaction.duration", total_time_spent_in_storage, "interaction" => "total"); + + if total_storage_invocations > 0 { + metrics::histogram!( + "runtime_context.storage_interaction.duration_per_unit", + total_time_spent_in_storage.div_f64(total_storage_invocations as f64), + "interaction" => "total" + ); + } + if storage_view.storage_invocations_missed > 0 { + metrics::histogram!( + "runtime_context.storage_interaction.duration_per_unit", + storage_view.time_spent_on_storage_missed.div_f64(storage_view.storage_invocations_missed as f64), + "interaction" => "missed" + ); + } + + metrics::histogram!( + "runtime_context.storage_interaction.ratio", + total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64(), + ); const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1000; - if storage_view.storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { vlog::info!( - "Tx resulted in {} storage_invocations, {} new_storage_invocations, {} get_value_storage_invocations, {} set_value_storage_invocations", - storage_view.storage_invocations, - storage_view.new_storage_invocations, + "Tx {} resulted in {} storage_invocations, {} new_storage_invocations, {} get_value_storage_invocations, {} set_value_storage_invocations, vm execution tool {:?}, storage interaction took {:?} (missed: {:?} get: {:?} set: {:?})", + tx_id, + total_storage_invocations, + storage_view.storage_invocations_missed, storage_view.get_value_storage_invocations, storage_view.set_value_storage_invocations, + vm_execution_took, + total_time_spent_in_storage, + storage_view.time_spent_on_storage_missed, + storage_view.time_spent_on_get_value, + storage_view.time_spent_on_set_value, ); } + // Move `read_storage_keys` from `storage_view` back to cache. 
+ *storage_read_cache = storage_view.take_read_storage_keys(); + result } @@ -600,6 +667,7 @@ fn validate_tx_in_sandbox( l1_gas_price, fair_l2_gas_price, enforced_base_fee, + &mut Default::default(), |vm, tx| { let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "validation").entered(); @@ -657,14 +725,21 @@ fn collect_tx_execution_metrics( storage_logs: result.storage_log_queries.len(), total_log_queries: result.total_log_queries, cycles_used: result.cycles_used, + computational_gas_used: result.computational_gas_used, } } impl From for SandboxExecutionError { fn from(reason: TxRevertReason) -> Self { match reason { - TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert(reason.to_string()), - TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert(reason.to_string()), + TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert( + reason.to_user_friendly_string(), + reason.encoded_data(), + ), + TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert( + reason.to_user_friendly_string(), + reason.encoded_data(), + ), TxRevertReason::FailedToChargeFee(reason) => { SandboxExecutionError::FailedToChargeFee(reason.to_string()) } @@ -692,20 +767,21 @@ impl From for SandboxExecutionError { "The bootloader did not contain enough gas to execute the transaction".to_string(), ), revert_reason @ TxRevertReason::FailedToMarkFactoryDependencies(_) => { - SandboxExecutionError::Revert(revert_reason.to_string()) + SandboxExecutionError::Revert(revert_reason.to_string(), vec![]) } TxRevertReason::PayForTxFailed(reason) => { SandboxExecutionError::FailedToPayForTransaction(reason.to_string()) } TxRevertReason::TooBigGasLimit => { - SandboxExecutionError::Revert(TxRevertReason::TooBigGasLimit.to_string()) + SandboxExecutionError::Revert(TxRevertReason::TooBigGasLimit.to_string(), vec![]) } + TxRevertReason::MissingInvocationLimitReached => SandboxExecutionError::InnerTxError, } } } /// Returns the sum of all oracles' sizes. 
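Before `record_vm_memory_metrics` below, a minimal sketch of the read-cache hand-off that `apply_vm_in_sandbox` uses above: the caller owns a long-lived `storage_read_cache`, lends it to the storage view with `std::mem::take` for the duration of one execution, and takes it back afterwards so the next call can reuse the already-read keys. The `View` type and `u64` keys/values here are simplified stand-ins, not the real `StorageView` API.

use std::collections::HashMap;
use std::mem;

// Simplified stand-in for `StorageView`: it owns the read cache while one run is in flight.
struct View {
    read_cache: HashMap<u64, u64>,
}

impl View {
    fn new_with_read_keys(read_cache: HashMap<u64, u64>) -> Self {
        Self { read_cache }
    }

    fn get(&mut self, key: u64) -> u64 {
        // A cache miss falls back to "storage" (here: the key itself) and memoizes the result.
        *self.read_cache.entry(key).or_insert(key)
    }

    fn take_read_storage_keys(self) -> HashMap<u64, u64> {
        self.read_cache
    }
}

// Mirrors the pattern above: take the cache, execute, then move it back.
fn run_once(storage_read_cache: &mut HashMap<u64, u64>, key: u64) -> u64 {
    let mut view = View::new_with_read_keys(mem::take(storage_read_cache));
    let value = view.get(key);
    *storage_read_cache = view.take_read_storage_keys();
    value
}

fn main() {
    let mut cache = HashMap::new();
    run_once(&mut cache, 42);
    assert!(cache.contains_key(&42)); // The second run reuses what the first one read.
    run_once(&mut cache, 42);
    println!("cache now holds {} key(s)", cache.len());
}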
-fn record_vm_memory_metrics(vm: Box) -> usize { +fn record_vm_memory_metrics(vm: Box>) -> usize { let event_sink_inner = vm.state.event_sink.get_size(); let event_sink_history = vm.state.event_sink.get_history_size(); let memory_inner = vm.state.memory.get_size(); diff --git a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs index 23195569f1b3..3df4ee9e6925 100644 --- a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs +++ b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs @@ -1,5 +1,6 @@ -use zksync_config::ZkSyncConfig; +use zksync_config::configs::api::Explorer as ExplorerApiConfig; use zksync_dal::connection::ConnectionPool; +use zksync_types::Address; use actix_web::web; use futures::channel::mpsc; @@ -12,20 +13,26 @@ pub struct RestApi { pub(super) master_connection_pool: ConnectionPool, pub(super) replica_connection_pool: ConnectionPool, pub(super) network_stats: SharedNetworkStats, - pub(super) config: ZkSyncConfig, + pub(super) api_config: ExplorerApiConfig, + pub(super) l2_erc20_bridge_addr: Address, + pub(super) fee_account_addr: Address, } impl RestApi { pub fn new( master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, - config: ZkSyncConfig, + api_config: ExplorerApiConfig, + l2_erc20_bridge_addr: Address, + fee_account_addr: Address, ) -> Self { Self { master_connection_pool, replica_connection_pool, network_stats: SharedNetworkStats::default(), - config, + api_config, + l2_erc20_bridge_addr, + fee_account_addr, } } @@ -75,7 +82,7 @@ impl RestApi { self.network_stats.clone().start_updater_detached( panic_notify, self.replica_connection_pool.clone(), - self.config.api.explorer.network_stats_interval(), + self.api_config.network_stats_interval(), stop_receiver, ); } diff --git a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs index 55ca4cd09861..1f833f698225 100644 --- a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs +++ b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs @@ -11,7 +11,6 @@ use zksync_types::{ AccountDetails, AccountType, AddressDetails, BlocksQuery, ContractDetails, EventsQuery, L1BatchesQuery, PaginationQuery, TransactionsQuery, VerificationIncomingRequest, }, - storage::L2_ETH_TOKEN_ADDRESS, Address, L1BatchNumber, MiniblockNumber, H256, }; @@ -39,27 +38,21 @@ impl RestApi { ) -> ActixResult { let start = Instant::now(); - let account_type = if *address == Address::zero() { - AccountType::Contract - } else { - self_ - .replica_connection_pool - .access_storage() - .await - .explorer() - .accounts_dal() - .get_account_type(*address) - .unwrap() - }; + let account_type = self_ + .replica_connection_pool + .access_storage_blocking() + .explorer() + .accounts_dal() + .get_account_type(*address) + .unwrap(); let response = match account_type { AccountType::EOA => ok_json(AddressDetails::Account( - self_.account_details_inner(address).await, + self_.account_details_inner(address), )), AccountType::Contract => { // If account type is a contract, then `contract_details_inner` must return `Some`. let contract_details = self_ .contract_details_inner(address) - .await .expect("Failed to get contract info"); ok_json(AddressDetails::Contract(contract_details)) } @@ -69,8 +62,8 @@ impl RestApi { response } - async fn account_details_inner(&self, address: web::Path
) -> AccountDetails { - let mut storage = self.replica_connection_pool.access_storage().await; + fn account_details_inner(&self, address: web::Path
) -> AccountDetails { + let mut storage = self.replica_connection_pool.access_storage_blocking(); let balances = storage .explorer() @@ -83,16 +76,11 @@ impl RestApi { .get_account_nonces(*address) .unwrap(); - // Dirty fix for zero address. - let account_type = if *address == Address::zero() { - AccountType::Contract - } else { - storage - .explorer() - .accounts_dal() - .get_account_type(*address) - .unwrap() - }; + let account_type = storage + .explorer() + .accounts_dal() + .get_account_type(*address) + .unwrap(); AccountDetails { address: *address, @@ -109,26 +97,19 @@ impl RestApi { address: web::Path
, ) -> ActixResult { let start = Instant::now(); - let account_details = self_.account_details_inner(address).await; + let account_details = self_.account_details_inner(address); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "account_details"); ok_json(account_details) } - async fn contract_details_inner(&self, address: web::Path
) -> Option { - // Dirty fix for zero address. - let contract_address = if *address == Address::zero() { - L2_ETH_TOKEN_ADDRESS - } else { - *address - }; - let mut storage = self.replica_connection_pool.access_storage().await; + fn contract_details_inner(&self, address: web::Path
) -> Option { + let mut storage = self.replica_connection_pool.access_storage_blocking(); let contract_info = storage .explorer() .misc_dal() - .get_contract_info(contract_address) + .get_contract_info(*address) .unwrap(); - if let Some(mut contract_info) = contract_info { - contract_info.address = *address; + if let Some(contract_info) = contract_info { let contract_stats = storage .explorer() .misc_dal() @@ -156,7 +137,7 @@ impl RestApi { ) -> ActixResult { let start = Instant::now(); - let response = match self_.contract_details_inner(address).await { + let response = match self_.contract_details_inner(address) { Some(contract_details) => ok_json(contract_details), None => Ok(HttpResponse::NotFound().finish()), }; @@ -184,16 +165,16 @@ impl RestApi { #[tracing::instrument(skip(self))] fn validate_pagination_query(&self, pagination: PaginationQuery) -> Result<(), HttpResponse> { - if pagination.limit > self.config.api.explorer.req_entities_limit() { + if pagination.limit > self.api_config.req_entities_limit() { return Err(HttpResponse::BadRequest().body(format!( "Limit should not exceed {}", - self.config.api.explorer.req_entities_limit() + self.api_config.req_entities_limit() ))); } - if pagination.offset + pagination.limit > self.config.api.explorer.offset_limit() { + if pagination.offset + pagination.limit > self.api_config.offset_limit() { return Err(HttpResponse::BadRequest().body(format!( "(offset + limit) should not exceed {}", - self.config.api.explorer.offset_limit() + self.api_config.offset_limit() ))); } @@ -213,7 +194,7 @@ impl RestApi { return Ok(res); } - let mut storage = self_.replica_connection_pool.access_storage().await; + let mut storage = self_.replica_connection_pool.access_storage_blocking(); if let Some(address) = query.address { match storage .explorer() @@ -237,8 +218,8 @@ impl RestApi { query.tx_position(), query.block_number, query.pagination, - self_.config.api.explorer.offset_limit(), - self_.config.contracts.l2_erc20_bridge_addr, + self_.api_config.offset_limit(), + self_.l2_erc20_bridge_addr, ) .unwrap() } else { @@ -253,8 +234,8 @@ impl RestApi { query.l1_batch_number, query.contract_address, query.pagination, - self_.config.api.explorer.offset_limit(), - self_.config.contracts.l2_erc20_bridge_addr, + self_.api_config.offset_limit(), + self_.l2_erc20_bridge_addr, ) .unwrap() }; @@ -285,11 +266,10 @@ impl RestApi { let tx_details = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .transactions_dal() - .get_transaction_details(*hash, self_.config.contracts.l2_erc20_bridge_addr) + .get_transaction_details(*hash, self_.l2_erc20_bridge_addr) .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "transaction_details"); @@ -311,8 +291,7 @@ impl RestApi { let blocks = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .blocks_dal() .get_blocks_page(query, self_.network_stats.read().await.last_verified) @@ -331,11 +310,10 @@ impl RestApi { let block_details = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .blocks_dal() - .get_block_details(MiniblockNumber(*number)) + .get_block_details(MiniblockNumber(*number), self_.fee_account_addr) .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "block_details"); @@ -355,7 +333,7 @@ impl RestApi { return Ok(res); } let last_verified_miniblock = self_.network_stats.read().await.last_verified; - let mut storage = 
self_.replica_connection_pool.access_storage().await; + let mut storage = self_.replica_connection_pool.access_storage_blocking(); let last_verified_l1_batch = storage .blocks_web3_dal() @@ -382,8 +360,7 @@ impl RestApi { let l1_batch_details = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .blocks_dal() .get_l1_batch_details(L1BatchNumber(*number)) @@ -405,8 +382,7 @@ impl RestApi { let token_details = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .misc_dal() .get_token_details(*address) @@ -427,7 +403,7 @@ impl RestApi { ) -> ActixResult { let start = Instant::now(); - let mut storage = self_.master_connection_pool.access_storage().await; + let mut storage = self_.master_connection_pool.access_storage_blocking(); if !storage .storage_logs_dal() @@ -467,11 +443,10 @@ impl RestApi { let events = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .events_dal() - .get_events_page(query, self_.config.api.explorer.offset_limit()) + .get_events_page(query, self_.api_config.offset_limit()) .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "events_pagination"); @@ -488,8 +463,7 @@ impl RestApi { let status = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .contract_verification_dal() .get_verification_request_status(*id) @@ -510,8 +484,7 @@ impl RestApi { let versions = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .contract_verification_dal() .get_zksolc_versions() @@ -529,8 +502,7 @@ impl RestApi { let versions = self_ .replica_connection_pool - .access_storage() - .await + .access_storage_blocking() .explorer() .contract_verification_dal() .get_solc_versions() diff --git a/core/bin/zksync_core/src/api_server/explorer/mod.rs b/core/bin/zksync_core/src/api_server/explorer/mod.rs index 27c96d7691e1..95ef3e58290d 100644 --- a/core/bin/zksync_core/src/api_server/explorer/mod.rs +++ b/core/bin/zksync_core/src/api_server/explorer/mod.rs @@ -1,8 +1,9 @@ use std::net::SocketAddr; use std::time::Duration; -use zksync_config::ZkSyncConfig; +use zksync_config::configs::api::Explorer as ExplorerApiConfig; use zksync_dal::connection::ConnectionPool; +use zksync_types::Address; use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; use actix_cors::Cors; @@ -47,13 +48,14 @@ fn start_server(api: RestApi, bind_to: SocketAddr, threads: usize) -> Server { /// Start HTTP REST API pub fn start_server_thread_detached( - config: &ZkSyncConfig, + api_config: ExplorerApiConfig, + l2_erc20_bridge_addr: Address, + fee_account_addr: Address, master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, mut stop_receiver: watch::Receiver, ) -> JoinHandle<()> { let (handler, panic_sender) = spawn_panic_handler(); - let config = config.clone(); std::thread::Builder::new() .name("explorer-api".to_string()) @@ -61,9 +63,15 @@ pub fn start_server_thread_detached( let _panic_sentinel = ThreadPanicNotify(panic_sender.clone()); actix_rt::System::new().block_on(async move { - let bind_address = config.api.explorer.bind_addr(); - let threads = config.api.explorer.threads_per_server as usize; - let api = RestApi::new(master_connection_pool, replica_connection_pool, config); + let bind_address = api_config.bind_addr(); + let threads = api_config.threads_per_server as usize; + let api = RestApi::new( + 
master_connection_pool, + replica_connection_pool, + api_config, + l2_erc20_bridge_addr, + fee_account_addr, + ); api.spawn_network_stats_updater(panic_sender, stop_receiver.clone()); let server = start_server(api, bind_address, threads); diff --git a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs b/core/bin/zksync_core/src/api_server/explorer/network_stats.rs index bd546b85d32b..3bad1f26a60f 100644 --- a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs +++ b/core/bin/zksync_core/src/api_server/explorer/network_stats.rs @@ -49,7 +49,7 @@ impl SharedNetworkStats { timer.tick().await; - let mut storage = connection_pool.access_storage().await; + let mut storage = connection_pool.access_storage_blocking(); let last_sealed = storage .blocks_web3_dal() @@ -64,7 +64,7 @@ impl SharedNetworkStats { let new_transactions = storage .explorer() .transactions_dal() - .get_transactions_count_after(prev_stats.last_sealed) + .get_transactions_count_between(prev_stats.last_sealed + 1, last_sealed) .unwrap(); let stats = NetworkStats { diff --git a/core/bin/zksync_core/src/api_server/healthcheck.rs b/core/bin/zksync_core/src/api_server/healthcheck.rs new file mode 100644 index 000000000000..370bfa1efbee --- /dev/null +++ b/core/bin/zksync_core/src/api_server/healthcheck.rs @@ -0,0 +1,68 @@ +use actix_web::dev::Server; +use actix_web::{get, web, App, HttpResponse, HttpServer, Responder}; +use serde::Serialize; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::watch; +use zksync_health_check::{CheckHealth, CheckHealthStatus}; +use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; + +#[derive(Serialize)] +pub struct Response { + pub message: String, +} + +#[get("/health")] +async fn healthcheck(healthchecks: web::Data<[Box]>) -> impl Responder { + for healthcheck in healthchecks.iter() { + match healthcheck.check_health() { + CheckHealthStatus::NotReady(message) => { + let response = Response { message }; + return HttpResponse::ServiceUnavailable().json(response); + } + CheckHealthStatus::Ready => (), + } + } + let response = Response { + message: "Everything is working fine".to_string(), + }; + HttpResponse::Ok().json(response) +} + +fn run_server(bind_address: SocketAddr, healthchecks: Vec>) -> Server { + let healthchecks: Arc<[Box]> = healthchecks.into(); + let data = web::Data::from(healthchecks); + HttpServer::new(move || App::new().service(healthcheck).app_data(data.clone())) + .workers(1) + .bind(bind_address) + .unwrap() + .run() +} + +/// Start HTTP healthcheck API +pub fn start_server_thread_detached( + addr: SocketAddr, + healthchecks: Vec>, + mut stop_receiver: watch::Receiver, +) -> tokio::task::JoinHandle<()> { + let (handler, panic_sender) = spawn_panic_handler(); + std::thread::Builder::new() + .name("healthcheck".to_string()) + .spawn(move || { + let _panic_sentinel = ThreadPanicNotify(panic_sender.clone()); + + actix_rt::System::new().block_on(async move { + let server = run_server(addr, healthchecks); + let close_handle = server.handle(); + actix_rt::spawn(async move { + if stop_receiver.changed().await.is_ok() { + close_handle.stop(true).await; + vlog::info!("Stop signal received, Health api is shutting down"); + } + }); + server.await.expect("Health api crashed"); + }); + }) + .expect("Failed to spawn thread for REST API"); + + handler +} diff --git a/core/bin/zksync_core/src/api_server/mod.rs b/core/bin/zksync_core/src/api_server/mod.rs index dd50dab9f0ff..5476e2775325 100644 --- a/core/bin/zksync_core/src/api_server/mod.rs +++ 
b/core/bin/zksync_core/src/api_server/mod.rs @@ -1,5 +1,6 @@ // Everywhere in this module the word "block" actually means "miniblock". pub mod execution_sandbox; pub mod explorer; +pub mod healthcheck; pub mod tx_sender; pub mod web3; diff --git a/core/bin/zksync_core/src/api_server/tx_sender/error.rs b/core/bin/zksync_core/src/api_server/tx_sender/error.rs index fbfa3e26c416..e5045b1ee689 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/error.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/error.rs @@ -14,8 +14,8 @@ pub enum SubmitTxError { IncorrectTx(#[from] TxCheckError), #[error("insufficient funds for gas + value. balance: {0}, fee: {1}, value: {2}")] NotEnoughBalanceForFeeValue(U256, U256, U256), - #[error("cannot estimate transaction: {0}.")] - CannotEstimateTransaction(String), + #[error("execution reverted{}{}" , if .0.is_empty() { "" } else { ": " }, .0)] + ExecutionReverted(String, Vec), #[error("exceeds block gas limit")] GasLimitIsTooBig, #[error("{0}")] @@ -65,6 +65,7 @@ pub enum SubmitTxError { #[error("{0}")] ProxyError(#[from] zksync_web3_decl::jsonrpsee::core::Error), } + impl SubmitTxError { pub fn grafana_error_code(&self) -> &'static str { match self { @@ -72,7 +73,7 @@ impl SubmitTxError { SubmitTxError::NonceIsTooLow(_, _, _) => "nonce-is-too-low", SubmitTxError::IncorrectTx(_) => "incorrect-tx", SubmitTxError::NotEnoughBalanceForFeeValue(_, _, _) => "not-enough-balance-for-fee", - SubmitTxError::CannotEstimateTransaction(_) => "cannot-estimate-transaction", + SubmitTxError::ExecutionReverted(_, _) => "execution-reverted", SubmitTxError::GasLimitIsTooBig => "gas-limit-is-too-big", SubmitTxError::Unexecutable(_) => "unexecutable", SubmitTxError::RateLimitExceeded => "rate-limit-exceeded", @@ -96,13 +97,21 @@ impl SubmitTxError { SubmitTxError::ProxyError(_) => "proxy-error", } } + + pub fn data(&self) -> Vec { + if let SubmitTxError::ExecutionReverted(_, data) = self { + data.clone() + } else { + Vec::new() + } + } } impl From for SubmitTxError { fn from(err: SandboxExecutionError) -> SubmitTxError { match err { - SandboxExecutionError::Revert(reason) => { - SubmitTxError::CannotEstimateTransaction(reason) + SandboxExecutionError::Revert(reason, data) => { + SubmitTxError::ExecutionReverted(reason, data) } SandboxExecutionError::BootloaderFailure(reason) => { SubmitTxError::BootloaderFailure(reason) @@ -121,7 +130,7 @@ impl From for SubmitTxError { } SandboxExecutionError::FromIsNotAnAccount => SubmitTxError::FromIsNotAnAccount, SandboxExecutionError::InnerTxError => { - SubmitTxError::CannotEstimateTransaction("Bootloader-based tx failed".to_owned()) + SubmitTxError::ExecutionReverted("Bootloader-based tx failed".to_owned(), vec![]) } SandboxExecutionError::UnexpectedVMBehavior(reason) => { SubmitTxError::UnexpectedVMBehavior(reason) diff --git a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs index 20a078b559d4..5058d93f0d8b 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs @@ -3,7 +3,6 @@ use std::{cmp::min, num::NonZeroU32, sync::Arc, time::Instant}; // External uses -use bigdecimal::BigDecimal; use governor::clock::MonotonicClock; use governor::middleware::NoOpMiddleware; use governor::state::{InMemoryState, NotKeyed}; @@ -12,10 +11,14 @@ use governor::{Quota, RateLimiter}; use vm::vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, TxExecutionMode}; use 
vm::zk_evm::zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK; use zksync_config::configs::chain::StateKeeperConfig; +use zksync_contracts::{ + BaseSystemContracts, SystemContractCode, ESTIMATE_FEE_BLOCK_CODE, + PLAYGROUND_BLOCK_BOOTLOADER_CODE, +}; use zksync_dal::transactions_dal::L2TxSubmissionResult; -use zksync_eth_client::clients::http_client::EthereumClient; use vm::transaction_data::TransactionData; +use zksync_config::ZkSyncConfig; use zksync_types::fee::TransactionExecutionMetrics; use zksync_types::{ @@ -23,7 +26,6 @@ use zksync_types::{ MAX_NEW_FACTORY_DEPS, }; -use zksync_config::ZkSyncConfig; use zksync_dal::ConnectionPool; use zksync_types::{ @@ -34,11 +36,10 @@ use zksync_types::{ l2::L2Tx, tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, utils::storage_key_for_eth_balance, - AccountTreeId, Address, L2ChainId, Nonce, H160, H256, U256, + AccountTreeId, Address, Nonce, H160, H256, U256, }; -use zksync_contracts::BaseSystemContracts; -use zksync_utils::h256_to_u256; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; // Local uses use crate::api_server::execution_sandbox::{ @@ -46,10 +47,10 @@ use crate::api_server::execution_sandbox::{ validate_tx_with_pending_state, SandboxExecutionError, }; -use crate::fee_ticker::{error::TickerError, FeeTicker, TokenPriceRequestType}; -use crate::gas_adjuster::GasAdjuster; use crate::gas_tracker::{gas_count_from_tx_and_metrics, gas_count_from_writes}; -use crate::state_keeper::seal_criteria::{SealManager, SealResolution}; +use crate::l1_gas_price::L1GasPriceProvider; +use crate::state_keeper::seal_criteria::conditional_sealer::ConditionalSealer; +use crate::state_keeper::seal_criteria::SealResolution; pub mod error; pub use error::SubmitTxError; @@ -58,77 +59,183 @@ use vm::transaction_data::{derive_overhead, OverheadCoeficients}; pub mod proxy; pub use proxy::TxProxy; -pub struct TxSenderInner { - pub master_connection_pool: ConnectionPool, - pub replica_connection_pool: ConnectionPool, +/// Type alias for the rate limiter implementation. +type TxSenderRateLimiter = + RateLimiter>; + +/// Builder for the `TxSender`. +#[derive(Debug)] +pub struct TxSenderBuilder { + /// Shared TxSender configuration. + config: TxSenderConfig, + /// Connection pool for read requests. + replica_connection_pool: ConnectionPool, + /// Connection pool for write requests. If not set, `proxy` must be set. + master_connection_pool: Option, + /// Rate limiter for tx submissions. + rate_limiter: Option, + /// Proxy to submit transactions to the network. If not set, `master_connection_pool` must be set. + proxy: Option, + /// Actual state keeper configuration, required for tx verification. + /// If not set, transactions would not be checked against seal criteria. 
+ state_keeper_config: Option, +} + +impl TxSenderBuilder { + pub fn new(config: TxSenderConfig, replica_connection_pool: ConnectionPool) -> Self { + Self { + config, + replica_connection_pool, + master_connection_pool: None, + rate_limiter: None, + proxy: None, + state_keeper_config: None, + } + } + + pub fn with_rate_limiter(self, transactions_per_sec: u32) -> Self { + let rate_limiter = RateLimiter::direct_with_clock( + Quota::per_second(NonZeroU32::new(transactions_per_sec).unwrap()), + &MonotonicClock::default(), + ); + Self { + rate_limiter: Some(rate_limiter), + ..self + } + } + + pub fn with_tx_proxy(mut self, main_node_url: String) -> Self { + self.proxy = Some(TxProxy::new(main_node_url)); + self + } + + pub fn with_main_connection_pool(mut self, master_connection_pool: ConnectionPool) -> Self { + self.master_connection_pool = Some(master_connection_pool); + self + } + + pub fn with_state_keeper_config(mut self, state_keeper_config: StateKeeperConfig) -> Self { + self.state_keeper_config = Some(state_keeper_config); + self + } + + pub fn build( + self, + l1_gas_price_source: Arc, + default_aa_hash: H256, + ) -> TxSender { + assert!( + self.master_connection_pool.is_some() || self.proxy.is_some(), + "Either master connection pool or proxy must be set" + ); + + let mut storage = self.replica_connection_pool.access_storage_blocking(); + let default_aa_bytecode = storage + .storage_dal() + .get_factory_dep(default_aa_hash) + .expect("Default AA hash must be present in the database"); + drop(storage); + + let default_aa_contract = SystemContractCode { + code: bytes_to_be_words(default_aa_bytecode), + hash: default_aa_hash, + }; + + let playground_base_system_contracts = BaseSystemContracts { + default_aa: default_aa_contract.clone(), + bootloader: PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(), + }; + + let estimate_fee_base_system_contracts = BaseSystemContracts { + default_aa: default_aa_contract, + bootloader: ESTIMATE_FEE_BLOCK_CODE.clone(), + }; + + TxSender(Arc::new(TxSenderInner { + sender_config: self.config, + master_connection_pool: self.master_connection_pool, + replica_connection_pool: self.replica_connection_pool, + l1_gas_price_source, + playground_base_system_contracts, + estimate_fee_base_system_contracts, + rate_limiter: self.rate_limiter, + proxy: self.proxy, + state_keeper_config: self.state_keeper_config, + })) + } +} + +/// Internal static `TxSender` configuration. +/// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc) +/// may require different configuration layouts. +/// The intention is to only keep the actually used information here. 
+#[derive(Debug)] +pub struct TxSenderConfig { pub fee_account_addr: Address, - pub chain_id: L2ChainId, pub gas_price_scale_factor: f64, pub max_nonce_ahead: u32, - pub max_single_tx_gas: u32, - pub rate_limiter: - Option>>, + pub max_allowed_l2_tx_gas_limit: u32, + pub fair_l2_gas_price: u64, + pub vm_execution_cache_misses_limit: Option, + pub validation_computational_gas_limit: u32, +} + +impl From for TxSenderConfig { + fn from(config: ZkSyncConfig) -> Self { + Self { + fee_account_addr: config.chain.state_keeper.fee_account_addr, + gas_price_scale_factor: config.api.web3_json_rpc.gas_price_scale_factor, + max_nonce_ahead: config.api.web3_json_rpc.max_nonce_ahead, + max_allowed_l2_tx_gas_limit: config.chain.state_keeper.max_allowed_l2_tx_gas_limit, + fair_l2_gas_price: config.chain.state_keeper.fair_l2_gas_price, + vm_execution_cache_misses_limit: config + .api + .web3_json_rpc + .vm_execution_cache_misses_limit, + validation_computational_gas_limit: config + .chain + .state_keeper + .validation_computational_gas_limit, + } + } +} + +pub struct TxSenderInner { + pub(super) sender_config: TxSenderConfig, + pub master_connection_pool: Option, + pub replica_connection_pool: ConnectionPool, // Used to keep track of gas prices for the fee ticker. - pub gas_adjuster: Arc>, - pub state_keeper_config: StateKeeperConfig, - pub playground_base_system_contracts: BaseSystemContracts, - pub estimate_fee_base_system_contracts: BaseSystemContracts, - pub proxy: Option, + pub l1_gas_price_source: Arc, + pub(super) playground_base_system_contracts: BaseSystemContracts, + estimate_fee_base_system_contracts: BaseSystemContracts, + /// Optional rate limiter that will limit the amount of transactions per second sent from a single entity. + rate_limiter: Option, + /// Optional transaction proxy to be used for transaction submission. + pub(super) proxy: Option, + /// An up-to-date version of the state keeper config. + /// This field may be omitted on the external node, since the configuration may change unexpectedly. + /// If this field is set to `None`, `TxSender` will assume that any transaction is executable. + state_keeper_config: Option, } -#[derive(Clone)] -pub struct TxSender(pub Arc); +pub struct TxSender(pub Arc>); -impl std::fmt::Debug for TxSender { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TxSender").finish() +// Custom implementation is required due to generic param: +// Even though it's under `Arc`, compiler doesn't generate the `Clone` implementation unless +// an unnecessary bound is added. 
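The comment above describes a common workaround; a minimal, self-contained illustration of it (names here are generic and not part of this codebase):

use std::sync::Arc;

struct Inner<G> {
    _source: G,
}

struct Handle<G>(Arc<Inner<G>>);

// `#[derive(Clone)]` would generate `impl<G: Clone> Clone for Handle<G>`, adding a `G: Clone`
// bound that cloning the `Arc` never needs. Writing the impl by hand drops that bound.
impl<G> Clone for Handle<G> {
    fn clone(&self) -> Self {
        Self(Arc::clone(&self.0))
    }
}

fn main() {
    struct NotClone; // deliberately does not implement `Clone`
    let handle = Handle(Arc::new(Inner { _source: NotClone }));
    let _copy = handle.clone(); // still compiles
}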
+impl Clone for TxSender { + fn clone(&self) -> Self { + Self(self.0.clone()) } } -impl TxSender { - pub fn new( - config: &ZkSyncConfig, - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - gas_adjuster: Arc>, - playground_base_system_contracts: BaseSystemContracts, - estimate_fee_base_system_contracts: BaseSystemContracts, - ) -> Self { - let rate_limiter = config - .api - .web3_json_rpc - .transactions_per_sec_limit - .map(|value| { - RateLimiter::direct_with_clock( - Quota::per_second(NonZeroU32::new(value).unwrap()), - &MonotonicClock::default(), - ) - }); - - let proxy = config - .api - .web3_json_rpc - .main_node_url - .as_ref() - .map(|url| TxProxy::new(url)); - - Self(Arc::new(TxSenderInner { - chain_id: L2ChainId(config.chain.eth.zksync_network_id), - master_connection_pool, - replica_connection_pool, - fee_account_addr: config.chain.state_keeper.fee_account_addr, - max_nonce_ahead: config.api.web3_json_rpc.max_nonce_ahead, - gas_price_scale_factor: config.api.web3_json_rpc.gas_price_scale_factor, - max_single_tx_gas: config.chain.state_keeper.max_single_tx_gas, - rate_limiter, - gas_adjuster, - state_keeper_config: config.chain.state_keeper.clone(), - playground_base_system_contracts, - estimate_fee_base_system_contracts, - proxy, - })) +impl std::fmt::Debug for TxSender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TxSender").finish() } +} +impl TxSender { #[tracing::instrument(skip(self, tx))] pub fn submit_tx(&self, tx: L2Tx) -> Result { if let Some(rate_limiter) = &self.0.rate_limiter { @@ -147,7 +254,7 @@ impl TxSender { let _maximal_allowed_overhead = 0; if tx.common_data.fee.gas_limit - > U256::from(self.0.state_keeper_config.max_allowed_l2_tx_gas_limit) + > U256::from(self.0.sender_config.max_allowed_l2_tx_gas_limit) { vlog::info!( "Submitted Tx is Unexecutable {:?} because of GasLimitIsTooBig {}", @@ -156,8 +263,7 @@ impl TxSender { ); return Err(SubmitTxError::GasLimitIsTooBig); } - if tx.common_data.fee.max_fee_per_gas < self.0.state_keeper_config.fair_l2_gas_price.into() - { + if tx.common_data.fee.max_fee_per_gas < self.0.sender_config.fair_l2_gas_price.into() { vlog::info!( "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}", tx.hash(), @@ -180,11 +286,11 @@ impl TxSender { )); } - let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); + let l1_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); let (_, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( l1_gas_price, - self.0.state_keeper_config.fair_l2_gas_price, + self.0.sender_config.fair_l2_gas_price, ); let intrinsic_constants = get_intrinsic_constants(); @@ -210,20 +316,21 @@ impl TxSender { metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "1_validate"); stage_started_at = Instant::now(); - let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); - let fair_l2_gas_price = self.0.state_keeper_config.fair_l2_gas_price; + let l1_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); + let fair_l2_gas_price = self.0.sender_config.fair_l2_gas_price; let (tx_metrics, _) = execute_tx_with_pending_state( &self.0.replica_connection_pool, tx.clone().into(), - AccountTreeId::new(self.0.fee_account_addr), - TxExecutionMode::EthCall, + AccountTreeId::new(self.0.sender_config.fee_account_addr), + TxExecutionMode::VerifyExecute, Some(tx.nonce()), U256::zero(), l1_gas_price, fair_l2_gas_price, 
Some(tx.common_data.fee.max_fee_per_gas.as_u64()), &self.0.playground_base_system_contracts, + &mut Default::default(), ); vlog::info!( @@ -237,7 +344,7 @@ impl TxSender { let validation_result = validate_tx_with_pending_state( &self.0.replica_connection_pool, tx.clone(), - AccountTreeId::new(self.0.fee_account_addr), + AccountTreeId::new(self.0.sender_config.fee_account_addr), TxExecutionMode::VerifyExecute, Some(tx.nonce()), U256::zero(), @@ -245,9 +352,7 @@ impl TxSender { fair_l2_gas_price, Some(tx.common_data.fee.max_fee_per_gas.as_u64()), &self.0.playground_base_system_contracts, - self.0 - .state_keeper_config - .validation_computational_gas_limit, + self.0.sender_config.validation_computational_gas_limit, ); metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "3_verify_execute"); @@ -261,11 +366,22 @@ impl TxSender { if let Some(proxy) = &self.0.proxy { // We're running an external node: we have to proxy the transaction to the main node. + // But before we do that, save the tx to cache in case someone will request it + // Before it reaches the main node. + proxy.save_tx(tx.hash(), tx.clone()); proxy.submit_tx(&tx)?; - proxy.save_tx(tx.hash(), tx); + // Now, after we are sure that the tx is on the main node, remove it from cache + // since we don't want to store txs that might have been replaced or otherwise removed + // from the mempool. + proxy.forget_tx(tx.hash()); metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "4_tx_proxy"); metrics::counter!("server.processed_txs", 1, "stage" => "proxied"); return Ok(L2TxSubmissionResult::Proxied); + } else { + assert!( + self.0.master_connection_pool.is_some(), + "TxSender is instantiated without both master connection pool and tx proxy" + ); } let nonce = tx.common_data.nonce.0; @@ -274,6 +390,8 @@ impl TxSender { let submission_res_handle = self .0 .master_connection_pool + .as_ref() + .unwrap() // Checked above .access_storage_blocking() .transactions_dal() .insert_transaction_l2(tx, tx_metrics); @@ -284,7 +402,7 @@ impl TxSender { status = "already_executed".to_string(); Err(SubmitTxError::NonceIsTooLow( expected_nonce.0, - expected_nonce.0 + self.0.max_nonce_ahead, + expected_nonce.0 + self.0.sender_config.max_nonce_ahead, nonce, )) } @@ -317,15 +435,15 @@ impl TxSender { if tx.common_data.nonce.0 < expected_nonce.0 { Err(SubmitTxError::NonceIsTooLow( expected_nonce.0, - expected_nonce.0 + self.0.max_nonce_ahead, + expected_nonce.0 + self.0.sender_config.max_nonce_ahead, tx.nonce().0, )) - } else if !(expected_nonce.0..=(expected_nonce.0 + self.0.max_nonce_ahead)) + } else if !(expected_nonce.0..=(expected_nonce.0 + self.0.sender_config.max_nonce_ahead)) .contains(&tx.common_data.nonce.0) { Err(SubmitTxError::NonceIsTooHigh( expected_nonce.0, - expected_nonce.0 + self.0.max_nonce_ahead, + expected_nonce.0 + self.0.sender_config.max_nonce_ahead, tx.nonce().0, )) } else { @@ -361,7 +479,7 @@ impl TxSender { // Estimate the minimum fee price user will agree to. 
let gas_price = std::cmp::min( tx.common_data.fee.max_fee_per_gas, - U256::from(self.0.state_keeper_config.fair_l2_gas_price) + U256::from(self.0.sender_config.fair_l2_gas_price) + tx.common_data.fee.max_priority_fee_per_gas, ); let max_fee = tx.common_data.fee.gas_limit * gas_price; @@ -378,45 +496,6 @@ impl TxSender { } } - /// Given the gas per pubdata limit signed by the user, returns - /// the gas per pubdata byte that should be used in the block for simulation - pub fn validate_gas_per_pubdata_byte( - &self, - agreed_by_user: U256, - ) -> Result { - // The user has agreed an a higher gas price than it is even possible to have in block. - // While we could just let it go, it is better to ensure that users know what they are doing. - if agreed_by_user > U256::from(u32::MAX) { - return Err(SubmitTxError::FeePerPubdataByteTooHigh); - } - - // It is now safe to convert here - let agreed_by_user = agreed_by_user.as_u32(); - - // This check is needed to filter out unrealistic transactions that will reside in mempool forever. - // If transaction has such limit set, most likely it was done manually or there is some mistake - // in user's code. This check is only needed for better UX. - const MIN_GAS_PER_PUBDATA_LIMIT: u32 = 10; // At 0.1 gwei per l2 gas it gives us max 1 gwei of l1 gas price. - if agreed_by_user < MIN_GAS_PER_PUBDATA_LIMIT { - return Err(SubmitTxError::UnrealisticPubdataPriceLimit); - } - - let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); - let suggested_gas_price_per_pubdata = derive_base_fee_and_gas_per_pubdata( - l1_gas_price, - self.0.state_keeper_config.fair_l2_gas_price, - ) - .1 as u32; - - // If user provided gas per pubdata limit lower than currently suggested - // by the server, the users' transaction will not be included in the blocks right away - // but it will stay in mempool. We still have to simulate it somehow, so we'll use the user's - // provided pubdata price - let result = agreed_by_user.min(suggested_gas_price_per_pubdata); - - Ok(result) - } - fn get_balance(&self, initiator_address: &H160) -> U256 { let eth_balance_key = storage_key_for_eth_balance(initiator_address); @@ -437,29 +516,34 @@ impl TxSender { estimated_fee_scale_factor: f64, acceptable_overestimation: u32, ) -> Result { + let estimation_started_at = Instant::now(); let l1_gas_price = { - let effective_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); + let effective_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); let current_l1_gas_price = - ((effective_gas_price as f64) * self.0.gas_price_scale_factor) as u64; + ((effective_gas_price as f64) * self.0.sender_config.gas_price_scale_factor) as u64; // In order for execution to pass smoothly, we need to ensure that block's required gasPerPubdata will be // <= to the one in the transaction itself. 
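To make the fee bound above concrete, a worked example with made-up numbers (all values in wei, chosen only for illustration):

fn main() {
    let fair_l2_gas_price: u128 = 250_000_000; // 0.25 gwei
    let max_fee_per_gas: u128 = 300_000_000; // 0.30 gwei, signed by the user
    let max_priority_fee_per_gas: u128 = 20_000_000; // 0.02 gwei
    let gas_limit: u128 = 1_000_000;

    // Same shape as the check above: the effective price is capped both by what the user
    // signed and by the fair L2 gas price plus the priority fee.
    let gas_price = max_fee_per_gas.min(fair_l2_gas_price + max_priority_fee_per_gas);
    let max_fee = gas_limit * gas_price;

    assert_eq!(gas_price, 270_000_000); // 0.27 gwei per gas unit
    assert_eq!(max_fee, 270_000_000_000_000); // upper bound on what the tx can be charged
}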
adjust_l1_gas_price_for_tx( current_l1_gas_price, - self.0.state_keeper_config.fair_l2_gas_price, + self.0.sender_config.fair_l2_gas_price, tx.gas_per_pubdata_byte_limit(), ) }; - let (base_fee, gas_per_pubdata_byte) = { - let (current_base_fee, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( - l1_gas_price, - self.0.state_keeper_config.fair_l2_gas_price, - ); - let enforced_base_fee = std::cmp::min(tx.max_fee_per_gas().as_u64(), current_base_fee); - - (enforced_base_fee, gas_per_pubdata_byte) - }; + let (base_fee, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( + l1_gas_price, + self.0.sender_config.fair_l2_gas_price, + ); + match &mut tx.common_data { + ExecuteTransactionCommon::L2(common_data) => { + common_data.fee.max_fee_per_gas = base_fee.into(); + common_data.fee.max_priority_fee_per_gas = base_fee.into(); + } + ExecuteTransactionCommon::L1(common_data) => { + common_data.max_fee_per_gas = base_fee.into(); + } + } let hashed_key = get_code_key(&tx.initiator_account()); // if the default account does not have enough funds @@ -514,11 +598,23 @@ impl TxSender { pubdata_for_factory_deps * (gas_per_pubdata_byte as u32) }; + // Rolling cache with storage values that were read from the DB. + let mut storage_read_cache = Default::default(); + // We are using binary search to find the minimal values of gas_limit under which // the transaction succeedes let mut lower_bound = 0; let mut upper_bound = MAX_L2_TX_GAS_LIMIT as u32; - + let tx_id = format!( + "{:?}-{}", + tx.initiator_account(), + tx.nonce().unwrap_or(Nonce(0)) + ); + vlog::trace!( + "fee estimation tx {:?}: preparation took {:?}, starting binary search", + tx_id, + estimation_started_at.elapsed(), + ); // Given the gas_limit to be used for the body of the transaction, // returns the result for executing the transaction with such gas_limit let mut execute = |tx_gas_limit: u32| { @@ -559,14 +655,21 @@ impl TxSender { let (tx_metrics, exec_result) = execute_tx_with_pending_state( &self.0.replica_connection_pool, tx.clone(), - AccountTreeId::new(self.0.fee_account_addr), - TxExecutionMode::EstimateFee, + AccountTreeId::new(self.0.sender_config.fee_account_addr), + TxExecutionMode::EstimateFee { + missed_storage_invocation_limit: self + .0 + .sender_config + .vm_execution_cache_misses_limit + .unwrap_or(usize::MAX), + }, enforced_nonce, added_balance, l1_gas_price, - self.0.state_keeper_config.fair_l2_gas_price, + self.0.sender_config.fair_l2_gas_price, Some(base_fee), &self.0.estimate_fee_base_system_contracts, + &mut storage_read_cache, ); self.ensure_tx_executable(&tx, &tx_metrics, false) @@ -581,20 +684,27 @@ impl TxSender { exec_result }; - let mut number_of_iterations = 0usize; while lower_bound + acceptable_overestimation < upper_bound { let mid = (lower_bound + upper_bound) / 2; - // There is no way to distinct between errors due to out of gas // or normal exeuction errors, so we just hope that increasing the // gas limit will make the transaction successful + let iteration_started_at = Instant::now(); if execute(gas_for_bytecodes_pubdata + mid).is_err() { lower_bound = mid + 1; } else { upper_bound = mid; } + vlog::trace!( + "fee estimation tx {:?}: iteration {} took {:?}. 
lower_bound: {}, upper_bound: {}", + tx_id, + number_of_iterations, + iteration_started_at.elapsed(), + lower_bound, + upper_bound, + ); number_of_iterations += 1; } metrics::histogram!( @@ -620,8 +730,9 @@ impl TxSender { let full_gas_limit = match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) { (_, true) => { - return Err(SubmitTxError::CannotEstimateTransaction( + return Err(SubmitTxError::ExecutionReverted( "exceeds block gas limit".to_string(), + vec![], )) } (x, _) => x, @@ -637,22 +748,12 @@ impl TxSender { } } - pub fn token_price( - &self, - request_type: TokenPriceRequestType, - l2_token_address: Address, - ) -> Result { - let mut storage = self.0.replica_connection_pool.access_storage_blocking(); - let mut tokens_web3_dal = storage.tokens_web3_dal(); - FeeTicker::get_l2_token_price(&mut tokens_web3_dal, request_type, &l2_token_address) - } - pub fn gas_price(&self) -> u64 { - let gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); + let gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); derive_base_fee_and_gas_per_pubdata( - (gas_price as f64 * self.0.gas_price_scale_factor).round() as u64, - self.0.state_keeper_config.fair_l2_gas_price, + (gas_price as f64 * self.0.sender_config.gas_price_scale_factor).round() as u64, + self.0.sender_config.fair_l2_gas_price, ) .0 } @@ -663,6 +764,13 @@ impl TxSender { tx_metrics: &TransactionExecutionMetrics, log_message: bool, ) -> Result<(), SubmitTxError> { + let Some(sk_config) = &self.0.state_keeper_config else { + // No config provided, so we can't check if transaction satisfies the seal criteria. + // We assume that it's executable, and if it's not, it will be caught by the main server + // (where this check is always performed). + return Ok(()); + }; + let execution_metrics = ExecutionMetrics { published_bytecode_bytes: tx_metrics.published_bytecode_bytes, l2_l1_long_messages: tx_metrics.l2_l1_long_messages, @@ -674,6 +782,7 @@ impl TxSender { vm_events: tx_metrics.vm_events, total_log_queries: tx_metrics.total_log_queries, cycles_used: tx_metrics.cycles_used, + computational_gas_used: tx_metrics.computational_gas_used, }; let writes_metrics = DeduplicatedWritesMetrics { initial_storage_writes: tx_metrics.initial_storage_writes, @@ -686,9 +795,9 @@ impl TxSender { let tx_data: TransactionData = transaction.clone().into(); let tx_encoding_size = tx_data.into_tokens().len(); - for sealer in &SealManager::get_default_sealers() { + for sealer in &ConditionalSealer::get_default_sealers() { let seal_resolution = sealer.should_seal( - &self.0.state_keeper_config, + sk_config, 0u128, 1, execution_metrics, diff --git a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs index eb7fafe0f829..c53f17fdf989 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs @@ -1,30 +1,30 @@ use std::collections::HashMap; +use std::future::Future; use std::sync::RwLock; use zksync_types::{ - api::{BlockId, Transaction, TransactionId}, + api::{BlockId, Transaction, TransactionDetails, TransactionId, TransactionReceipt}, l2::L2Tx, H256, }; use zksync_web3_decl::{ - jsonrpsee::core::Error as JsonrpseeError, + jsonrpsee::core::RpcResult, jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, - namespaces::EthNamespaceClient, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, }; /// Used by external node to proxy transaction to the main node /// and store them while 
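A stripped-down sketch of the gas-limit search used above: bisect on the body gas limit, treat any execution error as a signal that more gas is needed, and stop once the window shrinks to `acceptable_overestimation`. The `execute` closure here is only a stand-in for the real VM run.

/// Smallest gas limit (up to `acceptable_overestimation`) for which `execute` succeeds,
/// assuming success is monotone in the gas limit.
fn find_gas_limit(
    mut execute: impl FnMut(u32) -> Result<(), ()>,
    max_gas_limit: u32,
    acceptable_overestimation: u32,
) -> u32 {
    let (mut lower_bound, mut upper_bound) = (0u32, max_gas_limit);
    while lower_bound + acceptable_overestimation < upper_bound {
        let mid = (lower_bound + upper_bound) / 2;
        // As in the loop above, a failure is assumed to mean "not enough gas".
        if execute(mid).is_err() {
            lower_bound = mid + 1;
        } else {
            upper_bound = mid;
        }
    }
    upper_bound
}

fn main() {
    // Pretend the transaction needs exactly 21_000 gas for its body.
    let needed = 21_000u32;
    let estimate = find_gas_limit(|gas| if gas >= needed { Ok(()) } else { Err(()) }, 1 << 24, 0);
    assert_eq!(estimate, needed);
}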
they're not synced back yet +#[derive(Debug)] pub struct TxProxy { tx_cache: RwLock>, - client: HttpClient, + main_node_url: String, } impl TxProxy { - pub fn new(main_node_url: &str) -> Self { + pub fn new(main_node_url: String) -> Self { Self { - client: HttpClientBuilder::default() - .build(main_node_url) - .expect("Failed to create HTTP client"), + main_node_url, tx_cache: RwLock::new(HashMap::new()), } } @@ -41,20 +41,46 @@ impl TxProxy { self.tx_cache.write().unwrap().insert(tx_hash, tx); } - pub fn submit_tx(&self, tx: &L2Tx) -> Result { + fn proxy_request(&self, request: R) -> RpcResult + where + T: Send, + F: Send + Future>, + R: 'static + Send + FnOnce(HttpClient) -> F, + { + let main_node_url = self.main_node_url.clone(); + crate::block_on(async move { + // Clients are tied to the runtime they are created in, so we have to create it here. + let client = HttpClientBuilder::default().build(&main_node_url).unwrap(); + request(client).await + }) + } + + pub fn submit_tx(&self, tx: &L2Tx) -> RpcResult { let raw_tx = zksync_types::Bytes(tx.common_data.input_data().expect("raw tx is absent")); - async_std::task::block_on(self.client.send_raw_transaction(raw_tx)) - } - - pub fn request_tx(&self, id: TransactionId) -> Result, JsonrpseeError> { - async_std::task::block_on(match id { - TransactionId::Block(BlockId::Hash(block), index) => self - .client - .get_transaction_by_block_hash_and_index(block, index), - TransactionId::Block(BlockId::Number(block), index) => self - .client - .get_transaction_by_block_number_and_index(block, index), - TransactionId::Hash(hash) => self.client.get_transaction_by_hash(hash), + vlog::info!("Proxying tx {}", tx.hash()); + self.proxy_request(|client| async move { client.send_raw_transaction(raw_tx).await }) + } + + pub fn request_tx(&self, id: TransactionId) -> RpcResult> { + self.proxy_request(move |client| async move { + match id { + TransactionId::Block(BlockId::Hash(block), index) => { + client.get_transaction_by_block_hash_and_index(block, index) + } + TransactionId::Block(BlockId::Number(block), index) => { + client.get_transaction_by_block_number_and_index(block, index) + } + TransactionId::Hash(hash) => client.get_transaction_by_hash(hash), + } + .await }) } + + pub fn request_tx_details(&self, hash: H256) -> RpcResult> { + self.proxy_request(move |client| async move { client.get_transaction_details(hash).await }) + } + + pub fn request_tx_receipt(&self, hash: H256) -> RpcResult> { + self.proxy_request(move |client| async move { client.get_transaction_receipt(hash).await }) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs index d7cc9edb2cd0..16720abaf6dc 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs @@ -1,5 +1,4 @@ use jsonrpc_core::{Error, ErrorCode}; -use serde_json::json; use zksync_web3_decl::error::Web3Error; pub fn into_jsrpc_error(err: Web3Error) -> Error { @@ -14,20 +13,18 @@ pub fn into_jsrpc_error(err: Web3Error) -> Error { | Web3Error::FilterNotFound | Web3Error::InvalidFeeParams(_) | Web3Error::LogsLimitExceeded(_, _, _) => ErrorCode::InvalidParams, - Web3Error::SubmitTransactionError(_) | Web3Error::SerializationError(_) => 3.into(), + Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3.into(), Web3Error::PubSubTimeout => 4.into(), Web3Error::RequestTimeout => 5.into(), }, message: match err { - 
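The `proxy_request` helper above drives one async client call from synchronous code. Below is a free-standing version of the same shape that uses a throwaway Tokio runtime instead of the crate's own `block_on` helper; it is a sketch under that assumption, not the actual implementation.

use std::future::Future;

// Build the client and run exactly one request inside a short-lived runtime.
fn blocking_request<T, F>(make_request: impl FnOnce() -> F) -> T
where
    F: Future<Output = T>,
{
    tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .expect("failed to build a single-use Tokio runtime")
        .block_on(make_request())
}

fn main() {
    // Trivial stand-in; in the proxy the future would be an HTTP JSON-RPC call.
    let answer = blocking_request(|| async { 40 + 2 });
    assert_eq!(answer, 42);
}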
Web3Error::SubmitTransactionError(_) => err.to_string(), + Web3Error::SubmitTransactionError(_, _) => err.to_string(), _ => err.to_string(), }, data: match err { - Web3Error::SubmitTransactionError(err) => json! ({ - "code": 104, - "message": err - }) - .into(), + Web3Error::SubmitTransactionError(_, data) => { + Some(format!("0x{}", hex::encode(data)).into()) + } _ => None, }, } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs new file mode 100644 index 000000000000..b62e026e4887 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs @@ -0,0 +1,95 @@ +// External uses +use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use crate::api_server::web3::namespaces::debug::DebugNamespace; +use jsonrpc_core::Result; +use jsonrpc_derive::rpc; +use serde::{Deserialize, Serialize}; +use zksync_types::api::{BlockId, BlockNumber, DebugCall, ResultDebugCall}; +use zksync_types::transaction_request::CallRequest; +use zksync_types::H256; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum SupportedTracers { + CallTracer, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CallTracerConfig { + pub only_top_call: bool, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TracerConfig { + pub tracer: SupportedTracers, + pub tracer_config: CallTracerConfig, +} + +#[rpc] +pub trait DebugNamespaceT { + #[rpc(name = "debug_traceBlockByNumber", returns = "Vec")] + fn trace_block_by_number( + &self, + block: BlockNumber, + options: Option, + ) -> Result>; + #[rpc(name = "debug_traceBlockByHash", returns = "Vec")] + fn trace_block_by_hash( + &self, + hash: H256, + options: Option, + ) -> Result>; + #[rpc(name = "debug_traceCall", returns = "DebugCall")] + fn trace_call( + &self, + request: CallRequest, + block: Option, + options: Option, + ) -> Result; + #[rpc(name = "debug_traceTransaction", returns = "DebugCall")] + fn trace_transaction( + &self, + tx_hash: H256, + options: Option, + ) -> Result>; +} + +impl DebugNamespaceT for DebugNamespace { + fn trace_block_by_number( + &self, + block: BlockNumber, + options: Option, + ) -> Result> { + self.debug_trace_block_impl(BlockId::Number(block), options) + .map_err(into_jsrpc_error) + } + + fn trace_block_by_hash( + &self, + hash: H256, + options: Option, + ) -> Result> { + self.debug_trace_block_impl(BlockId::Hash(hash), options) + .map_err(into_jsrpc_error) + } + + fn trace_call( + &self, + request: CallRequest, + block: Option, + options: Option, + ) -> Result { + self.debug_trace_call_impl(request, block, options) + .map_err(into_jsrpc_error) + } + + fn trace_transaction( + &self, + tx_hash: H256, + options: Option, + ) -> Result> { + Ok(self.debug_trace_transaction_impl(tx_hash, options)) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs index b9d094ee2190..2555816b384a 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs @@ -18,8 +18,8 @@ use zksync_web3_decl::error::Web3Error; use zksync_web3_decl::types::{Block, Filter, FilterChanges, Log}; // Local uses -use crate::web3::backend_jsonrpc::error::into_jsrpc_error; use 
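Because of the `#[serde(rename_all = "camelCase")]` attributes above, tracer options arrive in Geth-style camelCase. A small sketch pinning down the JSON shape; the structs are standalone copies made for the example, and `serde_json` is assumed to be available:

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
enum SupportedTracers {
    CallTracer,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CallTracerConfig {
    only_top_call: bool,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct TracerConfig {
    tracer: SupportedTracers,
    tracer_config: CallTracerConfig,
}

fn main() {
    let options: TracerConfig = serde_json::from_value(serde_json::json!({
        "tracer": "callTracer",
        "tracerConfig": { "onlyTopCall": true }
    }))
    .unwrap();
    assert!(matches!(options.tracer, SupportedTracers::CallTracer));
    assert!(options.tracer_config.only_top_call);
}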
crate::web3::namespaces::EthNamespace; +use crate::{l1_gas_price::L1GasPriceProvider, web3::backend_jsonrpc::error::into_jsrpc_error}; #[rpc] pub trait EthNamespaceT { @@ -178,7 +178,7 @@ pub trait EthNamespaceT { ) -> Result; } -impl EthNamespaceT for EthNamespace { +impl EthNamespaceT for EthNamespace { fn get_block_number(&self) -> Result { self.get_block_number_impl().map_err(into_jsrpc_error) } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs index 01baf794abf8..32503ab00fc7 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs @@ -1,3 +1,4 @@ +pub mod debug; pub mod eth; pub mod net; pub mod web3; diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index 2361ab1872a6..1d37f765622a 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -19,8 +19,8 @@ use zksync_web3_decl::error::Web3Error; use zksync_web3_decl::types::Token; // Local uses -use crate::web3::backend_jsonrpc::error::into_jsrpc_error; use crate::web3::namespaces::ZksNamespace; +use crate::{l1_gas_price::L1GasPriceProvider, web3::backend_jsonrpc::error::into_jsrpc_error}; #[rpc] pub trait ZksNamespaceT { @@ -112,9 +112,15 @@ pub trait ZksNamespaceT { #[rpc(name = "zks_getL1BatchDetails", returns = "Option")] fn get_l1_batch_details(&self, batch: L1BatchNumber) -> Result>; + + #[rpc(name = "zks_getBytecodeByHash", returns = "Option>")] + fn get_bytecode_by_hash(&self, hash: H256) -> Result>>; + + #[rpc(name = "zks_getL1GasPrice", returns = "U64")] + fn get_l1_gas_price(&self) -> Result; } -impl ZksNamespaceT for ZksNamespace { +impl ZksNamespaceT for ZksNamespace { fn estimate_fee(&self, req: CallRequest) -> Result { self.estimate_fee_impl(req).map_err(into_jsrpc_error) } @@ -230,4 +236,12 @@ impl ZksNamespaceT for ZksNamespace { self.get_l1_batch_details_impl(batch) .map_err(into_jsrpc_error) } + + fn get_bytecode_by_hash(&self, hash: H256) -> Result>> { + Ok(self.get_bytecode_by_hash_impl(hash)) + } + + fn get_l1_gas_price(&self) -> Result { + Ok(self.get_l1_gas_price_impl()) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs index 2c1b9bc85277..958c4ccd8314 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs @@ -1,4 +1,4 @@ -use crate::api_server::web3::namespaces::eth::EthNamespace; +use crate::{api_server::web3::namespaces::eth::EthNamespace, l1_gas_price::L1GasPriceProvider}; use zksync_types::{ api::{ @@ -16,7 +16,7 @@ use zksync_web3_decl::{ types::{Filter, FilterChanges}, }; -impl EthNamespaceServer for EthNamespace { +impl EthNamespaceServer for EthNamespace { fn get_block_number(&self) -> RpcResult { self.get_block_number_impl() .map_err(|err| CallError::from_std_error(err).into()) diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index 372825c2a5a0..a85b7f29c074 100644 --- 
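The two endpoints added above (`zks_getBytecodeByHash` and `zks_getL1GasPrice`) follow the usual JSON-RPC 2.0 shape. A sketch of a `zks_getL1GasPrice` request body, assuming `serde_json`; the response value in the comment is only an example:

fn main() {
    let request = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "zks_getL1GasPrice",
        "params": []
    });
    println!("{request}");
    // A successful response carries the price as a hex quantity, e.g.
    // {"jsonrpc":"2.0","id":1,"result":"0x3b9aca00"} for 1 gwei.
}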
a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -1,4 +1,4 @@ -use crate::api_server::web3::namespaces::zks::ZksNamespace; +use crate::{api_server::web3::namespaces::zks::ZksNamespace, l1_gas_price::L1GasPriceProvider}; use bigdecimal::BigDecimal; use std::collections::HashMap; use zksync_types::{ @@ -15,7 +15,7 @@ use zksync_web3_decl::{ types::Token, }; -impl ZksNamespaceServer for ZksNamespace { +impl ZksNamespaceServer for ZksNamespace { fn estimate_fee(&self, req: CallRequest) -> RpcResult { self.estimate_fee_impl(req) .map_err(|err| CallError::from_std_error(err).into()) @@ -131,4 +131,12 @@ impl ZksNamespaceServer for ZksNamespace { self.get_l1_batch_details_impl(batch_number) .map_err(|err| CallError::from_std_error(err).into()) } + + fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult>> { + Ok(self.get_bytecode_by_hash_impl(hash)) + } + + fn get_l1_gas_price(&self) -> RpcResult { + Ok(self.get_l1_gas_price_impl()) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/mod.rs b/core/bin/zksync_core/src/api_server/web3/mod.rs index 06eec3e4f741..ed895234434a 100644 --- a/core/bin/zksync_core/src/api_server/web3/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/mod.rs @@ -1,39 +1,45 @@ // Built-in uses +use std::collections::HashMap; +use std::net::SocketAddr; use std::sync::{Arc, RwLock}; use std::time::Duration; + // External uses use futures::channel::oneshot; use futures::FutureExt; use jsonrpc_core::IoHandler; use jsonrpc_pubsub::PubSubHandler; -use once_cell::{self, sync::Lazy}; use tokio::sync::watch; -use zksync_dal::ConnectionPool; // Workspace uses -use zksync_config::ZkSyncConfig; -use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::ConnectionPool; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner}; -use zksync_types::H256; +use zksync_types::{Address, H256}; use zksync_web3_decl::{ jsonrpsee::{server::ServerBuilder, RpcModule}, namespaces::{EthNamespaceServer, NetNamespaceServer, Web3NamespaceServer, ZksNamespaceServer}, }; -use crate::gas_adjuster::GasAdjuster; +use crate::l1_gas_price::L1GasPriceProvider; +use crate::sync_layer::SyncState; + +use self::state::InternalApiConfig; // Local uses use super::tx_sender::TxSender; use backend_jsonrpc::{ namespaces::{ - eth::EthNamespaceT, net::NetNamespaceT, web3::Web3NamespaceT, zks::ZksNamespaceT, + debug::DebugNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, web3::Web3NamespaceT, + zks::ZksNamespaceT, }, pub_sub::Web3PubSub, }; -use namespaces::{EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, ZksNamespace}; +use namespaces::{ + DebugNamespace, EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, ZksNamespace, +}; use pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; use state::{Filters, RpcState}; -use zksync_contracts::{ESTIMATE_FEE_BLOCK_CODE, PLAYGROUND_BLOCK_BOOTLOADER_CODE}; pub mod backend_jsonrpc; pub mod backend_jsonrpsee; @@ -41,46 +47,126 @@ pub mod namespaces; mod pubsub_notifier; pub mod state; -pub fn get_config() -> &'static ZkSyncConfig { - static ZKSYNC_CONFIG: Lazy = Lazy::new(ZkSyncConfig::from_env); - - &ZKSYNC_CONFIG +#[derive(Debug, Clone, Copy)] +enum ApiBackend { + Jsonrpsee, + Jsonrpc, } -impl RpcState { - pub fn init( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - req_entities_limit: usize, - filters_limit: usize, - account_pks: 
Vec, - gas_adjuster: Arc>, - ) -> Self { - let config = get_config(); - let mut storage = replica_connection_pool.access_storage_blocking(); +#[derive(Debug, Clone, Copy)] +enum ApiTransport { + WebSocket(SocketAddr), + Http(SocketAddr), +} - let base_system_contracts = storage.storage_dal().get_base_system_contracts( - config.chain.state_keeper.bootloader_hash, - config.chain.state_keeper.default_aa_hash, - ); +#[derive(Debug)] +pub struct ApiBuilder { + backend: ApiBackend, + pool: ConnectionPool, + config: InternalApiConfig, + transport: Option, + tx_sender: Option>, + filters_limit: Option, + subscriptions_limit: Option, + sync_state: Option, + threads: Option, + polling_interval: Option, + accounts: HashMap, + debug_namespace_config: Option<(BaseSystemContractsHashes, u64, Option)>, +} - let mut playground_base_system_contracts = base_system_contracts.clone(); - let mut estimate_fee_base_system_contracts = base_system_contracts; - playground_base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); - estimate_fee_base_system_contracts.bootloader = ESTIMATE_FEE_BLOCK_CODE.clone(); +impl ApiBuilder { + pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { + Self { + backend: ApiBackend::Jsonrpsee, + transport: None, + pool, + sync_state: None, + tx_sender: None, + filters_limit: None, + subscriptions_limit: None, + threads: None, + polling_interval: None, + debug_namespace_config: None, + accounts: Default::default(), + config, + } + } - drop(storage); - let tx_sender = TxSender::new( + pub fn jsonrpc_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { + Self { + backend: ApiBackend::Jsonrpc, + transport: None, + pool, + sync_state: None, + tx_sender: None, + filters_limit: None, + subscriptions_limit: None, + threads: None, + polling_interval: None, + debug_namespace_config: None, + accounts: Default::default(), config, - master_connection_pool, - replica_connection_pool.clone(), - gas_adjuster, - playground_base_system_contracts, - estimate_fee_base_system_contracts, - ); + } + } + + pub fn ws(mut self, port: u16) -> Self { + self.transport = Some(ApiTransport::WebSocket(([0, 0, 0, 0], port).into())); + self + } - let accounts = if cfg!(feature = "openzeppelin_tests") { - account_pks + pub fn http(mut self, port: u16) -> Self { + self.transport = Some(ApiTransport::Http(([0, 0, 0, 0], port).into())); + self + } + + pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { + self.tx_sender = Some(tx_sender); + self + } + + pub fn with_filter_limit(mut self, filters_limit: usize) -> Self { + self.filters_limit = Some(filters_limit); + self + } + + pub fn with_subscriptions_limit(mut self, subscriptions_limit: usize) -> Self { + self.subscriptions_limit = Some(subscriptions_limit); + self + } + + pub fn with_sync_state(mut self, sync_state: SyncState) -> Self { + self.sync_state = Some(sync_state); + self + } + + pub fn with_threads(mut self, threads: usize) -> Self { + self.threads = Some(threads); + self + } + + pub fn with_polling_interval(mut self, polling_interval: Duration) -> Self { + self.polling_interval = Some(polling_interval); + self + } + + pub fn enable_debug_namespace( + mut self, + base_system_contract_hashes: BaseSystemContractsHashes, + fair_l2_gas_price: u64, + cache_misses_limit: Option, + ) -> Self { + self.debug_namespace_config = Some(( + base_system_contract_hashes, + fair_l2_gas_price, + cache_misses_limit, + )); + self + } + + pub fn enable_oz_tests(mut self, account_pks: Vec) -> Self { + if 
cfg!(feature = "openzeppelin_tests") { + self.accounts = account_pks .into_iter() .map(|pk| { let signer = PrivateKeySigner::new(pk); @@ -88,322 +174,290 @@ impl RpcState { .expect("Failed to get address of a signer"); (address, signer) }) - .collect() + .collect(); } else { - Default::default() - }; + vlog::info!("OpenZeppelin tests are not enabled, ignoring `enable_oz_tests` call"); + } + self + } +} +impl ApiBuilder { + fn build_rpc_state(&self) -> RpcState { RpcState { - installed_filters: Arc::new(RwLock::new(Filters::new(filters_limit))), - connection_pool: replica_connection_pool, - tx_sender, - req_entities_limit, - accounts, - config, + installed_filters: Arc::new(RwLock::new(Filters::new( + self.filters_limit.unwrap_or(usize::MAX), + ))), + connection_pool: self.pool.clone(), + tx_sender: self.tx_sender.clone().expect("TxSender is not provided"), + sync_state: self.sync_state.clone(), + api_config: self.config.clone(), + accounts: self.accounts.clone(), #[cfg(feature = "openzeppelin_tests")] known_bytecodes: Arc::new(RwLock::new(Default::default())), } } -} -pub fn start_http_rpc_server_old( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - _stop_receiver: watch::Receiver, - gas_adjuster: Arc>, -) -> tokio::task::JoinHandle<()> { - let io_handler = build_http_io_handler( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - let addr = config.api.web3_json_rpc.http_bind_addr(); - let threads_per_server = config.api.web3_json_rpc.threads_per_server as usize; - - let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(threads_per_server) - .build() - .unwrap(); - - let server = jsonrpc_http_server::ServerBuilder::new(io_handler) - .threads(1) - .event_loop_executor(runtime.handle().clone()) - .start_http(&addr) - .unwrap(); - - server.wait(); - let _ = sender; - }); - - tokio::spawn(recv.map(drop)) -} + fn build_rpc_module(&self) -> RpcModule> { + let zksync_network_id = self.config.l2_chain_id; + let rpc_app = self.build_rpc_state(); -fn start_notifying_active_subs( - pub_sub: EthSubscribe, - connection_pool: ConnectionPool, - polling_interval: Duration, - stop_receiver: watch::Receiver, -) -> Vec> { - vec![ - tokio::spawn(notify_blocks( - pub_sub.active_block_subs, - connection_pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_txs( - pub_sub.active_tx_subs, - connection_pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_logs( - pub_sub.active_log_subs, - connection_pool, - polling_interval, - stop_receiver, - )), - ] -} + // Declare namespaces we have. 
+ let eth = EthNamespace::new(rpc_app.clone()); + let net = NetNamespace::new(zksync_network_id); + let web3 = Web3Namespace; + let zks = ZksNamespace::new(rpc_app); + + assert!( + self.debug_namespace_config.is_none(), + "Debug namespace is not supported with jsonrpsee_backend" + ); -pub fn start_ws_rpc_server_old( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - stop_receiver: watch::Receiver, - gas_adjuster: Arc>, -) -> Vec> { - let pub_sub = EthSubscribe::default(); - let mut notify_handles = start_notifying_active_subs( - pub_sub.clone(), - replica_connection_pool.clone(), - config.api.web3_json_rpc.pubsub_interval(), - stop_receiver.clone(), - ); - - let addr = config.api.web3_json_rpc.ws_bind_addr(); - let (sender, recv) = oneshot::channel::<()>(); - let io = build_pubsub_io_handler( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster.clone(), - pub_sub, - ); - - let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( - io, - |context: &jsonrpc_ws_server::RequestContext| { - Arc::new(jsonrpc_pubsub::Session::new(context.sender())) - }, - ) - .max_connections(config.api.web3_json_rpc.subscriptions_limit()) - .start(&addr) - .unwrap(); - let close_handler = server.close_handle(); - - std::thread::spawn(move || { - server.wait().unwrap(); - let _ = sender; - }); - let mut thread_stop_receiver = stop_receiver.clone(); - std::thread::spawn(move || { - let stop_signal = futures::executor::block_on(thread_stop_receiver.changed()); - if stop_signal.is_ok() { - close_handler.close(); - vlog::info!("Stop signal received, WS JSON RPC API is shutting down"); + // Collect all the methods into a single RPC module. + let mut rpc: RpcModule<_> = eth.into_rpc(); + rpc.merge(net.into_rpc()) + .expect("Can't merge net namespace"); + rpc.merge(web3.into_rpc()) + .expect("Can't merge web3 namespace"); + rpc.merge(zks.into_rpc()) + .expect("Can't merge zks namespace"); + + rpc + } + + pub fn build( + mut self, + stop_receiver: watch::Receiver, + ) -> Vec> { + if self.filters_limit.is_none() { + vlog::warn!("Filters limit is not set - unlimited filters are allowed"); } - }); - notify_handles.push(tokio::spawn(gas_adjuster.run(stop_receiver))); - notify_handles.push(tokio::spawn(recv.map(drop))); - notify_handles -} + match (&self.transport, self.subscriptions_limit) { + (Some(ApiTransport::WebSocket(_)), None) => { + vlog::warn!( + "`subscriptions_limit` is not set - unlimited subscriptions are allowed" + ); + } + (Some(ApiTransport::Http(_)), Some(_)) => { + vlog::warn!( + "`subscriptions_limit` is ignored for HTTP transport, use WebSocket instead" + ); + } + _ => {} + } + + match (self.backend, self.transport.take()) { + (ApiBackend::Jsonrpc, Some(ApiTransport::Http(addr))) => { + vec![self.build_jsonrpc_http(addr)] + } + (ApiBackend::Jsonrpc, Some(ApiTransport::WebSocket(addr))) => { + self.build_jsonrpc_ws(addr, stop_receiver) + } + (ApiBackend::Jsonrpsee, Some(ApiTransport::Http(addr))) => { + vec![self.build_jsonrpsee_http(addr)] + } + (ApiBackend::Jsonrpsee, Some(ApiTransport::WebSocket(addr))) => { + vec![self.build_jsonrpsee_ws(addr)] + } + (_, None) => panic!("ApiTransport is not specified"), + } + } + + fn build_jsonrpc_http(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { + let io_handler = { + let zksync_network_id = self.config.l2_chain_id; + let rpc_state = self.build_rpc_state(); + let mut io = IoHandler::new(); + io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); + 
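A hedged sketch of how the builder above might be driven from node startup code; all variable names and port numbers here are placeholders, and the real call sites live outside this diff.

// HTTP server on the jsonrpsee backend.
let http_handles = ApiBuilder::jsonrpsee_backend(internal_api_config.clone(), replica_pool.clone())
    .http(3050)
    .with_threads(8)
    .with_filter_limit(10_000)
    .with_tx_sender(tx_sender.clone())
    .build(stop_receiver.clone());

// WebSocket server on the jsonrpc backend, which is the one that serves `eth_subscribe`.
let ws_handles = ApiBuilder::jsonrpc_backend(internal_api_config, replica_pool)
    .ws(3051)
    .with_polling_interval(std::time::Duration::from_millis(200))
    .with_subscriptions_limit(10_000)
    .with_filter_limit(10_000)
    .with_tx_sender(tx_sender)
    .build(stop_receiver);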
io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); + io.extend_with(Web3Namespace.to_delegate()); + io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); + if let Some((hashes, fair_l2_gas_price, cache_misses_limit)) = + self.debug_namespace_config + { + io.extend_with( + DebugNamespace::new( + rpc_state.connection_pool, + hashes, + fair_l2_gas_price, + cache_misses_limit, + ) + .to_delegate(), + ); + } + + io + }; -pub fn start_http_rpc_server( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, -) -> tokio::task::JoinHandle<()> { - let rpc = build_rpc_module( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - let addr = config.api.web3_json_rpc.http_bind_addr(); - let threads_per_server = config.api.web3_json_rpc.threads_per_server as usize; - - // Start the server in a separate tokio runtime from a dedicated thread. - let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(threads_per_server) - .build() - .unwrap(); - - runtime.block_on(async move { - let server = ServerBuilder::default() - .http_only() - .max_connections(5000) - .build(addr) - .await - .expect("Can't start the HTTP JSON RPC server"); - - let server_handle = server - .start(rpc) - .expect("Failed to start HTTP JSON RPC application"); - server_handle.stopped().await + let (sender, recv) = oneshot::channel::<()>(); + std::thread::spawn(move || { + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + + let server = jsonrpc_http_server::ServerBuilder::new(io_handler) + .threads(1) + .event_loop_executor(runtime.handle().clone()) + .start_http(&addr) + .unwrap(); + + server.wait(); + let _ = sender; }); - sender.send(()).unwrap(); - }); + tokio::spawn(recv.map(drop)) + } - // Notifier for the rest of application about the end of the task. - tokio::spawn(recv.map(drop)) -} + fn build_jsonrpsee_http(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { + let rpc = self.build_rpc_module(); + + // Start the server in a separate tokio runtime from a dedicated thread. + let (sender, recv) = oneshot::channel::<()>(); + std::thread::spawn(move || { + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + + runtime.block_on(async move { + let server = ServerBuilder::default() + .http_only() + .max_connections(5000) + .build(addr) + .await + .expect("Can't start the HTTP JSON RPC server"); + + let server_handle = server + .start(rpc) + .expect("Failed to start HTTP JSON RPC application"); + server_handle.stopped().await + }); + + sender.send(()).unwrap(); + }); -pub fn start_ws_rpc_server( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, -) -> tokio::task::JoinHandle<()> { - let rpc = build_rpc_module( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - let addr = config.api.web3_json_rpc.ws_bind_addr(); - let threads_per_server = config.api.web3_json_rpc.threads_per_server as usize; - - // Start the server in a separate tokio runtime from a dedicated thread. 
- let (sender, recv) = oneshot::channel::<()>(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .worker_threads(threads_per_server) - .build() - .unwrap(); - - runtime.block_on(async move { - let server = ServerBuilder::default() - .ws_only() - .build(addr) - .await - .expect("Can't start the WS JSON RPC server"); - - let server_handle = server - .start(rpc) - .expect("Failed to start WS JSON RPC application"); - server_handle.stopped().await + // Notifier for the rest of application about the end of the task. + tokio::spawn(recv.map(drop)) + } + + fn build_jsonrpsee_ws(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { + vlog::warn!( + "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead" + ); + + let rpc = self.build_rpc_module(); + + // Start the server in a separate tokio runtime from a dedicated thread. + let (sender, recv) = oneshot::channel::<()>(); + std::thread::spawn(move || { + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + + runtime.block_on(async move { + let server = ServerBuilder::default() + .ws_only() + .build(addr) + .await + .expect("Can't start the WS JSON RPC server"); + + let server_handle = server + .start(rpc) + .expect("Failed to start WS JSON RPC application"); + server_handle.stopped().await + }); + + sender.send(()).unwrap(); }); - sender.send(()).unwrap(); - }); + // Notifier for the rest of application about the end of the task. + tokio::spawn(recv.map(drop)) + } - // Notifier for the rest of application about the end of the task. - tokio::spawn(recv.map(drop)) -} + fn build_jsonrpc_ws( + self, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + ) -> Vec> { + let pub_sub = EthSubscribe::default(); + let polling_interval = self.polling_interval.expect("Polling interval is not set"); + + let mut notify_handles = vec![ + tokio::spawn(notify_blocks( + pub_sub.active_block_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + tokio::spawn(notify_txs( + pub_sub.active_tx_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + tokio::spawn(notify_logs( + pub_sub.active_log_subs.clone(), + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )), + ]; + + let (sender, recv) = oneshot::channel::<()>(); + let io = { + let zksync_network_id = self.config.l2_chain_id; + let rpc_state = self.build_rpc_state(); + let mut io = PubSubHandler::default(); + io.extend_with(pub_sub.to_delegate()); + io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); + io.extend_with(ZksNamespace::new(rpc_state).to_delegate()); + io.extend_with(Web3Namespace.to_delegate()); + io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); + io + }; + let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( + io, + |context: &jsonrpc_ws_server::RequestContext| { + Arc::new(jsonrpc_pubsub::Session::new(context.sender())) + }, + ) + .max_connections(self.subscriptions_limit.unwrap_or(usize::MAX)) + .session_stats(TrackOpenWsConnections) + .start(&addr) + .unwrap(); + let close_handler = server.close_handle(); + + std::thread::spawn(move || { + server.wait().unwrap(); + let _ = sender; + }); + std::thread::spawn(move || { + let stop_signal = futures::executor::block_on(stop_receiver.changed()); + if stop_signal.is_ok() { + close_handler.close(); + vlog::info!("Stop signal received, WS JSON RPC API is 
shutting down"); + } + }); -fn build_rpc_state( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, -) -> RpcState { - let req_entities_limit = config.api.web3_json_rpc.req_entities_limit(); - let filters_limit = config.api.web3_json_rpc.filters_limit(); - let account_pks = config.api.web3_json_rpc.account_pks(); - - RpcState::init( - master_connection_pool, - replica_connection_pool, - req_entities_limit, - filters_limit, - account_pks, - gas_adjuster, - ) + notify_handles.push(tokio::spawn(recv.map(drop))); + notify_handles + } } -fn build_http_io_handler( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, -) -> IoHandler { - let rpc_state = build_rpc_state( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - let mut io = IoHandler::new(); - io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(ZksNamespace::new(rpc_state).to_delegate()); - io.extend_with(Web3Namespace.to_delegate()); - io.extend_with(NetNamespace.to_delegate()); - - io -} +struct TrackOpenWsConnections; -fn build_pubsub_io_handler( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, - pub_sub: EthSubscribe, -) -> PubSubHandler> { - let rpc_state = build_rpc_state( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - let mut io = PubSubHandler::default(); - io.extend_with(pub_sub.to_delegate()); - io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(ZksNamespace::new(rpc_state).to_delegate()); - io.extend_with(Web3Namespace.to_delegate()); - io.extend_with(NetNamespace.to_delegate()); - - io -} +impl jsonrpc_ws_server::SessionStats for TrackOpenWsConnections { + fn open_session(&self, _id: jsonrpc_ws_server::SessionId) { + metrics::increment_gauge!("api.ws.open_sessions", 1.0); + } -fn build_rpc_module( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, - config: &ZkSyncConfig, - gas_adjuster: Arc>, -) -> RpcModule { - let rpc_app = build_rpc_state( - master_connection_pool, - replica_connection_pool, - config, - gas_adjuster, - ); - - // Declare namespaces we have. - let eth = EthNamespace::new(rpc_app.clone()); - let net = NetNamespace; - let web3 = Web3Namespace; - let zks = ZksNamespace::new(rpc_app); - - // Collect all the methods into a single RPC module. 
- let mut rpc: RpcModule<_> = eth.into_rpc(); - rpc.merge(net.into_rpc()) - .expect("Can't merge net namespace"); - rpc.merge(web3.into_rpc()) - .expect("Can't merge web3 namespace"); - rpc.merge(zks.into_rpc()) - .expect("Can't merge zks namespace"); - rpc + fn close_session(&self, _id: jsonrpc_ws_server::SessionId) { + metrics::decrement_gauge!("api.ws.open_sessions", 1.0); + } } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs new file mode 100644 index 000000000000..d3c09e85e2e9 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -0,0 +1,155 @@ +use crate::api_server::execution_sandbox::execute_tx_eth_call; +use crate::api_server::web3::backend_jsonrpc::namespaces::debug::TracerConfig; +use std::time::Instant; +use zksync_contracts::{ + BaseSystemContracts, BaseSystemContractsHashes, PLAYGROUND_BLOCK_BOOTLOADER_CODE, +}; +use zksync_dal::ConnectionPool; +use zksync_types::api::{BlockId, BlockNumber, DebugCall, ResultDebugCall}; +use zksync_types::transaction_request::{l2_tx_from_call_req, CallRequest}; +use zksync_types::vm_trace::{Call, VmTrace}; +use zksync_types::{H256, USED_BOOTLOADER_MEMORY_BYTES}; +use zksync_web3_decl::error::Web3Error; + +#[derive(Debug, Clone)] +pub struct DebugNamespace { + pub connection_pool: ConnectionPool, + pub fair_l2_gas_price: u64, + pub base_system_contracts: BaseSystemContracts, + pub vm_execution_cache_misses_limit: Option, +} + +impl DebugNamespace { + pub fn new( + connection_pool: ConnectionPool, + base_system_contract_hashes: BaseSystemContractsHashes, + fair_l2_gas_price: u64, + vm_execution_cache_misses_limit: Option, + ) -> Self { + let mut storage = connection_pool.access_storage_blocking(); + + let mut base_system_contracts = storage.storage_dal().get_base_system_contracts( + base_system_contract_hashes.bootloader, + base_system_contract_hashes.default_aa, + ); + + drop(storage); + + base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); + Self { + connection_pool, + fair_l2_gas_price, + base_system_contracts, + vm_execution_cache_misses_limit, + } + } + + #[tracing::instrument(skip(self))] + pub fn debug_trace_block_impl( + &self, + block: BlockId, + options: Option, + ) -> Result, Web3Error> { + let only_top_call = options + .map(|options| options.tracer_config.only_top_call) + .unwrap_or(false); + let call_trace = self + .connection_pool + .access_storage_blocking() + .blocks_web3_dal() + .get_trace_for_miniblock(block)?; + Ok(call_trace + .into_iter() + .map(|call_trace| { + let mut result: DebugCall = call_trace.into(); + if only_top_call { + result.calls = vec![]; + } + ResultDebugCall { result } + }) + .collect()) + } + + #[tracing::instrument(skip(self))] + pub fn debug_trace_transaction_impl( + &self, + tx_hash: H256, + options: Option, + ) -> Option { + let only_top_call = options + .map(|options| options.tracer_config.only_top_call) + .unwrap_or(false); + let call_trace = self + .connection_pool + .access_storage_blocking() + .transactions_dal() + .get_call_trace(tx_hash); + call_trace.map(|call_trace| { + let mut result: DebugCall = call_trace.into(); + if only_top_call { + result.calls = vec![]; + } + result + }) + } + + #[tracing::instrument(skip(self, request, block))] + pub fn debug_trace_call_impl( + &self, + request: CallRequest, + block: Option, + options: Option, + ) -> Result { + let start = Instant::now(); + let only_top_call = options + .map(|options| 
options.tracer_config.only_top_call) + .unwrap_or(false); + let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let tx = l2_tx_from_call_req(request, USED_BOOTLOADER_MEMORY_BYTES)?; + + let enforced_base_fee = Some(tx.common_data.fee.max_fee_per_gas.as_u64()); + // We don't need properly trace if we only need top call + let result = execute_tx_eth_call( + &self.connection_pool, + tx.clone(), + block, + 100000, + self.fair_l2_gas_price, + enforced_base_fee, + &self.base_system_contracts, + self.vm_execution_cache_misses_limit, + !only_top_call, + )?; + + let (output, revert_reason) = match result.revert_reason { + Some(result) => (vec![], Some(result.revert_reason.to_string())), + None => ( + result + .return_data + .into_iter() + .flat_map(|val| { + let bytes: [u8; 32] = val.into(); + bytes.to_vec() + }) + .collect::>(), + None, + ), + }; + let trace = match result.trace { + VmTrace::CallTrace(trace) => trace, + VmTrace::ExecutionTrace(_) => vec![], + }; + let call = Call::new_high_level( + u32::MAX, + result.gas_used, + tx.execute.value, + tx.execute.calldata, + output, + revert_reason, + trace, + ); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => "debug_trace_call"); + Ok(call.into()) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs index 95954524e860..630be1e25c08 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -10,8 +10,8 @@ use zksync_types::{ l2::{L2Tx, TransactionType}, transaction_request::{l2_tx_from_call_req, CallRequest}, utils::decompose_full_nonce, - web3::types::SyncState, - AccountTreeId, Bytes, L2ChainId, MiniblockNumber, StorageKey, H256, L2_ETH_TOKEN_ADDRESS, + web3::types::{SyncInfo, SyncState}, + AccountTreeId, Bytes, MiniblockNumber, StorageKey, H256, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, U256, }; @@ -20,15 +20,19 @@ use zksync_web3_decl::{ types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64}, }; -use crate::api_server::{ - execution_sandbox::execute_tx_eth_call, web3::backend_jsonrpc::error::internal_error, - web3::state::RpcState, +use crate::{ + api_server::{ + execution_sandbox::execute_tx_eth_call, web3::backend_jsonrpc::error::internal_error, + web3::state::RpcState, + }, + l1_gas_price::L1GasPriceProvider, }; use zksync_utils::u256_to_h256; #[cfg(feature = "openzeppelin_tests")] use zksync_utils::bytecode::hash_bytecode; + #[cfg(feature = "openzeppelin_tests")] use { zksync_eth_signer::EthereumSigner, @@ -43,12 +47,12 @@ pub const EVENT_TOPIC_NUMBER_LIMIT: usize = 4; pub const PROTOCOL_VERSION: &str = "zks/1"; #[derive(Debug, Clone)] -pub struct EthNamespace { - pub state: RpcState, +pub struct EthNamespace { + pub state: RpcState, } -impl EthNamespace { - pub fn new(state: RpcState) -> Self { +impl EthNamespace { + pub fn new(state: RpcState) -> Self { Self { state } } @@ -79,14 +83,19 @@ impl EthNamespace { let start = Instant::now(); let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + + let mut request_with_set_nonce = request.clone(); + self.state + .set_nonce_for_call_request(&mut request_with_set_nonce)?; + #[cfg(not(feature = "openzeppelin_tests"))] - let tx = l2_tx_from_call_req(request, self.state.config.api.web3_json_rpc.max_tx_size)?; + let tx = l2_tx_from_call_req(request, self.state.api_config.max_tx_size)?; #[cfg(feature = "openzeppelin_tests")] let tx: L2Tx = self 
.convert_evm_like_deploy_requests(tx_req_from_call_req( request, - self.state.config.api.web3_json_rpc.max_tx_size, + self.state.api_config.max_tx_size, )?)? .try_into()?; @@ -98,11 +107,17 @@ impl EthNamespace { self.state .tx_sender .0 - .gas_adjuster + .l1_gas_price_source .estimate_effective_gas_price(), - self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price, + self.state.tx_sender.0.sender_config.fair_l2_gas_price, enforced_base_fee, &self.state.tx_sender.0.playground_base_system_contracts, + self.state + .tx_sender + .0 + .sender_config + .vm_execution_cache_misses_limit, + false, )?; let mut res_bytes = match result.revert_reason { @@ -135,18 +150,32 @@ impl EthNamespace { _block: Option, ) -> Result { let start = Instant::now(); + let mut request_with_gas_per_pubdata_overridden = request; - let is_eip712 = request.eip712_meta.is_some(); + self.state + .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden)?; + + if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { + if eip712_meta.gas_per_pubdata == U256::zero() { + eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); + } + } + + let is_eip712 = request_with_gas_per_pubdata_overridden + .eip712_meta + .is_some(); #[cfg(not(feature = "openzeppelin_tests"))] - let mut tx: L2Tx = - l2_tx_from_call_req(request, self.state.config.api.web3_json_rpc.max_tx_size)?; + let mut tx: L2Tx = l2_tx_from_call_req( + request_with_gas_per_pubdata_overridden, + self.state.api_config.max_tx_size, + )?; #[cfg(feature = "openzeppelin_tests")] let mut tx: L2Tx = self .convert_evm_like_deploy_requests(tx_req_from_call_req( - request, - self.state.config.api.web3_json_rpc.max_tx_size, + request_with_gas_per_pubdata_overridden, + self.state.api_config.max_tx_size, )?)? 
.try_into()?; @@ -161,27 +190,17 @@ impl EthNamespace { tx.common_data.fee.max_fee_per_gas = self.state.tx_sender.gas_price().into(); tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas; - tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); // Modify the l1 gas price with the scale factor - let scale_factor = self - .state - .config - .api - .web3_json_rpc - .estimate_gas_scale_factor; - let acceptable_overestimation = self - .state - .config - .api - .web3_json_rpc - .estimate_gas_acceptable_overestimation; + let scale_factor = self.state.api_config.estimate_gas_scale_factor; + let acceptable_overestimation = + self.state.api_config.estimate_gas_acceptable_overestimation; let fee = self .state .tx_sender .get_txs_fee_in_wei(tx.into(), scale_factor, acceptable_overestimation) - .map_err(|err| Web3Error::SubmitTransactionError(err.to_string()))?; + .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas"); Ok(fee.gas_limit) @@ -284,11 +303,7 @@ impl EthNamespace { .connection_pool .access_storage_blocking() .blocks_web3_dal() - .get_block_by_web3_block_id( - block, - full_transactions, - L2ChainId(self.state.config.chain.eth.zksync_network_id), - ) + .get_block_by_web3_block_id(block, full_transactions, self.state.api_config.l2_chain_id) .map_err(|err| internal_error(endpoint_name, err)); metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); @@ -341,7 +356,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn chain_id_impl(&self) -> U64 { - self.state.config.chain.eth.zksync_network_id.into() + self.state.api_config.l2_chain_id.0.into() } #[tracing::instrument(skip(self))] @@ -421,7 +436,7 @@ impl EthNamespace { .connection_pool .access_storage_blocking() .transactions_web3_dal() - .get_transaction(id, L2ChainId(self.state.config.chain.eth.zksync_network_id)) + .get_transaction(id, self.state.api_config.l2_chain_id) .map_err(|err| internal_error(endpoint_name, err)); if let Some(proxy) = &self.state.tx_sender.0.proxy { @@ -458,7 +473,7 @@ impl EthNamespace { let start = Instant::now(); let endpoint_name = "get_transaction_receipt"; - let res = self + let mut receipt = self .state .connection_pool .access_storage_blocking() @@ -466,8 +481,32 @@ impl EthNamespace { .get_transaction_receipt(hash) .map_err(|err| internal_error(endpoint_name, err)); + if let Some(proxy) = &self.state.tx_sender.0.proxy { + // We're running an external node + if matches!(receipt, Ok(None)) { + // If the transaction is not in the db, query main node. + // Because it might be the case that it got rejected in state keeper + // and won't be synced back to us, but we still want to return a receipt. + // We want to only forwared these kinds of receipts because otherwise + // clients will assume that the transaction they got the receipt for + // was already processed on the EN (when it was not), + // and will think that the state has already been updated on the EN (when it was not). + if let Ok(Some(main_node_receipt)) = proxy + .request_tx_receipt(hash) + .map_err(|err| internal_error(endpoint_name, err)) + { + if main_node_receipt.status == Some(0.into()) + && main_node_receipt.block_number.is_none() + { + // Transaction was rejected in state-keeper. 
+ receipt = Ok(Some(main_node_receipt)); + } + } + } + } + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - res + receipt } #[tracing::instrument(skip(self))] @@ -598,7 +637,10 @@ impl EthNamespace { 1, "reason" => err.grafana_error_code() ); - Err(Web3Error::SubmitTransactionError(err.to_string())) + Err(Web3Error::SubmitTransactionError( + err.to_string(), + err.data(), + )) } Ok(_) => Ok(hash), }; @@ -614,7 +656,21 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn syncing_impl(&self) -> SyncState { - SyncState::NotSyncing + if let Some(state) = self.state.sync_state.as_ref() { + // Node supports syncing process (i.e. not the main node). + if state.is_synced() { + SyncState::NotSyncing + } else { + SyncState::Syncing(SyncInfo { + starting_block: 0u64.into(), // We always start syncing from genesis right now. + current_block: state.get_local_block().0.into(), + highest_block: state.get_main_node_block().0.into(), + }) + } + } else { + // If there is no sync state, then the node is the main node and it's always synced. + SyncState::NotSyncing + } } #[tracing::instrument(skip(self, typed_filter))] @@ -631,7 +687,7 @@ impl EthNamespace { .connection_pool .access_storage_blocking() .blocks_web3_dal() - .get_block_hashes_after(from_block, self.state.req_entities_limit) + .get_block_hashes_after(from_block, self.state.api_config.req_entities_limit) .map_err(|err| internal_error(method_name, err))?; ( FilterChanges::Hashes(block_hashes), @@ -646,7 +702,7 @@ impl EthNamespace { .transactions_web3_dal() .get_pending_txs_hashes_after( from_timestamp, - Some(self.state.req_entities_limit), + Some(self.state.api_config.req_entities_limit), ) .map_err(|err| internal_error(method_name, err))?; ( @@ -687,11 +743,14 @@ impl EthNamespace { // In this case we should return error and suggest requesting logs with smaller block range. if let Some(miniblock_number) = storage .events_web3_dal() - .get_log_block_number(get_logs_filter.clone(), self.state.req_entities_limit) + .get_log_block_number( + get_logs_filter.clone(), + self.state.api_config.req_entities_limit, + ) .map_err(|err| internal_error(method_name, err))? { return Err(Web3Error::LogsLimitExceeded( - self.state.req_entities_limit, + self.state.api_config.req_entities_limit, from_block.0, miniblock_number.0 - 1, )); @@ -699,7 +758,7 @@ impl EthNamespace { let logs = storage .events_web3_dal() - .get_logs(get_logs_filter, self.state.req_entities_limit) + .get_logs(get_logs_filter, self.state.api_config.req_entities_limit) .map_err(|err| internal_error(method_name, err))?; let new_from_block = logs .last() @@ -757,9 +816,9 @@ impl EthNamespace { .from .and_then(|from| self.state.accounts.get(&from).cloned()) { - let chain_id = self.state.config.chain.eth.zksync_network_id.into(); + let chain_id = self.state.api_config.l2_chain_id; let domain = Eip712Domain::new(chain_id); - let signature = async_std::task::block_on(async { + let signature = crate::block_on(async { signer .sign_typed_data(&domain, &transaction_request) .await @@ -844,7 +903,7 @@ impl EthNamespace { // They are moved into a separate `impl` block so they don't make the actual implementation noisy. // This `impl` block contains methods that we *have* to implement for compliance, but don't really // make sense in terms in L2. -impl EthNamespace { +impl EthNamespace { pub fn coinbase_impl(&self) -> Address { // There is no coinbase account. 
Address::default() diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs index b008c8181b2b..d7f5482e1178 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs @@ -1,6 +1,7 @@ //! Actual implementation of Web3 API namespaces logic, not tied to the backend //! used to create a JSON RPC server. +pub mod debug; pub mod eth; pub mod eth_subscribe; pub mod net; @@ -12,8 +13,8 @@ use zksync_types::U256; use zksync_utils::{biguint_to_u256, u256_to_biguint}; pub use self::{ - eth::EthNamespace, eth_subscribe::EthSubscribe, net::NetNamespace, web3::Web3Namespace, - zks::ZksNamespace, + debug::DebugNamespace, eth::EthNamespace, eth_subscribe::EthSubscribe, net::NetNamespace, + web3::Web3Namespace, zks::ZksNamespace, }; pub fn scale_u256(val: U256, scale_factor: &Ratio) -> U256 { diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/net.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/net.rs index 8616eac62a3f..b31279ab6935 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/net.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/net.rs @@ -1,13 +1,17 @@ -use crate::api_server::web3::get_config; - -use zksync_types::U256; +use zksync_types::{L2ChainId, U256}; #[derive(Debug, Clone)] -pub struct NetNamespace; +pub struct NetNamespace { + zksync_network_id: L2ChainId, +} impl NetNamespace { + pub fn new(zksync_network_id: L2ChainId) -> Self { + Self { zksync_network_id } + } + pub fn version_impl(&self) -> String { - get_config().chain.eth.zksync_network_id.to_string() + self.zksync_network_id.to_string() } pub fn peer_count_impl(&self) -> U256 { diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs index 6ac4765c437c..d68521ee3dbb 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,8 +1,12 @@ -use bigdecimal::{BigDecimal, Zero}; use std::time::Instant; use std::{collections::HashMap, convert::TryInto}; +use bigdecimal::{BigDecimal, Zero}; + use zksync_mini_merkle_tree::mini_merkle_tree_proof; + +#[cfg(feature = "openzeppelin_tests")] +use zksync_types::Bytes; use zksync_types::{ api::{BridgeAddresses, GetLogsFilter, L2ToL1LogProof, TransactionDetails, U64}, commitment::CommitmentSerializable, @@ -23,32 +27,40 @@ use zksync_web3_decl::{ }; use crate::api_server::web3::{backend_jsonrpc::error::internal_error, RpcState}; +use crate::fee_ticker::FeeTicker; use crate::fee_ticker::{error::TickerError, TokenPriceRequestType}; - -#[cfg(feature = "openzeppelin_tests")] -use zksync_types::Bytes; +use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug, Clone)] -pub struct ZksNamespace { - pub state: RpcState, +pub struct ZksNamespace { + pub state: RpcState, } -impl ZksNamespace { - pub fn new(state: RpcState) -> Self { +impl ZksNamespace { + pub fn new(state: RpcState) -> Self { Self { state } } #[tracing::instrument(skip(self, request))] pub fn estimate_fee_impl(&self, request: CallRequest) -> Result { let start = Instant::now(); + let mut request_with_gas_per_pubdata_overridden = request; + + self.state + .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden)?; + + if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { + eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); + } 
- let mut tx = l2_tx_from_call_req(request, self.state.config.api.web3_json_rpc.max_tx_size)?; + let mut tx = l2_tx_from_call_req( + request_with_gas_per_pubdata_overridden, + self.state.api_config.max_tx_size, + )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. - let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; - tx.common_data.fee.max_fee_per_gas = fair_l2_gas_price.into(); - tx.common_data.fee.max_priority_fee_per_gas = fair_l2_gas_price.into(); + tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); let fee = self.estimate_fee(tx.into())?; @@ -60,17 +72,19 @@ impl ZksNamespace { #[tracing::instrument(skip(self, request))] pub fn estimate_l1_to_l2_gas_impl(&self, request: CallRequest) -> Result { let start = Instant::now(); - - let mut tx: L1Tx = request.try_into().map_err(Web3Error::SerializationError)?; - + let mut request_with_gas_per_pubdata_overridden = request; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. - let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; - tx.common_data.max_fee_per_gas = fair_l2_gas_price.into(); - if tx.common_data.gas_per_pubdata_limit == U256::zero() { - tx.common_data.gas_per_pubdata_limit = REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(); + if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { + if eip712_meta.gas_per_pubdata == U256::zero() { + eip712_meta.gas_per_pubdata = REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(); + } } + let tx: L1Tx = request_with_gas_per_pubdata_overridden + .try_into() + .map_err(Web3Error::SerializationError)?; + let fee = self.estimate_fee(tx.into())?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas_l1_to_l2"); @@ -78,49 +92,37 @@ impl ZksNamespace { } fn estimate_fee(&self, tx: Transaction) -> Result { - let scale_factor = self - .state - .config - .api - .web3_json_rpc - .estimate_gas_scale_factor; - let acceptable_overestimation = self - .state - .config - .api - .web3_json_rpc - .estimate_gas_acceptable_overestimation; + let scale_factor = self.state.api_config.estimate_gas_scale_factor; + let acceptable_overestimation = + self.state.api_config.estimate_gas_acceptable_overestimation; let fee = self .state .tx_sender .get_txs_fee_in_wei(tx, scale_factor, acceptable_overestimation) - .map_err(|err| Web3Error::SubmitTransactionError(err.to_string()))?; + .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; Ok(fee) } #[tracing::instrument(skip(self))] pub fn get_main_contract_impl(&self) -> Address { - self.state.config.contracts.diamond_proxy_addr + self.state.api_config.diamond_proxy_addr } #[tracing::instrument(skip(self))] pub fn get_testnet_paymaster_impl(&self) -> Option
{ - self.state.config.contracts.l2_testnet_paymaster_addr + self.state.api_config.l2_testnet_paymaster_addr } #[tracing::instrument(skip(self))] pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - BridgeAddresses { - l1_erc20_default_bridge: self.state.config.contracts.l1_erc20_bridge_proxy_addr, - l2_erc20_default_bridge: self.state.config.contracts.l2_erc20_bridge_addr, - } + self.state.api_config.bridge_addresses.clone() } #[tracing::instrument(skip(self))] pub fn l1_chain_id_impl(&self) -> U64 { - U64::from(*self.state.config.chain.eth.network.chain_id()) + U64::from(*self.state.api_config.l1_chain_id) } #[tracing::instrument(skip(self))] @@ -156,11 +158,17 @@ impl ZksNamespace { let start = Instant::now(); let endpoint_name = "get_token_price"; - let result = match self - .state - .tx_sender - .token_price(TokenPriceRequestType::USDForOneToken, l2_token) - { + let token_price_result = { + let mut storage = self.state.connection_pool.access_storage_blocking(); + let mut tokens_web3_dal = storage.tokens_web3_dal(); + FeeTicker::get_l2_token_price( + &mut tokens_web3_dal, + TokenPriceRequestType::USDForOneToken, + &l2_token, + ) + }; + + let result = match token_price_result { Ok(price) => Ok(price), Err(TickerError::PriceNotTracked(_)) => Ok(BigDecimal::zero()), Err(err) => Err(internal_error(endpoint_name, err)), @@ -302,7 +310,7 @@ impl ZksNamespace { addresses: vec![L1_MESSENGER_ADDRESS], topics: vec![(2, vec![address_to_h256(&sender)]), (3, vec![msg])], }, - self.state.req_entities_limit, + self.state.api_config.req_entities_limit, ) .map_err(|err| internal_error(endpoint_name, err))? .iter() @@ -472,7 +480,10 @@ impl ZksNamespace { .access_storage_blocking() .explorer() .blocks_dal() - .get_block_details(block_number) + .get_block_details( + block_number, + self.state.tx_sender.0.sender_config.fee_account_addr, + ) .map_err(|err| internal_error(endpoint_name, err)); metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); @@ -509,7 +520,7 @@ impl ZksNamespace { let start = Instant::now(); let endpoint_name = "get_transaction_details"; - let tx_details = self + let mut tx_details = self .state .connection_pool .access_storage_blocking() @@ -517,6 +528,17 @@ impl ZksNamespace { .get_transaction_details(hash) .map_err(|err| internal_error(endpoint_name, err)); + if let Some(proxy) = &self.state.tx_sender.0.proxy { + // We're running an external node - we should query the main node directly + // in case the transaction was proxied but not yet synced back to us + if matches!(tx_details, Ok(None)) { + // If the transaction is not in the db, query main node for details + tx_details = proxy + .request_tx_details(hash) + .map_err(|err| internal_error(endpoint_name, err)); + } + } + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); tx_details @@ -544,6 +566,40 @@ impl ZksNamespace { l1_batch } + #[tracing::instrument(skip(self))] + pub fn get_bytecode_by_hash_impl(&self, hash: H256) -> Option> { + let start = Instant::now(); + let endpoint_name = "get_bytecode_by_hash"; + + let bytecode = self + .state + .connection_pool + .access_storage_blocking() + .storage_dal() + .get_factory_dep(hash); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + + bytecode + } + + #[tracing::instrument(skip(self))] + pub fn get_l1_gas_price_impl(&self) -> U64 { + let start = Instant::now(); + let endpoint_name = "get_l1_gas_price"; + + let gas_price = self + .state + .tx_sender + .0 + .l1_gas_price_source + 
.estimate_effective_gas_price(); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + + gas_price.into() + } + #[cfg(feature = "openzeppelin_tests")] /// Saves contract bytecode to memory. pub fn set_known_bytecode_impl(&self, bytecode: Bytes) -> bool { diff --git a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs index 51d2600d519c..813301ef41d9 100644 --- a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs +++ b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs @@ -15,8 +15,7 @@ pub async fn notify_blocks( stop_receiver: watch::Receiver, ) { let mut last_block_number = connection_pool - .access_storage() - .await + .access_storage_blocking() .blocks_web3_dal() .get_sealed_miniblock_number() .unwrap(); @@ -31,8 +30,7 @@ pub async fn notify_blocks( let start = Instant::now(); let new_blocks = connection_pool - .access_storage() - .await + .access_storage_blocking() .blocks_web3_dal() .get_block_headers_after(last_block_number) .unwrap(); @@ -70,8 +68,7 @@ pub async fn notify_txs( let start = Instant::now(); let (new_txs, new_last_time) = connection_pool - .access_storage() - .await + .access_storage_blocking() .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) .unwrap(); @@ -97,8 +94,7 @@ pub async fn notify_logs( stop_receiver: watch::Receiver, ) { let mut last_block_number = connection_pool - .access_storage() - .await + .access_storage_blocking() .blocks_web3_dal() .get_sealed_miniblock_number() .unwrap(); @@ -113,8 +109,7 @@ pub async fn notify_logs( let start = Instant::now(); let new_logs = connection_pool - .access_storage() - .await + .access_storage_blocking() .events_web3_dal() .get_all_logs(last_block_number) .unwrap(); diff --git a/core/bin/zksync_core/src/api_server/web3/state.rs b/core/bin/zksync_core/src/api_server/web3/state.rs index 020e1299d62c..64bf8bbc7022 100644 --- a/core/bin/zksync_core/src/api_server/web3/state.rs +++ b/core/bin/zksync_core/src/api_server/web3/state.rs @@ -8,38 +8,97 @@ use std::sync::RwLock; use crate::api_server::tx_sender::TxSender; use crate::api_server::web3::backend_jsonrpc::error::internal_error; +use crate::sync_layer::SyncState; use zksync_config::ZkSyncConfig; use zksync_dal::ConnectionPool; use zksync_eth_signer::PrivateKeySigner; -use zksync_types::api::{self, TransactionRequest}; -use zksync_types::{l2::L2Tx, Address, MiniblockNumber, H256, U256, U64}; +use zksync_types::{ + api::{self, BlockId, BlockNumber, BridgeAddresses, TransactionRequest}, + l2::L2Tx, + transaction_request::CallRequest, + Address, L1ChainId, L2ChainId, MiniblockNumber, H256, U256, U64, +}; use zksync_web3_decl::{ error::Web3Error, types::{Filter, TypedFilter}, }; -/// Holder for the data required for the API to be functional. +/// Configuration values for the API. +/// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc) +/// may require different configuration layouts. +/// The intention is to only keep the actually used information here. #[derive(Debug, Clone)] -pub struct RpcState { +pub struct InternalApiConfig { + pub l1_chain_id: L1ChainId, + pub l2_chain_id: L2ChainId, + pub max_tx_size: usize, + pub estimate_gas_scale_factor: f64, + pub estimate_gas_acceptable_overestimation: u32, + pub bridge_addresses: BridgeAddresses, + pub diamond_proxy_addr: Address, + pub l2_testnet_paymaster_addr: Option
, + pub req_entities_limit: usize, +} + +impl From for InternalApiConfig { + fn from(config: ZkSyncConfig) -> Self { + Self { + l1_chain_id: config.chain.eth.network.chain_id(), + l2_chain_id: L2ChainId(config.chain.eth.zksync_network_id), + max_tx_size: config.api.web3_json_rpc.max_tx_size, + estimate_gas_scale_factor: config.api.web3_json_rpc.estimate_gas_scale_factor, + estimate_gas_acceptable_overestimation: config + .api + .web3_json_rpc + .estimate_gas_acceptable_overestimation, + bridge_addresses: BridgeAddresses { + l1_erc20_default_bridge: config.contracts.l1_erc20_bridge_proxy_addr, + l2_erc20_default_bridge: config.contracts.l2_erc20_bridge_addr, + }, + diamond_proxy_addr: config.contracts.diamond_proxy_addr, + l2_testnet_paymaster_addr: config.contracts.l2_testnet_paymaster_addr, + req_entities_limit: config.api.web3_json_rpc.req_entities_limit(), + } + } +} + +/// Holder for the data required for the API to be functional. +#[derive(Debug)] +pub struct RpcState { pub installed_filters: Arc>, pub connection_pool: ConnectionPool, - pub tx_sender: TxSender, - pub req_entities_limit: usize, - pub config: &'static ZkSyncConfig, + pub tx_sender: TxSender, + pub sync_state: Option, + pub(super) api_config: InternalApiConfig, pub accounts: HashMap, #[cfg(feature = "openzeppelin_tests")] pub known_bytecodes: Arc>>>, } -impl RpcState { +// Custom implementation is required due to generic param: +// Even though it's under `Arc`, compiler doesn't generate the `Clone` implementation unless +// an unnecessary bound is added. +impl Clone for RpcState { + fn clone(&self) -> Self { + Self { + installed_filters: self.installed_filters.clone(), + connection_pool: self.connection_pool.clone(), + tx_sender: self.tx_sender.clone(), + sync_state: self.sync_state.clone(), + api_config: self.api_config.clone(), + accounts: self.accounts.clone(), + #[cfg(feature = "openzeppelin_tests")] + known_bytecodes: self.known_bytecodes.clone(), + } + } +} + +impl RpcState { pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { - let chain_id = self.config.chain.eth.zksync_network_id; - let (tx_request, hash) = TransactionRequest::from_bytes( - bytes, - chain_id, - self.config.api.web3_json_rpc.max_tx_size, - )?; + let chain_id = self.api_config.l2_chain_id; + let (tx_request, hash) = + TransactionRequest::from_bytes(bytes, chain_id.0, self.api_config.max_tx_size)?; Ok((tx_request.try_into()?, hash)) } @@ -106,6 +165,28 @@ impl RpcState { }; Ok(block_number) } + + pub(crate) fn set_nonce_for_call_request( + &self, + call_request: &mut CallRequest, + ) -> Result<(), Web3Error> { + let method_name = "set_nonce_for_call_request"; + if call_request.nonce.is_none() { + let from = call_request.from.unwrap_or_default(); + let address_historical_nonce = self + .connection_pool + .access_storage_blocking() + .storage_web3_dal() + .get_address_historical_nonce(from, BlockId::Number(BlockNumber::Latest)); + + call_request.nonce = Some( + address_historical_nonce + .unwrap() + .map_err(|result| internal_error(method_name, result.to_string()))?, + ); + } + Ok(()) + } } /// Contains mapping from index to `Filter` with optional location. 
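The `RpcState` rework above makes the API state generic over the L1 gas price provider, which is why a hand-written `Clone` impl shows up in the hunk: `#[derive(Clone)]` would add an unnecessary `G: Clone` bound even though `G` only sits behind an `Arc` inside `TxSender`. Below is a minimal, self-contained sketch of that pattern; the type names (`Shared`, `Handle`, `gas_source`) are illustrative stand-ins, not the real `TxSender` internals.

use std::sync::Arc;

// Stand-in for the Arc-wrapped internals that carry the generic parameter.
struct Shared<G> {
    gas_source: G,
}

// `#[derive(Clone)]` on this type would require `G: Clone`, even though only
// the `Arc` pointer is cloned. Writing the impl by hand drops that bound.
struct Handle<G> {
    inner: Arc<Shared<G>>,
}

impl<G> Clone for Handle<G> {
    fn clone(&self) -> Self {
        Self {
            inner: Arc::clone(&self.inner),
        }
    }
}

fn main() {
    struct NotClone; // deliberately does not implement `Clone`
    let handle = Handle {
        inner: Arc::new(Shared { gas_source: NotClone }),
    };
    let _copy = handle.clone(); // compiles even though `NotClone` is not `Clone`
}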
diff --git a/core/bin/zksync_core/src/bin/block_reverter.rs b/core/bin/zksync_core/src/bin/block_reverter.rs index 937954ba832b..fb10fcd985dd 100644 --- a/core/bin/zksync_core/src/bin/block_reverter.rs +++ b/core/bin/zksync_core/src/bin/block_reverter.rs @@ -1,422 +1,63 @@ -use std::path::Path; -use std::thread::sleep; -use std::time::Duration; -use structopt::StructOpt; +use clap::{Parser, Subcommand}; +use tokio::io::{self, AsyncReadExt}; use zksync_config::ZkSyncConfig; -use zksync_contracts::zksync_contract; use zksync_dal::ConnectionPool; -use zksync_eth_client::clients::http_client::{EthInterface, EthereumClient}; -use zksync_merkle_tree::ZkSyncTree; -use zksync_state::secondary_storage::SecondaryStateStorage; -use zksync_storage::db::Database; -use zksync_storage::RocksDB; -use zksync_types::aggregated_operations::AggregatedActionType; -use zksync_types::ethabi::Token; -use zksync_types::web3::contract::Options; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_types::{L1BatchNumber, U256}; -struct BlockReverter { - config: ZkSyncConfig, - connection_pool: ConnectionPool, -} - -impl BlockReverter { - /// rollback db(postgres + rocksdb) to previous state - async fn rollback_db( - &mut self, - last_l1_batch_to_keep: L1BatchNumber, - rollback_postgres: bool, - rollback_tree: bool, - rollback_sk_cache: bool, - ) { - let last_executed_l1_batch = self - .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) - .await; - assert!( - last_l1_batch_to_keep >= last_executed_l1_batch, - "Attempt to revert already executed blocks" - ); - - if !rollback_tree && rollback_postgres { - println!("You want to rollback Postgres DB without rolling back tree."); - println!("If tree is not yet rolled back to this block then the only way to make it synced with Postgres will be to completely rebuild it."); - println!("Are you sure? 
Print y/n"); - let mut input = String::new(); - std::io::stdin().read_line(&mut input).unwrap(); - if input.trim() != "y" { - std::process::exit(0); - } - } - - // tree needs to be reverted first to keep state recoverable - self.rollback_rocks_dbs(last_l1_batch_to_keep, rollback_tree, rollback_sk_cache) - .await; - - if rollback_postgres { - self.rollback_postgres(last_l1_batch_to_keep).await; - } - } - - async fn rollback_rocks_dbs( - &mut self, - last_l1_batch_to_keep: L1BatchNumber, - rollback_tree: bool, - rollback_sk_cache: bool, - ) { - println!("getting logs that should be applied to rollback state..."); - let logs = self - .connection_pool - .access_storage() - .await - .storage_logs_dedup_dal() - .get_storage_logs_for_revert(last_l1_batch_to_keep); - - if rollback_tree { - // Rolling back both full tree and lightweight tree - if Path::new(self.config.db.path()).exists() { - println!("Rolling back full tree..."); - self.rollback_tree( - last_l1_batch_to_keep, - logs.clone(), - self.config.db.path.clone(), - ) - .await; - } else { - println!("Full Tree not found; skipping"); - } - - if Path::new(self.config.db.merkle_tree_fast_ssd_path()).exists() { - println!("Rolling back lightweight tree..."); - self.rollback_tree( - last_l1_batch_to_keep, - logs.clone(), - self.config.db.merkle_tree_fast_ssd_path.clone(), - ) - .await; - } else { - println!("Lightweight Tree not found; skipping"); - } - } - - if rollback_sk_cache { - assert!( - Path::new(self.config.db.state_keeper_db_path()).exists(), - "Path with state keeper cache DB doesn't exist" - ); - self.rollback_state_keeper_cache(last_l1_batch_to_keep, logs) - .await; - } - } - - /// reverts blocks in merkle tree - async fn rollback_tree( - &mut self, - last_l1_batch_to_keep: L1BatchNumber, - logs: Vec<(H256, Option)>, - path: impl AsRef, - ) { - let db = RocksDB::new(Database::MerkleTree, path, true); - let mut tree = ZkSyncTree::new(db); - - if tree.block_number() <= last_l1_batch_to_keep.0 { - println!("Tree is behind the block to revert to; skipping"); - return; - } +use zksync_core::block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}; - // Convert H256 -> U256, note that tree keys are encoded using little endianness. 
- let logs: Vec<_> = logs - .into_iter() - .map(|(key, value)| (U256::from_little_endian(&key.to_fixed_bytes()), value)) - .collect(); - tree.revert_logs(last_l1_batch_to_keep, logs); - - println!("checking match of the tree root hash and root hash from Postgres..."); - let storage_root_hash = self - .connection_pool - .access_storage() - .await - .blocks_dal() - .get_merkle_state_root(last_l1_batch_to_keep) - .expect("failed to fetch root hash for target block"); - let tree_root_hash = tree.root_hash(); - assert_eq!(&tree_root_hash, storage_root_hash.as_bytes()); - - println!("saving tree changes to disk..."); - tree.save().expect("Unable to update tree state"); - } - - /// reverts blocks in state keeper cache - async fn rollback_state_keeper_cache( - &mut self, - last_l1_batch_to_keep: L1BatchNumber, - logs: Vec<(H256, Option)>, - ) { - println!("opening DB with state keeper cache..."); - let db = RocksDB::new( - Database::StateKeeper, - self.config.db.state_keeper_db_path(), - true, - ); - let mut storage = SecondaryStateStorage::new(db); - - if storage.get_l1_batch_number() > last_l1_batch_to_keep + 1 { - println!("getting contracts and factory deps that should be removed..."); - let (_, last_miniblock_to_keep) = self - .connection_pool - .access_storage() - .await - .blocks_dal() - .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) - .expect("L1 batch should contain at least one miniblock"); - let contracts = self - .connection_pool - .access_storage() - .await - .storage_dal() - .get_contracts_for_revert(last_miniblock_to_keep); - let factory_deps = self - .connection_pool - .access_storage() - .await - .storage_dal() - .get_factory_deps_for_revert(last_miniblock_to_keep); - - println!("rolling back state keeper cache..."); - storage.rollback(logs, contracts, factory_deps, last_l1_batch_to_keep); - } else { - println!("nothing to revert in state keeper cache"); - } - } - - /// reverts data in postgres database - async fn rollback_postgres(&mut self, last_l1_batch_to_keep: L1BatchNumber) { - let (_, last_miniblock_to_keep) = self - .connection_pool - .access_storage() - .await - .blocks_dal() - .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) - .expect("L1 batch should contain at least one miniblock"); - - println!("rolling back postgres data..."); - let mut storage = self.connection_pool.access_storage().await; - let mut transaction = storage.start_transaction().await; - - println!("rolling back transactions state..."); - transaction - .transactions_dal() - .reset_transactions_state(last_miniblock_to_keep); - println!("rolling back events..."); - transaction - .events_dal() - .rollback_events(last_miniblock_to_keep); - println!("rolling back l2 to l1 logs..."); - transaction - .events_dal() - .rollback_l2_to_l1_logs(last_miniblock_to_keep); - println!("rolling back created tokens..."); - transaction - .tokens_dal() - .rollback_tokens(last_miniblock_to_keep); - println!("rolling back factory deps...."); - transaction - .storage_dal() - .rollback_factory_deps(last_miniblock_to_keep); - println!("rolling back storage..."); - transaction - .storage_logs_dal() - .rollback_storage(last_miniblock_to_keep); - println!("rolling back storage logs..."); - transaction - .storage_logs_dal() - .rollback_storage_logs(last_miniblock_to_keep); - println!("rolling back dedup storage logs..."); - transaction - .storage_logs_dedup_dal() - .rollback_storage_logs(last_l1_batch_to_keep); - println!("rolling back l1 batches..."); - transaction - .blocks_dal() - 
.delete_l1_batches(last_l1_batch_to_keep); - println!("rolling back miniblocks..."); - transaction - .blocks_dal() - .delete_miniblocks(last_miniblock_to_keep); - - transaction.commit().await; - } - - /// sends revert transaction to L1 - async fn send_ethereum_revert_transaction( - &mut self, - last_l1_batch_to_keep: L1BatchNumber, - priority_fee_per_gas: U256, - nonce: u64, - ) { - let eth_gateway = EthereumClient::from_config(&self.config); - let revert_blocks = zksync_contract() - .functions - .get("revertBlocks") - .cloned() - .expect("revertBlocks function not found") - .pop() - .expect("revertBlocks function entry not found"); - let args = vec![Token::Uint(U256::from(last_l1_batch_to_keep.0))]; - let raw_tx = revert_blocks - .encode_input(&args) - .expect("Failed to encode transaction data.") - .to_vec(); - let signed_tx = eth_gateway - .sign_prepared_tx_for_addr( - raw_tx.clone(), - self.config.contracts.validator_timelock_addr, - Options::with(|opt| { - opt.gas = Some(5_000_000.into()); - opt.max_priority_fee_per_gas = Some(priority_fee_per_gas); - opt.nonce = Some(nonce.into()); - }), - "block-reverter", - ) - .await - .expect("Failed to sign transaction"); - let tx_hash = eth_gateway - .send_raw_tx(signed_tx.raw_tx) - .await - .expect("failed to send revert transaction to L1"); - - loop { - match eth_gateway - .get_tx_status(tx_hash, "block reverter") - .await - .expect("Failed to get tx status from eth node") - { - Some(status) => { - assert!(status.success); - println!("revert transaction has completed"); - return; - } - None => { - println!("waiting for L1 transaction confirmation..."); - sleep(Duration::from_secs(5)); - } - } - } - } - - async fn get_l1_batch_number_from_contract(&self, op: AggregatedActionType) -> L1BatchNumber { - let function_name = match op { - AggregatedActionType::CommitBlocks => "getTotalBlocksCommitted", - AggregatedActionType::PublishProofBlocksOnchain => "getTotalBlocksVerified", - AggregatedActionType::ExecuteBlocks => "getTotalBlocksExecuted", - }; - let eth_gateway = EthereumClient::from_config(&self.config); - let block_number: U256 = eth_gateway - .call_main_contract_function(function_name, (), None, Options::default(), None) - .await - .unwrap(); - L1BatchNumber(block_number.as_u32()) - } - - /// displays suggested values for rollback - async fn print_suggested_values(&mut self) { - let eth_gateway = EthereumClient::from_config(&self.config); - let last_committed_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::CommitBlocks) - .await; - let last_verified_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::PublishProofBlocksOnchain) - .await; - let last_executed_l1_batch_number = self - .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) - .await; - println!( - "Last L1 batch numbers on contract: committed {}, verified {}, executed {}", - last_committed_l1_batch_number, - last_verified_l1_batch_number, - last_executed_l1_batch_number - ); - - let nonce = eth_gateway - .pending_nonce("reverter") - .await - .unwrap() - .as_u64(); - println!("Suggested values for rollback:"); - println!(" l1 batch number: {}", last_executed_l1_batch_number.0); - println!(" nonce: {}", nonce); - println!( - " priority fee: {:?}", - self.config - .eth_sender - .gas_adjuster - .default_priority_fee_per_gas - ); - } - - /// Clears failed L1 transactions - async fn clear_failed_l1_transactions(&mut self) { - println!("clearing failed L1 transactions..."); - self.connection_pool - 
.access_storage() - .await - .eth_sender_dal() - .clear_failed_transactions(); - } +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version, about = "Block revert utility", long_about = None)] +struct Cli { + #[command(subcommand)] + command: Command, } -#[derive(StructOpt, Debug)] -#[structopt(name = "block revert utility")] -enum Opt { - #[structopt( - name = "print-suggested-values", - about = "Displays suggested values to use" - )] - Display, - - #[structopt( - name = "send-eth-transaction", - about = "Sends revert transaction to L1" - )] +#[derive(Debug, Subcommand)] +enum Command { + /// Displays suggested values to use. + #[command(name = "print-suggested-values")] + Display { + /// Displays the values as a JSON object, so that they are machine-readable. + #[arg(long)] + json: bool, + }, + /// Sends revert transaction to L1. + #[command(name = "send-eth-transaction")] SendEthTransaction { - /// L1 batch number used to rollback to - #[structopt(long)] + /// L1 batch number used to rollback to. + #[arg(long)] l1_batch_number: u32, - - /// Priority fee used for rollback ethereum transaction + /// Priority fee used for rollback ethereum transaction. // We operate only by priority fee because we want to use base fee from ethereum // and send transaction as soon as possible without any resend logic - #[structopt(long)] + #[arg(long)] priority_fee_per_gas: Option, - - /// Nonce used for rollback ethereum transaction - #[structopt(long)] + /// Nonce used for rollback Ethereum transaction. + #[arg(long)] nonce: u64, }, - #[structopt( - name = "rollback-db", - about = "Reverts internal database state to previous block" - )] + /// Reverts internal database state to previous block. + #[command(name = "rollback-db")] RollbackDB { - /// L1 batch number used to rollback to - #[structopt(long)] + /// L1 batch number used to rollback to. + #[arg(long)] l1_batch_number: u32, /// Flag that specifies if Postgres DB should be rolled back. - #[structopt(long)] + #[arg(long)] rollback_postgres: bool, /// Flag that specifies if RocksDB with tree should be rolled back. - #[structopt(long)] + #[arg(long)] rollback_tree: bool, /// Flag that specifies if RocksDB with state keeper cache should be rolled back. - #[structopt(long)] + #[arg(long)] rollback_sk_cache: bool, }, - #[structopt( - name = "clear-failed-transactions", - about = "Clears failed L1 transactions" - )] + /// Clears failed L1 transactions. 
+ #[command(name = "clear-failed-transactions")] ClearFailedL1Transactions, } @@ -424,28 +65,28 @@ enum Opt { async fn main() -> anyhow::Result<()> { let _sentry_guard = vlog::init(); let config = ZkSyncConfig::from_env(); + let default_priority_fee_per_gas = + U256::from(config.eth_sender.gas_adjuster.default_priority_fee_per_gas); let connection_pool = ConnectionPool::new(None, true); - let mut block_reverter = BlockReverter { - config: config.clone(), - connection_pool: connection_pool.clone(), - }; - - match Opt::from_args() { - Opt::Display => block_reverter.print_suggested_values().await, - Opt::SendEthTransaction { + let block_reverter = + BlockReverter::new(config, connection_pool, L1ExecutedBatchesRevert::Disallowed); + + match Cli::parse().command { + Command::Display { json } => { + let suggested_values = block_reverter.suggested_values().await; + if json { + println!("{}", serde_json::to_string(&suggested_values).unwrap()); + } else { + println!("Suggested values for rollback: {:#?}", suggested_values); + } + } + Command::SendEthTransaction { l1_batch_number, priority_fee_per_gas, nonce, } => { - let priority_fee_per_gas = priority_fee_per_gas.map(U256::from).unwrap_or_else(|| { - U256::from( - block_reverter - .config - .eth_sender - .gas_adjuster - .default_priority_fee_per_gas, - ) - }); + let priority_fee_per_gas = + priority_fee_per_gas.map_or(default_priority_fee_per_gas, U256::from); block_reverter .send_ethereum_revert_transaction( L1BatchNumber(l1_batch_number), @@ -454,22 +95,42 @@ async fn main() -> anyhow::Result<()> { ) .await } - Opt::RollbackDB { + Command::RollbackDB { l1_batch_number, rollback_postgres, rollback_tree, rollback_sk_cache, } => { + if !rollback_tree && rollback_postgres { + println!("You want to rollback Postgres DB without rolling back tree."); + println!( + "If tree is not yet rolled back to this block then the only way \ + to make it synced with Postgres will be to completely rebuild it." + ); + println!("Are you sure? Print y/n"); + + let mut input = [0u8]; + io::stdin().read_exact(&mut input).await.unwrap(); + if input[0] != b'y' && input[0] != b'Y' { + std::process::exit(0); + } + } + + let mut flags = BlockReverterFlags::empty(); + if rollback_postgres { + flags |= BlockReverterFlags::POSTGRES; + } + if rollback_tree { + flags |= BlockReverterFlags::TREE; + } + if rollback_sk_cache { + flags |= BlockReverterFlags::SK_CACHE; + } block_reverter - .rollback_db( - L1BatchNumber(l1_batch_number), - rollback_postgres, - rollback_tree, - rollback_sk_cache, - ) + .rollback_db(L1BatchNumber(l1_batch_number), flags) .await } - Opt::ClearFailedL1Transactions => block_reverter.clear_failed_l1_transactions().await, + Command::ClearFailedL1Transactions => block_reverter.clear_failed_l1_transactions().await, } Ok(()) } diff --git a/core/bin/zksync_core/src/bin/en_playground.rs b/core/bin/zksync_core/src/bin/en_playground.rs deleted file mode 100644 index b1450c03208e..000000000000 --- a/core/bin/zksync_core/src/bin/en_playground.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! This file is a playground binary for the External Node development. -//! It's temporary and once a PoC is ready, this file will be replaced by the real EN entrypoint. 
-use zksync_config::ZkSyncConfig; -use zksync_core::{ - state_keeper::{seal_criteria::SealManager, ZkSyncStateKeeper}, - sync_layer::{ - batch_status_updater::run_batch_status_updater, external_io::ExternalIO, - fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, - mock_batch_executor::MockBatchExecutorBuilder, ActionQueue, ExternalNodeSealer, - }, -}; -use zksync_dal::ConnectionPool; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber}; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let _sentry_guard = vlog::init(); - let connection_pool = ConnectionPool::new(None, true); - let config = ZkSyncConfig::from_env(); - - vlog::info!("Started the EN playground"); - - perform_genesis_if_needed(&mut connection_pool.access_storage().await, &config).await; - - let action_queue = ActionQueue::new(); - let en_sealer = ExternalNodeSealer::new(action_queue.clone()); - let sealer = SealManager::custom( - config.chain.state_keeper.clone(), - Vec::new(), - en_sealer.clone().into_unconditional_batch_seal_criterion(), - en_sealer.clone().into_miniblock_seal_criterion(), - ); - - let mock_batch_executor_base = Box::new(MockBatchExecutorBuilder); - - let io = Box::new(ExternalIO::new(Address::default(), action_queue.clone())); - let (_stop_sender, stop_receiver) = tokio::sync::watch::channel::(false); - - let state_keeper = ZkSyncStateKeeper::new(stop_receiver, io, mock_batch_executor_base, sealer); - - // Different envs for the ease of local testing. - // Localhost - // let main_node_url = std::env::var("API_WEB3_JSON_RPC_HTTP_URL").unwrap(); - // Stage - // let main_node_url = "https://z2-dev-api.zksync.dev:443"; - // Testnet - // let main_node_url = "https://zksync2-testnet.zksync.dev:443"; - // Mainnet (doesn't work yet) - // let main_node_url = "https://zksync2-mainnet.zksync.io:443"; - - let fetcher = MainNodeFetcher::new( - &config.api.web3_json_rpc.main_node_url.unwrap(), - L1BatchNumber(0), - MiniblockNumber(1), - L1BatchNumber(0), - L1BatchNumber(0), - L1BatchNumber(0), - action_queue.clone(), - ); - - let _updater_handle = std::thread::spawn(move || run_batch_status_updater(action_queue)); - let _sk_handle = tokio::task::spawn_blocking(|| state_keeper.run()); - fetcher.run().await; - - Ok(()) -} diff --git a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs index b84549eceab2..775eba3419e4 100644 --- a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs +++ b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs @@ -1,12 +1,93 @@ +use clap::Parser; + +use std::{num::NonZeroU32, time::Instant}; + use zksync_config::ZkSyncConfig; use zksync_merkle_tree::ZkSyncTree; +use zksync_merkle_tree2::domain::ZkSyncTree as NewTree; use zksync_storage::db::Database; use zksync_storage::RocksDB; +#[derive(Debug, Parser)] +#[command( + author = "Matter Labs", + version, + about = "Merkle tree consistency checker", + long_about = None +)] +struct Cli { + /// Do not check the old tree implementation in full mode. By default, this is the only + /// tree checked. + #[arg(long = "no-full")] + no_full: bool, + /// Check the old tree implementation in lightweight mode. + #[arg(long = "lightweight")] + lightweight: bool, + /// Check the new tree implementation in lightweight mode. The optional argument + /// specifies the version of the tree to be checked, expressed as a non-zero number + /// of blocks applied to it. By default, the latest tree version is checked. 
+ #[arg(long = "lightweight-new", value_name = "BLOCKS")] + new_lightweight: Option>, +} + +impl Cli { + fn run(self, config: &ZkSyncConfig) { + if !self.no_full { + let db_path = config.db.path(); + vlog::info!( + "Verifying consistency of old tree, full mode at {}", + db_path + ); + let start = Instant::now(); + let db = RocksDB::new(Database::MerkleTree, db_path, true); + let tree = ZkSyncTree::new(db); + tree.verify_consistency(); + vlog::info!("Old tree in full mode verified in {:?}", start.elapsed()); + } + + if self.lightweight { + let db_path = config.db.merkle_tree_fast_ssd_path(); + vlog::info!( + "Verifying consistency of old tree, lightweight mode at {}", + db_path + ); + let start = Instant::now(); + let db = RocksDB::new(Database::MerkleTree, db_path, true); + let tree = ZkSyncTree::new_lightweight(db); + tree.verify_consistency(); + vlog::info!( + "Old tree in lightweight mode verified in {:?}", + start.elapsed() + ); + } + + if let Some(maybe_block_number) = self.new_lightweight { + let db_path = &config.db.new_merkle_tree_ssd_path; + vlog::info!( + "Verifying consistency of new tree, lightweight mode at {}", + db_path + ); + let start = Instant::now(); + let db = RocksDB::new(Database::MerkleTree, db_path, true); + let tree = NewTree::new_lightweight(db); + + let block_number = maybe_block_number.or_else(|| NonZeroU32::new(tree.block_number())); + if let Some(block_number) = block_number { + vlog::info!("Block number to check: {}", block_number); + tree.verify_consistency(block_number); + vlog::info!( + "New tree in lightweight mode verified in {:?}", + start.elapsed() + ); + } else { + vlog::info!("The tree is empty, skipping"); + } + } + } +} + fn main() { let _sentry_guard = vlog::init(); let config = ZkSyncConfig::from_env(); - let db = RocksDB::new(Database::MerkleTree, config.db.path(), true); - let tree = ZkSyncTree::new(db); - tree.verify_consistency(); + Cli::parse().run(&config); } diff --git a/core/bin/zksync_core/src/bin/rocksdb_util.rs b/core/bin/zksync_core/src/bin/rocksdb_util.rs index 41e247703cd5..8085390ce902 100644 --- a/core/bin/zksync_core/src/bin/rocksdb_util.rs +++ b/core/bin/zksync_core/src/bin/rocksdb_util.rs @@ -1,18 +1,22 @@ -use structopt::StructOpt; +use clap::{Parser, Subcommand}; use zksync_config::DBConfig; use zksync_storage::rocksdb::backup::{BackupEngine, BackupEngineOptions, RestoreOptions}; use zksync_storage::rocksdb::{Error, Options, DB}; -#[derive(StructOpt, Debug)] -#[structopt(name = "rocksdb management utility")] -enum Opt { - #[structopt( - name = "backup", - about = "Creates new backup of running rocksdb instance" - )] - Backup, +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version, about = "RocksDB management utility", long_about = None)] +struct Cli { + #[command(subcommand)] + command: Command, +} - #[structopt(name = "restore-from-backup", about = "Restores rocksdb from backup")] +#[derive(Debug, Subcommand)] +enum Command { + /// Creates new backup of running RocksDB instance. + #[command(name = "backup")] + Backup, + /// Restores RocksDB from backup. 
+ #[command(name = "restore-from-backup")] Restore, } @@ -36,9 +40,9 @@ fn restore_from_latest_backup(config: &DBConfig) -> Result<(), Error> { fn main() { let config = DBConfig::from_env(); - match Opt::from_args() { - Opt::Backup => create_backup(&config).unwrap(), - Opt::Restore => restore_from_latest_backup(&config).unwrap(), + match Cli::parse().command { + Command::Backup => create_backup(&config).unwrap(), + Command::Restore => restore_from_latest_backup(&config).unwrap(), } } diff --git a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs new file mode 100644 index 000000000000..d8125a961cf6 --- /dev/null +++ b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs @@ -0,0 +1,53 @@ +use std::io::Write; +use zksync_dal::ConnectionPool; +use zksync_types::explorer_api::SourceCodeData; + +fn main() { + let pool = ConnectionPool::new(Some(1), false); + let mut storage = pool.access_storage_blocking(); + let reqs = storage + .explorer() + .contract_verification_dal() + .get_all_successful_requests() + .unwrap(); + + std::fs::create_dir_all("./verified_sources").unwrap(); + for req in reqs { + let dir = format!("./verified_sources/{:?}", req.req.contract_address); + if std::path::Path::new(&dir).exists() { + continue; + } + + std::fs::create_dir_all(&dir).unwrap(); + let mut file = std::fs::File::create(format!("{}/request.json", &dir)).unwrap(); + file.write_all(serde_json::to_string_pretty(&req.req).unwrap().as_bytes()) + .unwrap(); + + match req.req.source_code_data { + SourceCodeData::SolSingleFile(content) => { + let mut file = + std::fs::File::create(format!("{}/{}.sol", &dir, req.req.contract_name)) + .unwrap(); + file.write_all(content.as_bytes()).unwrap(); + } + SourceCodeData::YulSingleFile(content) => { + let mut file = + std::fs::File::create(format!("{}/{}.yul", &dir, req.req.contract_name)) + .unwrap(); + file.write_all(content.as_bytes()).unwrap(); + } + SourceCodeData::StandardJsonInput(input) => { + let sources = input.get(&"sources".to_string()).unwrap().clone(); + for (key, val) in sources.as_object().unwrap() { + let p = format!("{}/{}", &dir, key); + let path = std::path::Path::new(p.as_str()); + let prefix = path.parent().unwrap(); + std::fs::create_dir_all(prefix).unwrap(); + let mut file = std::fs::File::create(path).unwrap(); + let content = val.get(&"content".to_string()).unwrap().as_str().unwrap(); + file.write_all(content.as_bytes()).unwrap(); + } + } + } + } +} diff --git a/core/bin/zksync_core/src/bin/zksync_server.rs b/core/bin/zksync_core/src/bin/zksync_server.rs index 1fc8cee2ea89..bfd0fee72fd8 100644 --- a/core/bin/zksync_core/src/bin/zksync_server.rs +++ b/core/bin/zksync_core/src/bin/zksync_server.rs @@ -1,58 +1,50 @@ -use std::cell::RefCell; +use clap::Parser; -use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; -use structopt::StructOpt; +use std::{env, str::FromStr, time::Duration}; use zksync_config::ZkSyncConfig; -use zksync_core::{genesis_init, initialize_components, wait_for_tasks, Component, Components}; +use zksync_core::{ + genesis_init, initialize_components, setup_sigint_handler, wait_for_tasks, Component, + Components, +}; use zksync_storage::RocksDB; -#[derive(Debug, Clone, Copy)] -pub enum ServerCommand { - Genesis, - Launch, -} - -#[derive(StructOpt)] -#[structopt(name = "zkSync operator node", author = "Matter Labs")] -struct Opt { - /// Generate genesis block for the first contract deployment using temporary db - #[structopt(long)] +#[derive(Debug, 
Parser)] +#[structopt(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)] +struct Cli { + /// Generate genesis block for the first contract deployment using temporary DB. + #[arg(long)] genesis: bool, - - /// Rebuild tree - #[structopt(long)] + /// Rebuild tree. + #[arg(long)] rebuild_tree: bool, - - /// comma-separated list of components to launch - #[structopt( + /// Comma-separated list of components to launch. + #[arg( long, default_value = "api,tree,tree_lightweight,eth,data_fetcher,state_keeper,witness_generator,housekeeper" )] components: ComponentsToRun, } +#[derive(Debug, Clone)] struct ComponentsToRun(Vec); -impl std::str::FromStr for ComponentsToRun { +impl FromStr for ComponentsToRun { type Err = String; fn from_str(s: &str) -> Result { - let components = s - .split(',') - .map(|x| Components::from_str(x.trim())) - .collect::, String>>()?; - let components = components - .into_iter() - .flat_map(|c| c.0) - .collect::>(); - Ok(ComponentsToRun(components)) + let components = s.split(',').try_fold(vec![], |mut acc, component_str| { + let components = Components::from_str(component_str.trim())?; + acc.extend(components.0); + Ok::<_, String>(acc) + })?; + Ok(Self(components)) } } #[tokio::main] async fn main() -> anyhow::Result<()> { - let opt = Opt::from_args(); + let opt = Cli::parse(); let mut config = ZkSyncConfig::from_env(); let sentry_guard = vlog::init(); @@ -61,14 +53,15 @@ async fn main() -> anyhow::Result<()> { return Ok(()); } - match sentry_guard { - Some(_) => vlog::info!( + if sentry_guard.is_some() { + vlog::info!( "Starting Sentry url: {}, l1_network: {}, l2_network {}", - std::env::var("MISC_SENTRY_URL").unwrap(), - std::env::var("CHAIN_ETH_NETWORK").unwrap(), - std::env::var("CHAIN_ETH_ZKSYNC_NETWORK").unwrap(), - ), - None => vlog::info!("No sentry url configured"), + env::var("MISC_SENTRY_URL").unwrap(), + env::var("CHAIN_ETH_NETWORK").unwrap(), + env::var("CHAIN_ETH_ZKSYNC_NETWORK").unwrap(), + ); + } else { + vlog::info!("No sentry url configured"); } let components = if opt.rebuild_tree { @@ -86,45 +79,34 @@ async fn main() -> anyhow::Result<()> { // OneShotWitnessGenerator is the only component that is not expected to run indefinitely // if this value is `false`, we expect all components to run indefinitely: we panic if any component returns. - let is_only_an_oneshotwitness_generator_task = components.len() == 1 - && components - .iter() - .all(|c| matches!(c, Component::WitnessGenerator(Some(_)))); + let is_only_oneshot_witness_generator_task = matches!( + components.as_slice(), + [Component::WitnessGenerator(Some(_), _)] + ); // Run core actors. - let (core_task_handles, stop_sender, cb_receiver) = initialize_components( - &config, - components, - is_only_an_oneshotwitness_generator_task, - ) - .await - .expect("Unable to start Core actors"); + let (core_task_handles, stop_sender, cb_receiver) = + initialize_components(&config, components, is_only_oneshot_witness_generator_task) + .await + .expect("Unable to start Core actors"); vlog::info!("Running {} core task handlers", core_task_handles.len()); - let (stop_signal_sender, mut stop_signal_receiver) = mpsc::channel(256); - { - let stop_signal_sender = RefCell::new(stop_signal_sender.clone()); - ctrlc::set_handler(move || { - let mut sender = stop_signal_sender.borrow_mut(); - block_on(sender.send(true)).expect("Ctrl+C signal send"); - }) - .expect("Error setting Ctrl+C handler"); - } + let sigint_receiver = setup_sigint_handler(); tokio::select! 
{ - _ = async { wait_for_tasks(core_task_handles, is_only_an_oneshotwitness_generator_task).await } => {}, - _ = async { stop_signal_receiver.next().await } => { + _ = wait_for_tasks(core_task_handles, is_only_oneshot_witness_generator_task) => {}, + _ = sigint_receiver => { vlog::info!("Stop signal received, shutting down"); }, - error = async { cb_receiver.await } => { + error = cb_receiver => { if let Ok(error_msg) = error { vlog::warn!("Circuit breaker received, shutting down. Reason: {}", error_msg); } }, }; - let _ = stop_sender.send(true); + stop_sender.send(true).ok(); RocksDB::await_rocksdb_termination(); // Sleep for some time to let some components gracefully stop. - tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + tokio::time::sleep(Duration::from_secs(5)).await; Ok(()) } diff --git a/core/bin/zksync_core/src/block_reverter/mod.rs b/core/bin/zksync_core/src/block_reverter/mod.rs new file mode 100644 index 000000000000..c9a1bf5dfe69 --- /dev/null +++ b/core/bin/zksync_core/src/block_reverter/mod.rs @@ -0,0 +1,428 @@ +use bitflags::bitflags; +use serde::Serialize; +use tokio::time::sleep; + +use std::path::Path; +use std::time::Duration; + +use zksync_config::ZkSyncConfig; +use zksync_contracts::zksync_contract; +use zksync_dal::ConnectionPool; +use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface, EthInterface}; +use zksync_merkle_tree::ZkSyncTree; +use zksync_merkle_tree2::domain::ZkSyncTree as NewTree; +use zksync_state::secondary_storage::SecondaryStateStorage; +use zksync_storage::db::Database; +use zksync_storage::RocksDB; +use zksync_types::aggregated_operations::AggregatedActionType; +use zksync_types::ethabi::Token; +use zksync_types::web3::contract::Options; +use zksync_types::{L1BatchNumber, H256, U256}; + +bitflags! { + pub struct BlockReverterFlags: u32 { + const POSTGRES = 0b_0001; + const TREE = 0b_0010; + const SK_CACHE = 0b_0100; + } +} + +/// Flag determining whether the reverter is allowed to revert the state +/// past the last batch finalized on L1. If this flag is set to `Disallowed`, +/// block reverter will panic upon such an attempt. +/// +/// Main use case for the `Allowed` flag is the external node, where may obtain an +/// incorrect state even for a block that was marked as executed. On the EN, this mode is not destructive. +#[derive(Debug)] +pub enum L1ExecutedBatchesRevert { + Allowed, + Disallowed, +} + +/// This struct is used to perform a rollback of the state. +/// Rollback is a rare event of manual intervention, when the node operator +/// decides to revert some of the not yet finalized batches for some reason +/// (e.g. inability to generate a proof). +/// +/// It is also used to automatically perform a rollback on the external node +/// after it is detected on the main node. +/// +/// There are a few state components that we can roll back +/// - State of the Postgres database +/// - State of the merkle tree +/// - State of the state_keeper cache +/// - State of the Ethereum contract (if the block was committed) +#[derive(Debug)] +pub struct BlockReverter { + config: ZkSyncConfig, + connection_pool: ConnectionPool, + executed_batches_revert_mode: L1ExecutedBatchesRevert, +} + +impl BlockReverter { + pub fn new( + config: ZkSyncConfig, + connection_pool: ConnectionPool, + executed_batches_revert_mode: L1ExecutedBatchesRevert, + ) -> Self { + Self { + config, + connection_pool, + executed_batches_revert_mode, + } + } + + /// Rolls back DBs (Postgres + RocksDB) to a previous state. 
+ pub async fn rollback_db( + &self, + last_l1_batch_to_keep: L1BatchNumber, + flags: BlockReverterFlags, + ) { + let rollback_tree = flags.contains(BlockReverterFlags::TREE); + let rollback_postgres = flags.contains(BlockReverterFlags::POSTGRES); + let rollback_sk_cache = flags.contains(BlockReverterFlags::SK_CACHE); + + if matches!( + self.executed_batches_revert_mode, + L1ExecutedBatchesRevert::Disallowed + ) { + let last_executed_l1_batch = self + .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) + .await; + assert!( + last_l1_batch_to_keep >= last_executed_l1_batch, + "Attempt to revert already executed blocks" + ); + } + + // Tree needs to be reverted first to keep state recoverable + self.rollback_rocks_dbs(last_l1_batch_to_keep, rollback_tree, rollback_sk_cache) + .await; + if rollback_postgres { + self.rollback_postgres(last_l1_batch_to_keep); + } + } + + async fn rollback_rocks_dbs( + &self, + last_l1_batch_to_keep: L1BatchNumber, + rollback_tree: bool, + rollback_sk_cache: bool, + ) { + vlog::info!("getting logs that should be applied to rollback state..."); + let logs = self + .connection_pool + .access_storage_blocking() + .storage_logs_dal() + .get_storage_logs_for_revert(last_l1_batch_to_keep); + + if rollback_tree { + let storage_root_hash = self + .connection_pool + .access_storage_blocking() + .blocks_dal() + .get_merkle_state_root(last_l1_batch_to_keep) + .expect("failed to fetch root hash for target block"); + + // Convert H256 -> U256, note that tree keys are encoded using little endianness. + let logs: Vec<_> = logs + .iter() + .map(|(key, value)| (U256::from_little_endian(&key.to_fixed_bytes()), *value)) + .collect(); + + // Rolling back both full tree and lightweight tree + let full_tree_path = self.config.db.path(); + if Path::new(full_tree_path).exists() { + vlog::info!("Rolling back full tree..."); + Self::rollback_tree( + last_l1_batch_to_keep, + logs.clone(), + full_tree_path, + storage_root_hash, + ); + } else { + vlog::info!("Full tree not found; skipping"); + } + + let lightweight_tree_path = self.config.db.merkle_tree_fast_ssd_path(); + if Path::new(lightweight_tree_path).exists() { + vlog::info!("Rolling back lightweight tree..."); + Self::rollback_tree( + last_l1_batch_to_keep, + logs, + lightweight_tree_path, + storage_root_hash, + ); + } else { + vlog::info!("Lightweight tree not found; skipping"); + } + + let new_lightweight_tree_path = &self.config.db.new_merkle_tree_ssd_path; + if Path::new(new_lightweight_tree_path).exists() { + vlog::info!("Rolling back new lightweight tree..."); + Self::rollback_new_tree( + last_l1_batch_to_keep, + new_lightweight_tree_path, + storage_root_hash, + ); + } else { + vlog::info!("New lightweight tree not found; skipping"); + } + } + + if rollback_sk_cache { + assert!( + Path::new(self.config.db.state_keeper_db_path()).exists(), + "Path with state keeper cache DB doesn't exist" + ); + self.rollback_state_keeper_cache(last_l1_batch_to_keep, logs) + .await; + } + } + + /// Reverts blocks in a Merkle tree. 
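The key conversion in `rollback_rocks_dbs` above (`U256::from_little_endian(&key.to_fixed_bytes())`) is easy to get backwards, since hash bytes are conventionally read big-endian. Here is a tiny standalone check of what the little-endian read does, assuming the primitive-types implementations that back `zksync_types::{H256, U256}`:

use primitive_types::{H256, U256};

fn main() {
    // An H256 whose big-endian integer value is 1, i.e. only the last byte is 0x01.
    let key = H256::from_low_u64_be(1);

    // Reading the same 32 bytes as little-endian moves that byte to the most
    // significant position of the resulting integer.
    let tree_key = U256::from_little_endian(&key.to_fixed_bytes());
    let mut expected = [0_u8; 32];
    expected[0] = 1; // most significant byte in big-endian layout
    assert_eq!(tree_key, U256::from_big_endian(&expected));

    // A big-endian read would have preserved the numeric value instead.
    assert_eq!(U256::from_big_endian(&key.to_fixed_bytes()), U256::one());
}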
+ fn rollback_tree( + last_l1_batch_to_keep: L1BatchNumber, + logs: Vec<(U256, Option)>, + path: impl AsRef, + storage_root_hash: H256, + ) { + let db = RocksDB::new(Database::MerkleTree, path, true); + let mut tree = ZkSyncTree::new(db); + + if tree.block_number() <= last_l1_batch_to_keep.0 { + vlog::info!("Tree is behind the block to revert to; skipping"); + return; + } + tree.revert_logs(last_l1_batch_to_keep, logs); + + vlog::info!("checking match of the tree root hash and root hash from Postgres..."); + assert_eq!(tree.root_hash(), storage_root_hash); + vlog::info!("saving tree changes to disk..."); + tree.save().expect("Unable to update tree state"); + } + + fn rollback_new_tree( + last_l1_batch_to_keep: L1BatchNumber, + path: impl AsRef, + storage_root_hash: H256, + ) { + let db = RocksDB::new(Database::MerkleTree, path, true); + let mut tree = NewTree::new_lightweight(db); + + if tree.block_number() <= last_l1_batch_to_keep.0 { + vlog::info!("Tree is behind the block to revert to; skipping"); + return; + } + tree.revert_logs(last_l1_batch_to_keep); + + vlog::info!("checking match of the tree root hash and root hash from Postgres..."); + assert_eq!(tree.root_hash(), storage_root_hash); + vlog::info!("saving tree changes to disk..."); + tree.save(); + } + + /// Reverts blocks in the state keeper cache. + async fn rollback_state_keeper_cache( + &self, + last_l1_batch_to_keep: L1BatchNumber, + logs: Vec<(H256, Option)>, + ) { + vlog::info!("opening DB with state keeper cache..."); + let db = RocksDB::new( + Database::StateKeeper, + self.config.db.state_keeper_db_path(), + true, + ); + let mut sk_cache = SecondaryStateStorage::new(db); + + if sk_cache.get_l1_batch_number() > last_l1_batch_to_keep + 1 { + vlog::info!("getting contracts and factory deps that should be removed..."); + let mut storage = self.connection_pool.access_storage_blocking(); + let (_, last_miniblock_to_keep) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) + .expect("L1 batch should contain at least one miniblock"); + let factory_deps = storage + .storage_dal() + .get_factory_deps_for_revert(last_miniblock_to_keep); + + vlog::info!("rolling back state keeper cache..."); + sk_cache.rollback(logs, factory_deps, last_l1_batch_to_keep); + } else { + vlog::info!("nothing to revert in state keeper cache"); + } + } + + /// Reverts data in the Postgres database. 
+ fn rollback_postgres(&self, last_l1_batch_to_keep: L1BatchNumber) { + vlog::info!("rolling back postgres data..."); + let mut storage = self.connection_pool.access_storage_blocking(); + let mut transaction = storage.start_transaction_blocking(); + + let (_, last_miniblock_to_keep) = transaction + .blocks_dal() + .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) + .expect("L1 batch should contain at least one miniblock"); + + vlog::info!("rolling back transactions state..."); + transaction + .transactions_dal() + .reset_transactions_state(last_miniblock_to_keep); + vlog::info!("rolling back events..."); + transaction + .events_dal() + .rollback_events(last_miniblock_to_keep); + vlog::info!("rolling back l2 to l1 logs..."); + transaction + .events_dal() + .rollback_l2_to_l1_logs(last_miniblock_to_keep); + vlog::info!("rolling back created tokens..."); + transaction + .tokens_dal() + .rollback_tokens(last_miniblock_to_keep); + vlog::info!("rolling back factory deps...."); + transaction + .storage_dal() + .rollback_factory_deps(last_miniblock_to_keep); + vlog::info!("rolling back storage..."); + transaction + .storage_logs_dal() + .rollback_storage(last_miniblock_to_keep); + vlog::info!("rolling back storage logs..."); + transaction + .storage_logs_dal() + .rollback_storage_logs(last_miniblock_to_keep); + vlog::info!("rolling back l1 batches..."); + transaction + .blocks_dal() + .delete_l1_batches(last_l1_batch_to_keep); + vlog::info!("rolling back miniblocks..."); + transaction + .blocks_dal() + .delete_miniblocks(last_miniblock_to_keep); + + transaction.commit_blocking(); + } + + /// Sends revert transaction to L1. + pub async fn send_ethereum_revert_transaction( + &self, + last_l1_batch_to_keep: L1BatchNumber, + priority_fee_per_gas: U256, + nonce: u64, + ) { + let eth_gateway = PKSigningClient::from_config(&self.config); + let contract = zksync_contract(); + let revert_blocks = contract + .functions + .get("revertBlocks") + .expect("revertBlocks function not found") + .last() + .expect("revertBlocks function entry not found"); + let args = [Token::Uint(U256::from(last_l1_batch_to_keep.0))]; + let raw_tx = revert_blocks + .encode_input(&args) + .expect("Failed to encode transaction data.") + .to_vec(); + let signed_tx = eth_gateway + .sign_prepared_tx_for_addr( + raw_tx, + self.config.contracts.validator_timelock_addr, + Options::with(|opt| { + opt.gas = Some(5_000_000.into()); + opt.max_priority_fee_per_gas = Some(priority_fee_per_gas); + opt.nonce = Some(nonce.into()); + }), + "block-reverter", + ) + .await + .expect("Failed to sign transaction"); + let tx_hash = eth_gateway + .send_raw_tx(signed_tx.raw_tx) + .await + .expect("failed to send revert transaction to L1"); + + loop { + if let Some(status) = eth_gateway + .get_tx_status(tx_hash, "block reverter") + .await + .expect("Failed to get tx status from eth node") + { + assert!(status.success); + vlog::info!("revert transaction has completed"); + return; + } else { + vlog::info!("waiting for L1 transaction confirmation..."); + sleep(Duration::from_secs(5)).await; + } + } + } + + async fn get_l1_batch_number_from_contract(&self, op: AggregatedActionType) -> L1BatchNumber { + let function_name = match op { + AggregatedActionType::CommitBlocks => "getTotalBlocksCommitted", + AggregatedActionType::PublishProofBlocksOnchain => "getTotalBlocksVerified", + AggregatedActionType::ExecuteBlocks => "getTotalBlocksExecuted", + }; + let eth_gateway = PKSigningClient::from_config(&self.config); + let block_number: U256 = eth_gateway + 
.call_main_contract_function(function_name, (), None, Options::default(), None) + .await + .unwrap(); + L1BatchNumber(block_number.as_u32()) + } + + /// Returns suggested values for rollback. + pub async fn suggested_values(&self) -> SuggestedRollbackValues { + let eth_gateway = PKSigningClient::from_config(&self.config); + let last_committed_l1_batch_number = self + .get_l1_batch_number_from_contract(AggregatedActionType::CommitBlocks) + .await; + let last_verified_l1_batch_number = self + .get_l1_batch_number_from_contract(AggregatedActionType::PublishProofBlocksOnchain) + .await; + let last_executed_l1_batch_number = self + .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) + .await; + vlog::info!( + "Last L1 batch numbers on contract: committed {}, verified {}, executed {}", + last_committed_l1_batch_number, + last_verified_l1_batch_number, + last_executed_l1_batch_number + ); + + let nonce = eth_gateway + .pending_nonce("reverter") + .await + .unwrap() + .as_u64(); + let priority_fee = self + .config + .eth_sender + .gas_adjuster + .default_priority_fee_per_gas; + + SuggestedRollbackValues { + last_executed_l1_batch_number, + nonce, + priority_fee, + } + } + + /// Clears failed L1 transactions + pub async fn clear_failed_l1_transactions(&self) { + vlog::info!("clearing failed L1 transactions..."); + self.connection_pool + .access_storage_blocking() + .eth_sender_dal() + .clear_failed_transactions(); + } +} + +#[derive(Debug, Serialize)] +pub struct SuggestedRollbackValues { + pub last_executed_l1_batch_number: L1BatchNumber, + pub nonce: u64, + pub priority_fee: u64, +} diff --git a/core/bin/zksync_core/src/consistency_checker/mod.rs b/core/bin/zksync_core/src/consistency_checker/mod.rs new file mode 100644 index 000000000000..10102da7c02c --- /dev/null +++ b/core/bin/zksync_core/src/consistency_checker/mod.rs @@ -0,0 +1,190 @@ +use std::time::Duration; +use zksync_dal::ConnectionPool; +use zksync_types::web3::{ + error, ethabi, + transports::Http, + types::{Address, TransactionId}, + Web3, +}; +use zksync_types::L1BatchNumber; + +#[derive(Debug)] +pub struct ConsistencyChecker { + // ABI of the zkSync contract + contract: ethabi::Contract, + // Address of the zkSync contract + contract_addr: Address, + // How many past batches to check when starting + max_batches_to_recheck: u32, + web3: Web3, + db: ConnectionPool, +} + +const SLEEP_DELAY: Duration = Duration::from_secs(5); + +impl ConsistencyChecker { + pub fn new( + web3_url: &str, + contract_addr: Address, + max_batches_to_recheck: u32, + db: ConnectionPool, + ) -> Self { + let web3 = Web3::new(Http::new(web3_url).unwrap()); + let contract = zksync_contracts::zksync_contract(); + Self { + web3, + contract, + contract_addr, + max_batches_to_recheck, + db, + } + } + + async fn check_commitments(&self, batch_number: L1BatchNumber) -> Result { + let mut storage = self.db.access_storage_blocking(); + + let storage_block = storage + .blocks_dal() + .get_storage_block(batch_number) + .unwrap_or_else(|| panic!("Block {} not found in the database", batch_number)); + + let commit_tx_id = storage_block + .eth_commit_tx_id + .unwrap_or_else(|| panic!("Block commit tx not found for block {}", batch_number)) + as u32; + + let block_metadata = storage + .blocks_dal() + .get_block_with_metadata(storage_block) + .unwrap_or_else(|| { + panic!( + "Block metadata for block {} not found in the database", + batch_number + ) + }); + + let commit_tx_hash = storage + .eth_sender_dal() + 
.get_confirmed_tx_hash_by_eth_tx_id(commit_tx_id) + .unwrap_or_else(|| { + panic!( + "Commit tx hash not found in the database. Commit tx id: {}", + commit_tx_id + ) + }); + + vlog::info!( + "Checking commit tx {} for batch {}", + commit_tx_hash, + batch_number.0 + ); + + // we can't get tx calldata from db because it can be fake + let commit_tx = self + .web3 + .eth() + .transaction(TransactionId::Hash(commit_tx_hash)) + .await? + .expect("Commit tx not found on L1"); + + let commit_tx_status = self + .web3 + .eth() + .transaction_receipt(commit_tx_hash) + .await? + .expect("Commit tx receipt not found on L1") + .status; + + assert_eq!( + commit_tx_status, + Some(1.into()), + "Main node gave us a failed commit tx" + ); + assert_eq!( + commit_tx.to, + Some(self.contract_addr), + "Main node gave us a commit tx sent to a wrong address" + ); + + let commitments = self + .contract + .function("commitBlocks") + .unwrap() + .decode_input(&commit_tx.input.0[4..]) + .unwrap() + .pop() + .unwrap() + .into_array() + .unwrap(); + + // Commit transactions usually publish multiple commitments at once, so we need to find + // the one that corresponds to the batch we're checking. + let first_batch_number = match &commitments[0] { + ethabi::Token::Tuple(tuple) => tuple[0].clone().into_uint().unwrap().as_usize(), + _ => panic!("ABI does not match the commitBlocks() function on the zkSync contract"), + }; + let commitment = &commitments[batch_number.0 as usize - first_batch_number]; + + Ok(commitment == &block_metadata.l1_commit_data()) + } + + fn last_committed_batch(&self) -> L1BatchNumber { + self.db + .access_storage_blocking() + .blocks_dal() + .get_number_of_last_block_committed_on_eth() + .unwrap_or(L1BatchNumber(0)) + } + + pub async fn run(self, stop_receiver: tokio::sync::watch::Receiver) { + let mut batch_number: L1BatchNumber = self + .last_committed_batch() + .0 + .saturating_sub(self.max_batches_to_recheck) + .max(1) + .into(); + + vlog::info!("Starting consistency checker from batch {}", batch_number.0); + + loop { + if *stop_receiver.borrow() { + vlog::info!("Stop signal received, consistency_checker is shutting down"); + break; + } + + let batch_has_metadata = self + .db + .access_storage_blocking() + .blocks_dal() + .get_block_metadata(batch_number) + .is_some(); + + // The batch might be already committed but not yet processed by the external node's tree + // OR the batch might be processed by the external node's tree but not yet committed. + // We need both. 
+ if !batch_has_metadata || self.last_committed_batch() < batch_number { + tokio::time::sleep(SLEEP_DELAY).await; + continue; + } + + match self.check_commitments(batch_number).await { + Ok(true) => { + vlog::info!("Batch {} is consistent with L1", batch_number.0); + metrics::gauge!( + "external_node.last_correct_batch", + batch_number.0 as f64, + "component" => "consistency_checker", + ); + batch_number.0 += 1; + } + Ok(false) => { + panic!("Batch {} is inconsistent with L1", batch_number.0); + } + Err(e) => { + vlog::warn!("Consistency checker error: {}", e); + tokio::time::sleep(SLEEP_DELAY).await; + } + } + } + } +} diff --git a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs index b61cda13b26a..f8b727e57161 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs @@ -87,11 +87,11 @@ impl TokenListFetcher { }; // We assume that token metadata does not change, thus we only looking for the new tokens. - let mut storage = pool.access_storage().await; - let unknown_tokens = self.load_unknown_tokens(&mut storage).await; + let mut storage = pool.access_storage_blocking(); + let unknown_tokens = self.load_unknown_tokens(&mut storage); token_list.retain(|token, _data| unknown_tokens.contains(token)); - self.update_tokens(&mut storage, token_list).await; + self.update_tokens(&mut storage, token_list); } } @@ -105,7 +105,7 @@ impl TokenListFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - async fn update_tokens( + fn update_tokens( &self, storage: &mut StorageProcessor<'_>, tokens: HashMap, @@ -116,7 +116,7 @@ impl TokenListFetcher { } } - async fn load_unknown_tokens(&self, storage: &mut StorageProcessor<'_>) -> HashSet
{ + fn load_unknown_tokens(&self, storage: &mut StorageProcessor<'_>) -> HashSet<Address>
{ storage .tokens_dal() .get_unknown_l1_token_addresses() diff --git a/core/bin/zksync_core/src/data_fetchers/token_list/one_inch.rs b/core/bin/zksync_core/src/data_fetchers/token_list/one_inch.rs index 2643a9fb89a3..ce1ee7c79ed0 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_list/one_inch.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_list/one_inch.rs @@ -57,6 +57,8 @@ pub(super) struct OneInchTokensResponse { } #[tokio::test] +#[ignore] +// We can't rely on 1inch API in unit tests, so we ignore this test. async fn test_fetch_one_inch_token_list() { let mut config = FetcherConfig::from_env(); config.token_list.url = "https://api.1inch.exchange".to_string(); diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/coingecko.rs b/core/bin/zksync_core/src/data_fetchers/token_price/coingecko.rs index 2402f00ccc5d..5f6db62cd40f 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_price/coingecko.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_price/coingecko.rs @@ -1,14 +1,10 @@ -use std::{ - cmp::{max, min}, - collections::HashMap, - str::FromStr, -}; +use std::{collections::HashMap, str::FromStr}; use async_trait::async_trait; use chrono::{DateTime, NaiveDateTime, Utc}; use futures::try_join; use itertools::Itertools; -use num::{rational::Ratio, BigUint, FromPrimitive}; +use num::{rational::Ratio, BigUint}; use reqwest::{Client, Url}; use serde::{Deserialize, Serialize}; @@ -139,26 +135,7 @@ impl FetcherImpl for CoinGeckoFetcher { let result = token_prices .into_iter() .map(|(address, coingecko_token_price)| { - let usd_price = { - let current_price = coingecko_token_price.usd; - if let Some(usd_24h_change) = coingecko_token_price.usd_24h_change { - let percent_price_diff = BigUint::from_f64(100.0f64 - usd_24h_change); - if let Some(percent_price_diff) = percent_price_diff { - let yesterdays_price = - (¤t_price * percent_price_diff) / BigUint::from(100u32); - - if address == ETHEREUM_ADDRESS { - max(current_price, yesterdays_price) - } else { - min(current_price, yesterdays_price) - } - } else { - current_price - } - } else { - current_price - } - }; + let usd_price = coingecko_token_price.usd; let last_updated = { let naive_last_updated = diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs index 2f3717144e0b..a368f62cb8c0 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs @@ -77,8 +77,8 @@ impl TokenPriceFetcher { self.error_handler.update().await; // We refresh token list in case new tokens were added. - let mut storage = pool.access_storage().await; - let tokens = self.get_tokens(&mut storage).await; + let mut storage = pool.access_storage_blocking(); + let tokens = self.get_tokens(&mut storage); // Vector of received token prices in the format of (`token_addr`, `price_in_usd`, `fetch_timestamp`). let token_prices = match self.fetch_token_price(&tokens).await { @@ -91,7 +91,7 @@ impl TokenPriceFetcher { continue; } }; - self.store_token_prices(&mut storage, token_prices).await; + self.store_token_prices(&mut storage, token_prices); } } @@ -108,7 +108,7 @@ impl TokenPriceFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - async fn store_token_prices( + fn store_token_prices( &self, storage: &mut StorageProcessor<'_>, token_prices: HashMap, @@ -121,7 +121,7 @@ impl TokenPriceFetcher { /// Returns the list of "interesting" tokens, e.g. ones that can be used to pay fees. 
/// We don't actually need prices for other tokens. - async fn get_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ + fn get_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ storage .tokens_dal() .get_l1_tokens_by_volume(&self.minimum_required_liquidity) diff --git a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs index 2b3725ab14bf..165a14dd70fb 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs @@ -68,8 +68,8 @@ impl TradingVolumeFetcher { fetching_interval.tick().await; self.error_handler.update().await; - let mut storage = pool.access_storage().await; - let known_l1_tokens = self.load_tokens(&mut storage).await; + let mut storage = pool.access_storage_blocking(); + let known_l1_tokens = self.load_tokens(&mut storage); let trading_volumes = match self.fetch_trading_volumes(&known_l1_tokens).await { Ok(volumes) => { @@ -82,8 +82,7 @@ impl TradingVolumeFetcher { } }; - self.store_market_volumes(&mut storage, trading_volumes) - .await; + self.store_market_volumes(&mut storage, trading_volumes); } } @@ -100,7 +99,7 @@ impl TradingVolumeFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - async fn store_market_volumes( + fn store_market_volumes( &self, storage: &mut StorageProcessor<'_>, tokens: HashMap, @@ -113,7 +112,7 @@ impl TradingVolumeFetcher { /// Returns the list of tokens with known metadata (if token is not in the list we use, /// it's very likely to not have required level of trading volume anyways). - async fn load_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec
{ + fn load_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ storage .tokens_dal() .get_well_known_token_addresses() diff --git a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs index 4e1a2b576f71..52339812aaab 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs @@ -117,6 +117,7 @@ pub struct TokenResponse { } #[tokio::test] +#[ignore] // Remote API may be unavailable, so we ignore this test by default. async fn test_fetch_uniswap_trading_volumes() { let mut config = FetcherConfig::from_env(); config.token_trading_volume.url = diff --git a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs b/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs index 907ccc3ca97a..75bf1360664e 100644 --- a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs +++ b/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs @@ -135,7 +135,7 @@ impl GasCriterion { GasCriterion { op, gas_limit } } - async fn get_gas_amount( + fn get_gas_amount( &mut self, storage: &mut StorageProcessor<'_>, block_number: L1BatchNumber, @@ -164,7 +164,7 @@ impl BlockPublishCriterion for GasCriterion { let mut last_block: Option = None; for (index, block) in consecutive_blocks.iter().enumerate() { - let block_gas = self.get_gas_amount(storage, block.header.number).await; + let block_gas = self.get_gas_amount(storage, block.header.number); if block_gas >= gas_left { if index == 0 { panic!( diff --git a/core/bin/zksync_core/src/eth_sender/error.rs b/core/bin/zksync_core/src/eth_sender/error.rs index be2912e05c70..585277b39a5f 100644 --- a/core/bin/zksync_core/src/eth_sender/error.rs +++ b/core/bin/zksync_core/src/eth_sender/error.rs @@ -1,4 +1,4 @@ -use zksync_eth_client::clients::http_client::Error; +use zksync_eth_client::types::Error; #[derive(Debug, thiserror::Error)] pub enum ETHSenderError { diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 1cc0d6990927..980032d1c0a5 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -7,7 +7,7 @@ use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; use zksync_types::{aggregated_operations::AggregatedOperation, eth_sender::EthTx, Address, H256}; /// The component is responsible for aggregating l1 batches into eth_txs: @@ -42,7 +42,7 @@ impl EthTxAggregator { pub async fn run( mut self, pool: ConnectionPool, - eth_client: EthereumClient, + eth_client: PKSigningClient, stop_receiver: watch::Receiver, ) { loop { @@ -50,7 +50,7 @@ impl EthTxAggregator { .get_l1_base_system_contracts_hashes(ð_client) .await .unwrap(); - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); if *stop_receiver.borrow() { vlog::info!("Stop signal received, eth_tx_aggregator is shutting down"); @@ -72,7 +72,7 @@ impl EthTxAggregator { async fn get_l1_base_system_contracts_hashes( &mut self, - eth_client: &EthereumClient, + eth_client: &PKSigningClient, ) -> Result { let bootloader_code_hash: H256 = eth_client .call_main_contract_function( @@ -110,13 +110,13 @@ impl 
EthTxAggregator { .get_next_ready_operation(storage, base_system_contracts_hashes) .await { - let tx = self.save_eth_tx(storage, &agg_op).await?; - Self::log_eth_tx_saving(storage, agg_op, &tx).await; + let tx = self.save_eth_tx(storage, &agg_op)?; + Self::log_eth_tx_saving(storage, agg_op, &tx); } Ok(()) } - async fn log_eth_tx_saving( + fn log_eth_tx_saving( storage: &mut StorageProcessor<'_>, aggregated_op: AggregatedOperation, tx: &EthTx, @@ -176,13 +176,13 @@ impl EthTxAggregator { .to_vec() } - pub(super) async fn save_eth_tx( + pub(super) fn save_eth_tx( &self, storage: &mut StorageProcessor<'_>, aggregated_op: &AggregatedOperation, ) -> Result { - let mut transaction = storage.start_transaction().await; - let nonce = self.get_next_nonce(&mut transaction).await?; + let mut transaction = storage.start_transaction_blocking(); + let nonce = self.get_next_nonce(&mut transaction)?; let calldata = self.encode_aggregated_op(aggregated_op); let (first_block, last_block) = aggregated_op.get_block_range(); let op_type = aggregated_op.get_action_type(); @@ -204,14 +204,11 @@ impl EthTxAggregator { transaction .blocks_dal() .set_eth_tx_id(first_block, last_block, eth_tx.id, op_type); - transaction.commit().await; + transaction.commit_blocking(); Ok(eth_tx) } - async fn get_next_nonce( - &self, - storage: &mut StorageProcessor<'_>, - ) -> Result { + fn get_next_nonce(&self, storage: &mut StorageProcessor<'_>) -> Result { let db_nonce = storage.eth_sender_dal().get_next_nonce().unwrap_or(0); // Between server starts we can execute some txs using operator account or remove some txs from the database // At the start we have to consider this fact and get the max nonce. diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs index 3f9843a83fca..285c67363070 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -4,8 +4,8 @@ use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{ - clients::http_client::{Error, ExecutedTxStatus, SignedCallResult}, - EthInterface, + types::{Error, ExecutedTxStatus, SignedCallResult}, + BoundEthInterface, }; use zksync_types::{ eth_sender::EthTx, @@ -14,9 +14,8 @@ use zksync_types::{ }; use zksync_utils::time::seconds_since_epoch; -use crate::eth_sender::grafana_metrics::track_eth_tx_metrics; use crate::eth_sender::ETHSenderError; -use crate::gas_adjuster::GasAdjuster; +use crate::{eth_sender::grafana_metrics::track_eth_tx_metrics, l1_gas_price::L1TxParamsProvider}; #[derive(Debug)] struct EthFee { @@ -38,18 +37,18 @@ struct OperatorNonce { /// Based on eth_tx_history queue the component can mark txs as stuck and create the new attempt /// with higher gas price #[derive(Debug)] -pub struct EthTxManager { +pub struct EthTxManager { ethereum_gateway: E, config: SenderConfig, - gas_adjuster: Arc>, + gas_adjuster: Arc, } -impl EthTxManager { - pub fn new( - config: SenderConfig, - gas_adjuster: Arc>, - ethereum_gateway: E, - ) -> Self { +impl EthTxManager +where + E: BoundEthInterface + Sync, + G: L1TxParamsProvider, +{ + pub fn new(config: SenderConfig, gas_adjuster: Arc, ethereum_gateway: E) -> Self { Self { ethereum_gateway, config, @@ -122,12 +121,11 @@ impl EthTxManager { }; // Extra check to prevent sending transaction will extremely high priority fee. 
- const MAX_ACCEPTABLE_PRIORITY_FEE: u64 = 10u64.pow(11); // 100 gwei - if priority_fee_per_gas > MAX_ACCEPTABLE_PRIORITY_FEE { + if priority_fee_per_gas > self.config.max_acceptable_priority_fee_in_gwei { panic!( "Extremely high value of priority_fee_per_gas is suggested: {}, while max acceptable is {}", priority_fee_per_gas, - MAX_ACCEPTABLE_PRIORITY_FEE + self.config.max_acceptable_priority_fee_in_gwei ); } @@ -197,27 +195,26 @@ impl EthTxManager { .sign_tx(tx, base_fee_per_gas, priority_fee_per_gas) .await; - let tx_history_id = storage.eth_sender_dal().insert_tx_history( + if let Some(tx_history_id) = storage.eth_sender_dal().insert_tx_history( tx.id, base_fee_per_gas, priority_fee_per_gas, signed_tx.hash, signed_tx.raw_tx.clone(), - ); - - if let Err(error) = self - .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx, current_block) - .await - { - vlog::warn!( - "Error when sending new signed tx for tx {}, base_fee_per_gas {}, priority_fee_per_gas: {}: {}", - tx.id, - base_fee_per_gas, - priority_fee_per_gas, - error - ); + ) { + if let Err(error) = self + .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx, current_block) + .await + { + vlog::warn!( + "Error when sending new signed tx for tx {}, base_fee_per_gas {}, priority_fee_per_gas: {}: {}", + tx.id, + base_fee_per_gas, + priority_fee_per_gas, + error + ); + } } - Ok(signed_tx.hash) } @@ -279,9 +276,15 @@ impl EthTxManager { let operator_nonce = self.get_operator_nonce(current_block).await?; + let inflight_txs = storage.eth_sender_dal().get_inflight_txs(); + metrics::gauge!( + "server.eth_sender.number_of_inflight_txs", + inflight_txs.len() as f64, + ); + // Not confirmed transactions, ordered by nonce - for tx in storage.eth_sender_dal().get_inflight_txs() { - vlog::debug!( + for tx in inflight_txs { + vlog::trace!( "Going through not confirmed txs. \ Current block: {}, current tx id: {}, \ sender's nonce on block `current block - number of confirmations`: {}", @@ -295,15 +298,11 @@ impl EthTxManager { // We only resend the first unmined transaction. if operator_nonce.current <= tx.nonce { // None means txs hasn't been sent yet - if let Some(first_sent_at_block) = storage + let first_sent_at_block = storage .eth_sender_dal() .get_block_number_on_first_sent_attempt(tx.id) - { - return Ok(Some((tx, first_sent_at_block))); - } else { - vlog::warn!("ETH Tx {} wasn't send", tx.id); - } - continue; + .unwrap_or(current_block.0); + return Ok(Some((tx, first_sent_at_block))); } // If on block `current_block - self.wait_confirmations` @@ -314,7 +313,7 @@ impl EthTxManager { continue; } - vlog::debug!( + vlog::trace!( "Sender's nonce on block `current block - number of confirmations` is greater than current tx's nonce. \ Checking transaction with id {}. Tx nonce is equal to {}", tx.id, @@ -418,11 +417,10 @@ impl EthTxManager { } } else { vlog::debug!( - "There is {} confirmations for transaction with history item tx hash: {} and id: {}. 
\ - But {} number of confirmations is required", - confirmations, + "Transaction {} with id {} has {} out of {} required confirmations", tx_status.tx_hash, tx.id, + confirmations, self.config.wait_confirmations ); } @@ -518,7 +516,7 @@ impl EthTxManager { .unwrap() .as_u32(), ); - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); self.send_unsent_txs(&mut storage, current_block).await; } @@ -526,7 +524,7 @@ impl EthTxManager { // will never check inflight txs status let mut last_known_l1_block = L1BlockNumber(0); loop { - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); if *stop_receiver.borrow() { vlog::info!("Stop signal received, eth_tx_manager is shutting down"); diff --git a/core/bin/zksync_core/src/eth_sender/tests.rs b/core/bin/zksync_core/src/eth_sender/tests.rs index 2ec6d79fb640..8cf04add1538 100644 --- a/core/bin/zksync_core/src/eth_sender/tests.rs +++ b/core/bin/zksync_core/src/eth_sender/tests.rs @@ -1,12 +1,12 @@ use crate::eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; -use crate::gas_adjuster::GasAdjuster; +use crate::l1_gas_price::GasAdjuster; use db_test_macro::db_test; use zksync_config::{ configs::eth_sender::{ProofSendingMode, SenderConfig}, ETHSenderConfig, GasAdjusterConfig, }; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::clients::{http_client::EthInterface, mock::MockEthereum}; +use zksync_eth_client::{clients::mock::MockEthereum, EthInterface}; use zksync_types::{ aggregated_operations::{AggregatedOperation, BlocksExecuteOperation}, Address, L1BlockNumber, @@ -15,7 +15,7 @@ use zksync_types::{ use std::sync::Arc; // Alias to conveniently call static methods of ETHSender. -type MockEthTxManager = EthTxManager>; +type MockEthTxManager = EthTxManager, GasAdjuster>>; const DUMMY_OPERATION: AggregatedOperation = AggregatedOperation::ExecuteBlocks(BlocksExecuteOperation { blocks: vec![] }); @@ -101,8 +101,7 @@ async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> { for _ in 0..5 { let tx = tester .aggregator - .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) - .await?; + .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION)?; let hash = tester .manager .send_eth_tx( @@ -170,8 +169,7 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() let block = L1BlockNumber(tester.gateway.block_number("").await?.as_u32()); let tx = tester .aggregator - .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) - .await?; + .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION)?; let hash = tester .manager @@ -238,7 +236,6 @@ async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::R let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) - .await .unwrap(); let hash = tester @@ -303,7 +300,6 @@ async fn three_scenarios(connection_pool: ConnectionPool) -> anyhow::Result<()> let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) - .await .unwrap(); let hash = tester @@ -367,7 +363,6 @@ async fn failed_eth_tx(connection_pool: ConnectionPool) { let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) - .await .unwrap(); let hash = tester diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs index 9f2aa6a73f6a..a3144b0d4bb0 100644 --- a/core/bin/zksync_core/src/eth_watch/client.rs +++ 
b/core/bin/zksync_core/src/eth_watch/client.rs @@ -4,7 +4,9 @@ use std::fmt::{Debug, Display}; use tokio::time::Instant; -use zksync_eth_client::clients::http_client::{self, EthInterface, EthereumClient}; +use zksync_eth_client::{ + clients::http::PKSigningClient, types::Error as EthClientError, BoundEthInterface, EthInterface, +}; use zksync_types::ethabi::{Contract, Hash}; use zksync_contracts::zksync_contract; @@ -23,7 +25,7 @@ pub enum Error { #[error("Log parsing filed: {0}")] LogParse(String), #[error("Eth client error: {0}")] - EthClient(#[from] http_client::Error), + EthClient(#[from] EthClientError), #[error("Infinite recursion caused by too many responses")] InfiniteRecursion, } @@ -63,13 +65,13 @@ const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; #[derive(Debug)] pub struct EthHttpClient { - client: EthereumClient, + client: PKSigningClient, topics: ContractTopics, zksync_contract_addr: H160, } impl EthHttpClient { - pub fn new(client: EthereumClient, zksync_contract_addr: H160) -> Self { + pub fn new(client: PKSigningClient, zksync_contract_addr: H160) -> Self { vlog::debug!("New eth client, contract addr: {:x}", zksync_contract_addr); let topics = ContractTopics::new(&zksync_contract()); Self { @@ -121,7 +123,7 @@ impl EthClient for EthHttpClient { // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. - if let Err(Error::EthClient(http_client::Error::EthereumGateway(err))) = &result { + if let Err(Error::EthClient(EthClientError::EthereumGateway(err))) = &result { vlog::warn!("Provider returned error message: {:?}", err); let err_message = err.to_string(); let err_code = if let web3::Error::Rpc(err) = err { diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index 5b9f9623bff4..58ef62c3ae70 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -12,7 +12,7 @@ use tokio::{sync::watch, task::JoinHandle}; // Workspace deps use zksync_config::constants::PRIORITY_EXPIRATION; -use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_eth_client::clients::http::PKSigningClient; use zksync_types::{ l1::L1Tx, web3::types::BlockNumber as Web3BlockNumber, L1BlockNumber, PriorityOpId, }; @@ -53,7 +53,7 @@ impl EthWatch { number_of_confirmations_for_event: usize, poll_interval: Duration, ) -> Self { - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); let state = Self::initialize_state(&client, &mut storage, number_of_confirmations_for_event).await; @@ -109,7 +109,7 @@ impl EthWatch { metrics::counter!("server.eth_watch.eth_poll", 1); - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); if let Err(error) = self.loop_iteration(&mut storage).await { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. 
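The `Error::InfiniteRecursion` variant and the `TOO_MANY_RESULTS_ALCHEMY` marker string above hint at how the event client copes with providers that reject wide `eth_getLogs` ranges: split the range and retry. The splitting code itself is not part of this hunk, so the sketch below is only an approximation of the idea under those assumptions; the `query` closure and the error text are stand-ins, not the real client API.

const MAX_DEPTH: usize = 32;

fn get_logs_recursive<F>(from: u64, to: u64, depth: usize, query: &F) -> Result<Vec<u64>, String>
where
    F: Fn(u64, u64) -> Result<Vec<u64>, String>,
{
    if depth > MAX_DEPTH {
        return Err("infinite recursion caused by too many responses".to_string());
    }
    match query(from, to) {
        Ok(logs) => Ok(logs),
        // Provider refused the range: halve it and query both halves.
        Err(err) if err.contains("response size exceeded") && from < to => {
            let mid = from + (to - from) / 2;
            let mut logs = get_logs_recursive(from, mid, depth + 1, query)?;
            logs.extend(get_logs_recursive(mid + 1, to, depth + 1, query)?);
            Ok(logs)
        }
        Err(err) => Err(err),
    }
}

fn main() {
    // Fake provider that refuses ranges wider than 100 blocks.
    let query = |from: u64, to: u64| {
        if to - from > 100 {
            Err("response size exceeded".to_string())
        } else {
            Ok((from..=to).collect::<Vec<u64>>())
        }
    };
    let logs = get_logs_recursive(0, 1_000, 0, &query).unwrap();
    assert_eq!(logs.len(), 1_001);
}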
@@ -228,7 +228,7 @@ impl EthWatch { pub async fn start_eth_watch( pool: ConnectionPool, - eth_gateway: EthereumClient, + eth_gateway: PKSigningClient, config_options: &ZkSyncConfig, stop_receiver: watch::Receiver, ) -> JoinHandle<()> { diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs index 57ff542ce2c7..646b8684cb3a 100644 --- a/core/bin/zksync_core/src/eth_watch/tests.rs +++ b/core/bin/zksync_core/src/eth_watch/tests.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use tokio::sync::RwLock; use db_test_macro::db_test; -use zksync_dal::StorageProcessor; +use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::web3::types::{Address, BlockNumber}; use zksync_types::{ l1::{L1Tx, OpProcessingType, PriorityQueueType}, diff --git a/core/bin/zksync_core/src/fee_monitor/mod.rs b/core/bin/zksync_core/src/fee_monitor/mod.rs index 99a0bbfe1fa8..9d6e88e6bd8d 100644 --- a/core/bin/zksync_core/src/fee_monitor/mod.rs +++ b/core/bin/zksync_core/src/fee_monitor/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; use zksync_config::ZkSyncConfig; use zksync_dal::ConnectionPool; -use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_eth_client::{clients::http::PKSigningClient, EthInterface}; use zksync_types::{ api::BlockId, AccountTreeId, Address, L1BatchNumber, L2_ETH_TOKEN_ADDRESS, U256, }; @@ -42,18 +42,14 @@ pub struct FeeMonitor { fee_account_address: Address, storage: ConnectionPool, - client: EthereumClient, + client: PKSigningClient, next_finalized_block: L1BatchNumber, } impl FeeMonitor { - pub async fn new( - config: &ZkSyncConfig, - storage: ConnectionPool, - client: EthereumClient, - ) -> Self { - let mut storage_processor = storage.access_storage().await; + pub fn new(config: &ZkSyncConfig, storage: ConnectionPool, client: PKSigningClient) -> Self { + let mut storage_processor = storage.access_storage_blocking(); let latest_l1_batch_finalized = storage_processor .blocks_dal() .get_number_of_last_block_executed_on_eth() @@ -83,7 +79,7 @@ impl FeeMonitor { async fn run_iter(&mut self) { let last_finalized = { - let mut storage = self.storage.access_storage().await; + let mut storage = self.storage.access_storage_blocking(); storage .blocks_dal() .get_number_of_last_block_executed_on_eth() @@ -96,15 +92,11 @@ impl FeeMonitor { // Only report data if new blocks were finalized. 
if last_finalized >= self.next_finalized_block { - let _ = self - .report_collected_fees(last_finalized) - .await - .map_err(|err| { - vlog::warn!("Unable to report collected fees in fee monitor: {err}"); - }); + let _ = self.report_collected_fees(last_finalized).map_err(|err| { + vlog::warn!("Unable to report collected fees in fee monitor: {err}"); + }); let _ = self .report_l1_batch_finalized(last_finalized) - .await .map_err(|err| { vlog::warn!("Unable to report l1 batch finalization in fee monitor: {err}"); }); @@ -114,7 +106,7 @@ impl FeeMonitor { } async fn report_balances(&self) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage().await; + let mut storage = self.storage.access_storage_blocking(); let mut operator_balance_l1 = self .client .eth_balance(self.operator_address, COMPONENT_NAME) @@ -152,8 +144,8 @@ impl FeeMonitor { Ok(()) } - async fn report_collected_fees(&mut self, last_finalized: L1BatchNumber) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage().await; + fn report_collected_fees(&mut self, last_finalized: L1BatchNumber) -> anyhow::Result<()> { + let mut storage = self.storage.access_storage_blocking(); for block_number in block_range(self.next_finalized_block, last_finalized) { let collected_fees = storage .fee_monitor_dal() @@ -172,11 +164,8 @@ impl FeeMonitor { Ok(()) } - async fn report_l1_batch_finalized( - &mut self, - last_finalized: L1BatchNumber, - ) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage().await; + fn report_l1_batch_finalized(&mut self, last_finalized: L1BatchNumber) -> anyhow::Result<()> { + let mut storage = self.storage.access_storage_blocking(); for block_number in block_range(self.next_finalized_block, last_finalized) { let block_data = storage .fee_monitor_dal() diff --git a/core/bin/zksync_core/src/genesis.rs b/core/bin/zksync_core/src/genesis.rs index bc0a1a6e266e..3fdcfef42f75 100644 --- a/core/bin/zksync_core/src/genesis.rs +++ b/core/bin/zksync_core/src/genesis.rs @@ -2,8 +2,15 @@ //! It initializes the Merkle tree with the basic setup (such as fields of special service accounts), //! setups the required databases, and outputs the data required to initialize a smart contract. 
+use crate::sync_layer::genesis::fetch_base_system_contracts; use tempfile::TempDir; use vm::zk_evm::aux_structures::{LogQuery, Timestamp}; + +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_dal::StorageProcessor; +use zksync_merkle_tree::ZkSyncTree; +use zksync_storage::db::Database; +use zksync_storage::RocksDB; use zksync_types::{ block::DeployedContract, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, @@ -12,62 +19,88 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - Address, L1BatchNumber, MiniblockNumber, StorageLog, StorageLogKind, H256, + Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageLog, StorageLogKind, H256, }; use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash}; - -use zksync_config::ZkSyncConfig; -use zksync_contracts::BaseSystemContracts; -use zksync_merkle_tree::ZkSyncTree; - -use zksync_dal::StorageProcessor; -use zksync_storage::db::Database; - -use zksync_storage::RocksDB; +use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient}; + +#[derive(Debug, Clone)] +pub enum GenesisParams { + MainNode { + first_validator: Address, + }, + ExternalNode { + main_node_url: String, + base_system_contracts_hashes: BaseSystemContractsHashes, + }, +} pub async fn ensure_genesis_state( storage: &mut StorageProcessor<'_>, - config: &ZkSyncConfig, + zksync_chain_id: L2ChainId, + genesis_params: GenesisParams, ) -> H256 { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new(db); - let mut transaction = storage.start_transaction().await; + let mut transaction = storage.start_transaction_blocking(); // return if genesis block was already processed if !transaction.blocks_dal().is_genesis_needed() { vlog::debug!("genesis is not needed!"); return transaction .blocks_dal() - .get_storage_block(L1BatchNumber(0)) - .expect("genesis block is not found") - .hash - .map(|h| H256::from_slice(&h)) + .get_block_state_root(L1BatchNumber(0)) .expect("genesis block hash is empty"); } + vlog::info!("running regenesis"); - // For now we consider the operator to be the first validator. - let first_validator_address = config.eth_sender.sender.operator_commit_eth_addr; - let chain_id = H256::from_low_u64_be(config.chain.eth.zksync_network_id as u64); + // If running main node, load base system contracts from disk. + // If running external node, request contracts and first validator address from the main node. + let (base_system_contracts, first_validator_address) = match genesis_params { + GenesisParams::ExternalNode { + main_node_url, + base_system_contracts_hashes, + } => { + // These have to be *initial* base contract hashes of main node + // (those that were used during genesis), not necessarily the current ones. 
+ let contracts = + fetch_base_system_contracts(&main_node_url, base_system_contracts_hashes) + .await + .expect("Failed to fetch base system contracts from main node"); + + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + let first_validator = client + .get_block_details(MiniblockNumber(0)) + .await + .ok() + .flatten() + .expect("Failed to fetch genesis miniblock") + .operator_address; + + (contracts, first_validator) + } + GenesisParams::MainNode { first_validator } => { + (BaseSystemContracts::load_from_disk(), first_validator) + } + }; - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let base_system_contracts_hash = base_system_contracts.hashes(); + let base_system_contracts_hashes = base_system_contracts.hashes(); - chain_schema_genesis( + create_genesis_block( &mut transaction, first_validator_address, - chain_id, + zksync_chain_id, base_system_contracts, - ) - .await; + ); vlog::info!("chain_schema_genesis is complete"); let storage_logs = crate::metadata_calculator::get_logs_for_l1_batch(&mut transaction, L1BatchNumber(0)); let metadata = tree.process_block(storage_logs.unwrap().storage_logs); - let genesis_root_hash = H256::from_slice(&metadata.root_hash); + let genesis_root_hash = metadata.root_hash; let rollup_last_leaf_index = metadata.rollup_last_leaf_index; let block_commitment = BlockCommitment::new( @@ -76,11 +109,11 @@ pub async fn ensure_genesis_state( genesis_root_hash, vec![], vec![], - config.chain.state_keeper.bootloader_hash, - config.chain.state_keeper.default_aa_hash, + base_system_contracts_hashes.bootloader, + base_system_contracts_hashes.default_aa, ); - operations_schema_genesis( + save_genesis_block_metadata( &mut transaction, &block_commitment, genesis_root_hash, @@ -88,16 +121,13 @@ pub async fn ensure_genesis_state( ); vlog::info!("operations_schema_genesis is complete"); - transaction.commit().await; + transaction.commit_blocking(); // We need to `println` this value because it will be used to initialize the smart contract. 
+ println!("CONTRACTS_GENESIS_ROOT={:?}", genesis_root_hash); println!( - "CONTRACTS_GENESIS_ROOT=0x{}", - hex::encode(genesis_root_hash) - ); - println!( - "CONTRACTS_GENESIS_BLOCK_COMMITMENT=0x{}", - hex::encode(block_commitment.hash().commitment) + "CONTRACTS_GENESIS_BLOCK_COMMITMENT={:?}", + block_commitment.hash().commitment ); println!( "CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX={}", @@ -105,11 +135,11 @@ pub async fn ensure_genesis_state( ); println!( "CHAIN_STATE_KEEPER_BOOTLOADER_HASH={:?}", - base_system_contracts_hash.bootloader + base_system_contracts_hashes.bootloader ); println!( "CHAIN_STATE_KEEPER_DEFAULT_AA_HASH={:?}", - base_system_contracts_hash.default_aa + base_system_contracts_hashes.default_aa ); genesis_root_hash @@ -135,10 +165,10 @@ fn insert_base_system_contracts_to_factory_deps( .insert_factory_deps(MiniblockNumber(0), factory_deps); } -async fn insert_system_contracts( +fn insert_system_contracts( storage: &mut StorageProcessor<'_>, contracts: Vec, - chain_id: H256, + chain_id: L2ChainId, ) { let system_context_init_logs = (H256::default(), get_system_context_init_logs(chain_id)); @@ -157,7 +187,7 @@ async fn insert_system_contracts( .chain(Some(system_context_init_logs)) .collect(); - let mut transaction = storage.start_transaction().await; + let mut transaction = storage.start_transaction_blocking(); transaction .storage_logs_dal() @@ -195,10 +225,6 @@ async fn insert_system_contracts( let (_, deduped_log_queries) = sort_storage_access_queries(&log_queries); - transaction - .storage_logs_dedup_dal() - .insert_storage_logs(L1BatchNumber(0), &deduped_log_queries); - let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries .into_iter() .partition(|log_query| log_query.rw_flag); @@ -219,13 +245,13 @@ async fn insert_system_contracts( .storage_dal() .insert_factory_deps(MiniblockNumber(0), factory_deps); - transaction.commit().await; + transaction.commit_blocking(); } -pub(crate) async fn chain_schema_genesis<'a>( +pub(crate) fn create_genesis_block( storage: &mut StorageProcessor<'_>, first_validator_address: Address, - chain_id: H256, + chain_id: L2ChainId, base_system_contracts: BaseSystemContracts, ) { let mut zero_block_header = L1BatchHeader::new( @@ -248,7 +274,7 @@ pub(crate) async fn chain_schema_genesis<'a>( base_system_contracts_hashes: base_system_contracts.hashes(), }; - let mut transaction = storage.start_transaction().await; + let mut transaction = storage.start_transaction_blocking(); transaction .blocks_dal() @@ -263,14 +289,14 @@ pub(crate) async fn chain_schema_genesis<'a>( insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts); let contracts = get_system_smart_contracts(); - insert_system_contracts(&mut transaction, contracts, chain_id).await; + insert_system_contracts(&mut transaction, contracts, chain_id); - add_eth_token(&mut transaction).await; + add_eth_token(&mut transaction); - transaction.commit().await; + transaction.commit_blocking(); } -pub(crate) async fn add_eth_token(storage: &mut StorageProcessor<'_>) { +pub(crate) fn add_eth_token(storage: &mut StorageProcessor<'_>) { let eth_token = TokenInfo { l1_address: ETHEREUM_ADDRESS, l2_address: ETHEREUM_ADDRESS, @@ -281,17 +307,17 @@ pub(crate) async fn add_eth_token(storage: &mut StorageProcessor<'_>) { }, }; - let mut transaction = storage.start_transaction().await; + let mut transaction = storage.start_transaction_blocking(); transaction.tokens_dal().add_tokens(vec![eth_token.clone()]); transaction .tokens_dal() 
.update_well_known_l1_token(ÐEREUM_ADDRESS, eth_token.metadata); - transaction.commit().await; + transaction.commit_blocking(); } -pub(crate) fn operations_schema_genesis( +pub(crate) fn save_genesis_block_metadata( storage: &mut StorageProcessor<'_>, block_commitment: &BlockCommitment, genesis_root_hash: H256, diff --git a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs index dcd5c7d7ddf4..0614f66741c1 100644 --- a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -1,10 +1,19 @@ -use crate::house_keeper::periodic_job::PeriodicJob; use zksync_dal::ConnectionPool; -#[derive(Debug, Default)] -pub struct L1BatchMetricsReporter; +use crate::house_keeper::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub struct L1BatchMetricsReporter { + reporting_interval_ms: u64, +} impl L1BatchMetricsReporter { + pub fn new(reporting_interval_ms: u64) -> Self { + Self { + reporting_interval_ms, + } + } + fn report_metrics(&self, connection_pool: ConnectionPool) { let mut conn = connection_pool.access_storage_blocking(); let mut block_metrics = vec![ @@ -62,9 +71,12 @@ impl L1BatchMetricsReporter { impl PeriodicJob for L1BatchMetricsReporter { const SERVICE_NAME: &'static str = "L1BatchMetricsReporter"; - const POLLING_INTERVAL_MS: u64 = 10000; fn run_routine_task(&mut self, connection_pool: ConnectionPool) { self.report_metrics(connection_pool); } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms + } } diff --git a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs index 24ccfcb8fd1f..2a2f6e0a9c7b 100644 --- a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs +++ b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs @@ -1,15 +1,28 @@ use zksync_dal::ConnectionPool; -use zksync_object_store::object_store::{ - DynamicObjectStore, ObjectStoreError, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, PROVER_JOBS_BUCKET_PATH, - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, WITNESS_INPUT_BUCKET_PATH, -}; +use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}; use crate::house_keeper::periodic_job::PeriodicJob; +trait AsBlobUrls { + fn as_blob_urls(&self) -> (&str, Option<&str>); +} + +impl AsBlobUrls for String { + fn as_blob_urls(&self) -> (&str, Option<&str>) { + (self.as_str(), None) + } +} + +impl AsBlobUrls for (String, String) { + fn as_blob_urls(&self) -> (&str, Option<&str>) { + (self.0.as_str(), Some(self.1.as_str())) + } +} + #[derive(Debug)] pub struct GcsBlobCleaner { - pub object_store: DynamicObjectStore, + object_store: Box, + cleaning_interval_ms: u64, } const BATCH_CLEANUP_SIZE: u8 = 5; @@ -21,14 +34,19 @@ fn handle_remove_result(result: Result<(), ObjectStoreError>) { // in this scenario the retry of removal from GCS would fail as the object is already removed. 
// Hence we ignore the KeyNotFound error below ObjectStoreError::KeyNotFound(_) => {} - ObjectStoreError::Other(err) => { - panic!("{:?}", err) - } + other => panic!("{:?}", other), } } } impl GcsBlobCleaner { + pub fn new(store_factory: &ObjectStoreFactory, cleaning_interval_ms: u64) -> Self { + Self { + object_store: store_factory.create_store(), + cleaning_interval_ms, + } + } + fn cleanup_blobs(&mut self, pool: ConnectionPool) { self.cleanup_prover_jobs_blobs(pool.clone()); self.cleanup_witness_inputs_blobs(pool.clone()); @@ -37,43 +55,40 @@ impl GcsBlobCleaner { self.cleanup_scheduler_witness_jobs_blobs(pool); } - fn cleanup_prover_jobs_blobs(&mut self, pool: ConnectionPool) { + fn cleanup_prover_jobs_blobs(&self, pool: ConnectionPool) { let mut conn = pool.access_storage_blocking(); - let id_blob_urls_tuple = conn + let blob_urls = conn .prover_dal() .get_circuit_input_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let (ids, circuit_input_blob_urls): (Vec<_>, Vec<_>) = - id_blob_urls_tuple.into_iter().unzip(); + let ids = self.cleanup_blob_urls(Bucket::ProverJobs, blob_urls); + conn.prover_dal().mark_gcs_blobs_as_cleaned(ids); + } - if !ids.is_empty() { - vlog::info!("Found {} provers jobs for cleaning blobs", ids.len()); + fn cleanup_blob_urls( + &self, + bucket: Bucket, + blob_urls: Vec<(i64, S)>, + ) -> Vec { + if !blob_urls.is_empty() { + vlog::info!("Found {} {} for cleaning blobs", blob_urls.len(), bucket); } - circuit_input_blob_urls.into_iter().for_each(|url| { - handle_remove_result(self.object_store.remove(PROVER_JOBS_BUCKET_PATH, url)); - }); - - conn.prover_dal().mark_gcs_blobs_as_cleaned(ids); + for (_, url) in &blob_urls { + let (first_url, second_url) = url.as_blob_urls(); + handle_remove_result(self.object_store.remove_raw(bucket, first_url)); + if let Some(second_url) = second_url { + handle_remove_result(self.object_store.remove_raw(bucket, second_url)); + } + } + blob_urls.into_iter().map(|(id, _)| id).collect() } - fn cleanup_witness_inputs_blobs(&mut self, pool: ConnectionPool) { + fn cleanup_witness_inputs_blobs(&self, pool: ConnectionPool) { let mut conn = pool.access_storage_blocking(); - let l1_batches_blob_urls_tuple = conn + let blob_urls = conn .blocks_dal() .get_merkle_tree_paths_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let (l1_batch_numbers, merkle_tree_paths_blob_urls): (Vec<_>, Vec<_>) = - l1_batches_blob_urls_tuple.into_iter().unzip(); - - if !l1_batch_numbers.is_empty() { - vlog::info!( - "Found {} witness inputs for cleaning blobs", - l1_batch_numbers.len() - ); - } - - merkle_tree_paths_blob_urls.into_iter().for_each(|url| { - handle_remove_result(self.object_store.remove(WITNESS_INPUT_BUCKET_PATH, url)); - }); + let l1_batch_numbers = self.cleanup_blob_urls(Bucket::WitnessInput, blob_urls); conn.blocks_dal() .mark_gcs_blobs_as_cleaned(l1_batch_numbers); } @@ -81,104 +96,36 @@ impl GcsBlobCleaner { fn cleanup_leaf_aggregation_witness_jobs_blobs(&mut self, pool: ConnectionPool) { let mut conn = pool.access_storage_blocking(); - let l1_batches_blob_urls_tuple = conn + let blob_urls = conn .witness_generator_dal() .get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let (l1_batch_numbers, basic_circuit_and_circuit_inputs_blob_urls): (Vec<_>, Vec<_>) = - l1_batches_blob_urls_tuple.into_iter().unzip(); - - if !l1_batch_numbers.is_empty() { - vlog::info!( - "Found {} leaf aggregation witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); - } - - basic_circuit_and_circuit_inputs_blob_urls - .into_iter() - 
.for_each(|url_pair| { - handle_remove_result( - self.object_store - .remove(LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.0), - ); - handle_remove_result( - self.object_store - .remove(LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.1), - ); - }); - + let l1_batch_numbers = + self.cleanup_blob_urls(Bucket::LeafAggregationWitnessJobs, blob_urls); conn.witness_generator_dal() .mark_leaf_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers); } fn cleanup_node_aggregation_witness_jobs_blobs(&mut self, pool: ConnectionPool) { let mut conn = pool.access_storage_blocking(); - let l1_batches_blob_urls_tuple = conn + let blob_urls = conn .witness_generator_dal() .get_leaf_layer_subqueues_and_aggregation_outputs_blob_urls_to_be_cleaned( BATCH_CLEANUP_SIZE, ); - - let (l1_batch_numbers, leaf_layer_subqueues_and_aggregation_outputs_blob_urls): ( - Vec<_>, - Vec<_>, - ) = l1_batches_blob_urls_tuple.into_iter().unzip(); - - if !l1_batch_numbers.is_empty() { - vlog::info!( - "Found {} node aggregation witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); - } - - leaf_layer_subqueues_and_aggregation_outputs_blob_urls - .into_iter() - .for_each(|url_pair| { - handle_remove_result( - self.object_store - .remove(NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.0), - ); - handle_remove_result( - self.object_store - .remove(NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.1), - ); - }); + let l1_batch_numbers = + self.cleanup_blob_urls(Bucket::NodeAggregationWitnessJobs, blob_urls); conn.witness_generator_dal() .mark_node_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers); } fn cleanup_scheduler_witness_jobs_blobs(&mut self, pool: ConnectionPool) { let mut conn = pool.access_storage_blocking(); - let l1_batches_blob_urls_tuple = conn + let blob_urls = conn .witness_generator_dal() .get_scheduler_witness_and_node_aggregations_blob_urls_to_be_cleaned( BATCH_CLEANUP_SIZE, ); - - let (l1_batch_numbers, scheduler_witness_and_node_aggregations_blob_urls): ( - Vec<_>, - Vec<_>, - ) = l1_batches_blob_urls_tuple.into_iter().unzip(); - - if !l1_batch_numbers.is_empty() { - vlog::info!( - "Found {} scheduler witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); - } - - scheduler_witness_and_node_aggregations_blob_urls - .into_iter() - .for_each(|url_pair| { - handle_remove_result( - self.object_store - .remove(SCHEDULER_WITNESS_JOBS_BUCKET_PATH, url_pair.0), - ); - handle_remove_result( - self.object_store - .remove(SCHEDULER_WITNESS_JOBS_BUCKET_PATH, url_pair.1), - ); - }); + let l1_batch_numbers = self.cleanup_blob_urls(Bucket::SchedulerWitnessJobs, blob_urls); conn.witness_generator_dal() .mark_scheduler_witness_gcs_blobs_as_cleaned(l1_batch_numbers); } @@ -186,9 +133,12 @@ impl GcsBlobCleaner { impl PeriodicJob for GcsBlobCleaner { const SERVICE_NAME: &'static str = "GcsBlobCleaner"; - const POLLING_INTERVAL_MS: u64 = 5000; fn run_routine_task(&mut self, connection_pool: ConnectionPool) { self.cleanup_blobs(connection_pool); } + + fn polling_interval_ms(&self) -> u64 { + self.cleaning_interval_ms + } } diff --git a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index c7284e986cee..27c2792ec1f5 100644 --- a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -2,8 +2,20 @@ use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; -#[derive(Debug, Default)] -pub struct 
GpuProverQueueMonitor {} +#[derive(Debug)] +pub struct GpuProverQueueMonitor { + synthesizer_per_gpu: u16, + reporting_interval_ms: u64, +} + +impl GpuProverQueueMonitor { + pub fn new(synthesizer_per_gpu: u16, reporting_interval_ms: u64) -> Self { + Self { + synthesizer_per_gpu, + reporting_interval_ms, + } + } +} /// Invoked periodically to push prover job statistics to Prometheus /// Note: these values will be used for auto-scaling circuit-synthesizer @@ -11,22 +23,32 @@ impl PeriodicJob for GpuProverQueueMonitor { const SERVICE_NAME: &'static str = "GpuProverQueueMonitor"; fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - let free_prover_instance_count = connection_pool + let prover_gpu_count_per_region_zone = connection_pool .access_storage_blocking() .gpu_prover_queue_dal() - .get_count_of_jobs_ready_for_processing(); + .get_prover_gpu_count_per_region_zone(); - if free_prover_instance_count > 0 { - vlog::info!( - "Found {} free circuit synthesizer jobs", - free_prover_instance_count + for ((region, zone), num_gpu) in prover_gpu_count_per_region_zone { + let synthesizers = self.synthesizer_per_gpu as u64 * num_gpu; + if synthesizers > 0 { + vlog::info!( + "Would be spawning {} circuit synthesizers in region {} zone {}", + synthesizers, + region, + zone + ); + } + metrics::gauge!( + "server.circuit_synthesizer.jobs", + synthesizers as f64, + "region" => region, + "zone" => zone, + "type" => "queued" ); } + } - metrics::gauge!( - "server.circuit_synthesizer.jobs", - free_prover_instance_count as f64, - "type" => "queued" - ); + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms } } diff --git a/core/bin/zksync_core/src/house_keeper/mod.rs b/core/bin/zksync_core/src/house_keeper/mod.rs index 54c708f9f3ab..d467b1287d4b 100644 --- a/core/bin/zksync_core/src/house_keeper/mod.rs +++ b/core/bin/zksync_core/src/house_keeper/mod.rs @@ -2,6 +2,7 @@ pub mod blocks_state_reporter; pub mod gcs_blob_cleaner; pub mod gpu_prover_queue_monitor; pub mod periodic_job; +pub mod prover_job_retry_manager; pub mod prover_queue_monitor; -pub mod witness_generator_misc_reporter; +pub mod waiting_to_queued_witness_job_mover; pub mod witness_generator_queue_monitor; diff --git a/core/bin/zksync_core/src/house_keeper/periodic_job.rs b/core/bin/zksync_core/src/house_keeper/periodic_job.rs index edded0668490..190e6e2b3fd8 100644 --- a/core/bin/zksync_core/src/house_keeper/periodic_job.rs +++ b/core/bin/zksync_core/src/house_keeper/periodic_job.rs @@ -6,11 +6,10 @@ use tokio::time::sleep; use zksync_dal::ConnectionPool; #[async_trait] -pub trait PeriodicJob { +pub trait PeriodicJob: Sync + Send { const SERVICE_NAME: &'static str; - const POLLING_INTERVAL_MS: u64 = 1000; - /// Runs the routine task periodically in `POLLING_INTERVAL_MS` frequency. + /// Runs the routine task periodically in [`Self::polling_interval_ms()`] frequency. 
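The housekeeping jobs in this diff all switch from a `POLLING_INTERVAL_MS` associated constant to an instance-level `polling_interval_ms()` method, so each interval can be taken from `HouseKeeperConfig`. A simplified, synchronous sketch of that shape follows; `IntervalReporter` is a made-up job, and the real trait is async, takes a `ConnectionPool`, and loops until shutdown.

use std::thread::sleep;
use std::time::Duration;

// Simplified stand-in for the `PeriodicJob` trait in this PR:
// the polling interval is read from the instance instead of an associated const.
trait PeriodicJob {
    const SERVICE_NAME: &'static str;

    fn run_routine_task(&mut self);

    fn polling_interval_ms(&self) -> u64;

    // The real trait loops forever; this sketch runs a fixed number of iterations
    // so the example terminates.
    fn run(mut self, iterations: usize)
    where
        Self: Sized,
    {
        println!(
            "Starting periodic job: {} with frequency: {} ms",
            Self::SERVICE_NAME,
            self.polling_interval_ms()
        );
        for _ in 0..iterations {
            self.run_routine_task();
            sleep(Duration::from_millis(self.polling_interval_ms()));
        }
    }
}

// A job configured with its reporting interval, mirroring e.g. `L1BatchMetricsReporter::new`.
struct IntervalReporter {
    reporting_interval_ms: u64,
    runs: u32,
}

impl PeriodicJob for IntervalReporter {
    const SERVICE_NAME: &'static str = "IntervalReporter";

    fn run_routine_task(&mut self) {
        self.runs += 1;
        println!("run #{}", self.runs);
    }

    fn polling_interval_ms(&self) -> u64 {
        self.reporting_interval_ms
    }
}

fn main() {
    IntervalReporter { reporting_interval_ms: 10, runs: 0 }.run(3);
}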
fn run_routine_task(&mut self, connection_pool: ConnectionPool); async fn run(mut self, connection_pool: ConnectionPool) @@ -20,11 +19,13 @@ pub trait PeriodicJob { vlog::info!( "Starting periodic job: {} with frequency: {} ms", Self::SERVICE_NAME, - Self::POLLING_INTERVAL_MS + self.polling_interval_ms() ); loop { self.run_routine_task(connection_pool.clone()); - sleep(Duration::from_millis(Self::POLLING_INTERVAL_MS)).await; + sleep(Duration::from_millis(self.polling_interval_ms())).await; } } + + fn polling_interval_ms(&self) -> u64; } diff --git a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs new file mode 100644 index 000000000000..b362e888a0d0 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs @@ -0,0 +1,43 @@ +use std::time::Duration; + +use zksync_dal::ConnectionPool; + +use crate::house_keeper::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub struct ProverJobRetryManager { + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, +} + +impl ProverJobRetryManager { + pub fn new(max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64) -> Self { + Self { + max_attempts, + processing_timeout, + retry_interval_ms, + } + } +} + +/// Invoked periodically to re-queue stuck prover jobs. +impl PeriodicJob for ProverJobRetryManager { + const SERVICE_NAME: &'static str = "ProverJobRetryManager"; + + fn run_routine_task(&mut self, connection_pool: ConnectionPool) { + let stuck_jobs = connection_pool + .access_storage_blocking() + .prover_dal() + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts); + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing prover job {:?}", stuck_job); + } + metrics::counter!("server.prover.requeued_jobs", job_len as u64); + } + + fn polling_interval_ms(&self) -> u64 { + self.retry_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs index a8729957faea..3ad081646264 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs @@ -1,21 +1,19 @@ -use std::collections::HashMap; - use zksync_config::configs::ProverGroupConfig; use zksync_dal::ConnectionPool; use zksync_prover_utils::circuit_name_to_numeric_index; -use zksync_types::proofs::JobCountStatistics; use crate::house_keeper::periodic_job::PeriodicJob; -const PROVER_SERVICE_NAME: &str = "prover"; - -#[derive(Debug, Default)] -pub struct ProverStatsReporter {} +#[derive(Debug)] +pub struct ProverStatsReporter { + reporting_interval_ms: u64, +} impl ProverStatsReporter { - fn get_job_statistics(connection_pool: ConnectionPool) -> HashMap { - let mut conn = connection_pool.access_storage_blocking(); - conn.prover_dal().get_prover_jobs_stats_per_circuit() + pub fn new(reporting_interval_ms: u64) -> Self { + Self { + reporting_interval_ms, + } } } @@ -26,31 +24,48 @@ impl PeriodicJob for ProverStatsReporter { fn run_routine_task(&mut self, connection_pool: ConnectionPool) { let prover_group_config = ProverGroupConfig::from_env(); - let stats = Self::get_job_statistics(connection_pool); - let prover_group_to_stats: HashMap = stats - .into_iter() - .map(|(key, value)| { - ( - prover_group_config - .get_group_id_for_circuit_id(circuit_name_to_numeric_index(&key).unwrap()) - .unwrap(), - value, - ) - }) - .collect(); - for 
(group_id, stats) in prover_group_to_stats.into_iter() { + let mut conn = connection_pool.access_storage_blocking(); + let stats = conn.prover_dal().get_prover_jobs_stats_per_circuit(); + + for (circuit_name, stats) in stats.into_iter() { + let group_id = prover_group_config + .get_group_id_for_circuit_id(circuit_name_to_numeric_index(&circuit_name).unwrap()) + .unwrap(); + metrics::gauge!( - format!("server.{}.jobs", PROVER_SERVICE_NAME), + "server.prover.jobs", stats.queued as f64, "type" => "queued", "prover_group_id" => group_id.to_string(), + "circuit_name" => circuit_name.clone(), + "circuit_type" => circuit_name_to_numeric_index(&circuit_name).unwrap().to_string() ); metrics::gauge!( - format!("server.{}.jobs", PROVER_SERVICE_NAME), + "server.prover.jobs", stats.in_progress as f64, - "type" => "in_progress", "prover_group_id" => group_id.to_string(), + "type" => "in_progress", + "prover_group_id" => group_id.to_string(), + "circuit_name" => circuit_name.clone(), + "circuit_type" => circuit_name_to_numeric_index(&circuit_name).unwrap().to_string() ); } + + if let Some(min_unproved_l1_batch_number) = conn.prover_dal().min_unproved_l1_batch_number() + { + metrics::gauge!("server.block_number", min_unproved_l1_batch_number.0 as f64, "stage" => "circuit_aggregation") + } + + let lag_by_circuit_type = conn + .prover_dal() + .min_unproved_l1_batch_number_by_basic_circuit_type(); + + for (circuit_type, l1_batch_number) in lag_by_circuit_type { + metrics::gauge!("server.block_number", l1_batch_number.0 as f64, "stage" => format!("circuit_{}", circuit_type)); + } + } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms } } diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs new file mode 100644 index 000000000000..db46f756490e --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs @@ -0,0 +1,88 @@ +use zksync_dal::ConnectionPool; + +use crate::house_keeper::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub struct WaitingToQueuedWitnessJobMover { + job_moving_interval_ms: u64, +} + +impl WaitingToQueuedWitnessJobMover { + pub fn new(job_mover_interval_ms: u64) -> Self { + Self { + job_moving_interval_ms: job_mover_interval_ms, + } + } + + fn move_jobs(&mut self, pool: ConnectionPool) { + self.move_leaf_aggregation_jobs(pool.clone()); + self.move_node_aggregation_jobs(pool.clone()); + self.move_scheduler_jobs(pool); + } + + fn move_leaf_aggregation_jobs(&mut self, pool: ConnectionPool) { + let mut conn = pool.access_storage_blocking(); + let l1_batch_numbers = conn + .witness_generator_dal() + .move_leaf_aggregation_jobs_from_waiting_to_queued(); + let len = l1_batch_numbers.len(); + for l1_batch_number in l1_batch_numbers { + vlog::info!( + "Marked leaf aggregation job for l1_batch {} as queued", + l1_batch_number + ); + } + metrics::counter!( + "server.leaf_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } + + fn move_node_aggregation_jobs(&mut self, pool: ConnectionPool) { + let mut conn = pool.access_storage_blocking(); + let l1_batch_numbers = conn + .witness_generator_dal() + .move_node_aggregation_jobs_from_waiting_to_queued(); + let len = l1_batch_numbers.len(); + for l1_batch_number in l1_batch_numbers { + vlog::info!( + "Marking node aggregation job for l1_batch {} as queued", + l1_batch_number + ); + } + metrics::counter!( + 
"server.node_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } + + fn move_scheduler_jobs(&mut self, pool: ConnectionPool) { + let mut conn = pool.access_storage_blocking(); + let l1_batch_numbers = conn + .witness_generator_dal() + .move_scheduler_jobs_from_waiting_to_queued(); + let len = l1_batch_numbers.len(); + for l1_batch_number in l1_batch_numbers { + vlog::info!( + "Marking scheduler aggregation job for l1_batch {} as queued", + l1_batch_number + ); + } + metrics::counter!( + "server.scheduler_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } +} + +impl PeriodicJob for WaitingToQueuedWitnessJobMover { + const SERVICE_NAME: &'static str = "WaitingToQueuedWitnessJobMover"; + + fn run_routine_task(&mut self, connection_pool: ConnectionPool) { + self.move_jobs(connection_pool); + } + + fn polling_interval_ms(&self) -> u64 { + self.job_moving_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs deleted file mode 100644 index ee01b66e4c57..000000000000 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::house_keeper::periodic_job::PeriodicJob; -use zksync_config::configs::{prover::ProverConfig, witness_generator::WitnessGeneratorConfig}; -use zksync_dal::ConnectionPool; - -#[derive(Debug)] -pub struct WitnessGeneratorMetricsReporter { - pub witness_generator_config: WitnessGeneratorConfig, - pub prover_config: ProverConfig, -} - -impl WitnessGeneratorMetricsReporter { - fn report_metrics(&self, connection_pool: ConnectionPool) { - let mut conn = connection_pool.access_storage_blocking(); - let last_sealed_l1_batch_number = conn.blocks_dal().get_sealed_block_number(); - let min_unproved_l1_batch_number = conn - .prover_dal() - .min_unproved_l1_batch_number(self.prover_config.max_attempts) - .unwrap_or(last_sealed_l1_batch_number); - let prover_lag = last_sealed_l1_batch_number.0 - min_unproved_l1_batch_number.0; - metrics::gauge!("server.prover.lag", prover_lag as f64); - } -} - -impl PeriodicJob for WitnessGeneratorMetricsReporter { - const SERVICE_NAME: &'static str = "WitnessGeneratorMiscReporter"; - - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - self.report_metrics(connection_pool); - } -} diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index 268d6d6a13d7..dddc14d1b1c4 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use zksync_dal::ConnectionPool; use zksync_types::proofs::{AggregationRound, JobCountStatistics}; @@ -5,24 +7,70 @@ use crate::house_keeper::periodic_job::PeriodicJob; const WITNESS_GENERATOR_SERVICE_NAME: &str = "witness_generator"; -#[derive(Debug, Default)] -pub struct WitnessGeneratorStatsReporter {} +#[derive(Debug)] +pub struct WitnessGeneratorStatsReporter { + reporting_interval_ms: u64, +} impl WitnessGeneratorStatsReporter { - fn get_job_statistics(connection_pool: ConnectionPool) -> JobCountStatistics { + pub fn new(reporting_interval_ms: u64) -> Self { + Self { + reporting_interval_ms, + } + } + + fn get_job_statistics( + connection_pool: ConnectionPool, + ) -> HashMap { let mut conn = 
connection_pool.access_storage_blocking(); - conn.witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::BasicCircuits) - + conn - .witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::LeafAggregation) - + conn - .witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::NodeAggregation) - + conn - .witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::Scheduler) + HashMap::from([ + ( + AggregationRound::BasicCircuits, + conn.witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::BasicCircuits), + ), + ( + AggregationRound::LeafAggregation, + conn.witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::LeafAggregation), + ), + ( + AggregationRound::NodeAggregation, + conn.witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::NodeAggregation), + ), + ( + AggregationRound::Scheduler, + conn.witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::Scheduler), + ), + ]) + } +} + +fn emit_metrics_for_round(round: AggregationRound, stats: JobCountStatistics) { + if stats.queued > 0 || stats.in_progress > 0 { + vlog::trace!( + "Found {} free and {} in progress {:?} witness generators jobs", + stats.queued, + stats.in_progress, + round + ); } + + metrics::gauge!( + format!("server.{}.jobs", WITNESS_GENERATOR_SERVICE_NAME), + stats.queued as f64, + "type" => "queued", + "round" => format!("{:?}", round) + ); + + metrics::gauge!( + format!("server.{}.jobs", WITNESS_GENERATOR_SERVICE_NAME), + stats.in_progress as f64, + "type" => "in_progress", + "round" => format!("{:?}", round) + ); } /// Invoked periodically to push job statistics to Prometheus @@ -31,22 +79,35 @@ impl PeriodicJob for WitnessGeneratorStatsReporter { const SERVICE_NAME: &'static str = "WitnessGeneratorStatsReporter"; fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - let stats = Self::get_job_statistics(connection_pool); + let stats_for_all_rounds = Self::get_job_statistics(connection_pool); + let mut aggregated = JobCountStatistics::default(); + for (round, stats) in stats_for_all_rounds { + emit_metrics_for_round(round, stats); + aggregated = aggregated + stats; + } - if stats.queued > 0 { - vlog::info!("Found {} free witness generators jobs", stats.queued); + if aggregated.queued > 0 { + vlog::trace!( + "Found {} free {} in progress witness generators jobs", + aggregated.queued, + aggregated.in_progress + ); } metrics::gauge!( format!("server.{}.jobs", WITNESS_GENERATOR_SERVICE_NAME), - stats.queued as f64, + aggregated.queued as f64, "type" => "queued" ); metrics::gauge!( format!("server.{}.jobs", WITNESS_GENERATOR_SERVICE_NAME), - stats.in_progress as f64, + aggregated.in_progress as f64, "type" => "in_progress" ); } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms + } } diff --git a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs new file mode 100644 index 000000000000..07e8dcc8f014 --- /dev/null +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs @@ -0,0 +1,44 @@ +use crate::l1_gas_price::L1GasPriceProvider; +use std::fmt::Debug; +use std::sync::Arc; + +/// Gas adjuster that bounds the gas price to the specified value. +/// We need this to prevent the gas price from growing too much, because our bootloader is sensitive for the gas price and can fail if it's too high. 
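The reporter above now fetches one `JobCountStatistics` per aggregation round and emits both per-round and aggregated gauges. A self-contained sketch of that fold, with `Round` and `JobStats` as simplified stand-ins for the zksync types and `println!` standing in for the metrics macros:

use std::collections::HashMap;
use std::ops::Add;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum Round {
    BasicCircuits,
    LeafAggregation,
    NodeAggregation,
    Scheduler,
}

#[derive(Clone, Copy, Debug, Default)]
struct JobStats {
    queued: usize,
    in_progress: usize,
}

impl Add for JobStats {
    type Output = Self;
    fn add(self, other: Self) -> Self {
        Self {
            queued: self.queued + other.queued,
            in_progress: self.in_progress + other.in_progress,
        }
    }
}

fn main() {
    // Per-round statistics, as the reporter now fetches them (one DAL query per round).
    let stats: HashMap<Round, JobStats> = HashMap::from([
        (Round::BasicCircuits, JobStats { queued: 4, in_progress: 1 }),
        (Round::LeafAggregation, JobStats { queued: 2, in_progress: 0 }),
        (Round::NodeAggregation, JobStats { queued: 0, in_progress: 1 }),
        (Round::Scheduler, JobStats { queued: 1, in_progress: 0 }),
    ]);

    // Emit one labelled value per round, then an aggregate with no round label,
    // mirroring `emit_metrics_for_round` plus the summed gauges above.
    let mut aggregated = JobStats::default();
    for (round, round_stats) in stats {
        println!(
            "round={:?} queued={} in_progress={}",
            round, round_stats.queued, round_stats.in_progress
        );
        aggregated = aggregated + round_stats;
    }
    println!("total queued={} in_progress={}", aggregated.queued, aggregated.in_progress);
}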
+/// And for mainnet it's not the case, but for testnet we can have a situation when the gas price is too high. +pub struct BoundedGasAdjuster { + max_gas_price: u64, + default_gas_adjuster: Arc, +} + +impl Debug for BoundedGasAdjuster { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BoundedGasAdjuster") + .field("max_gas_price", &self.max_gas_price) + .finish() + } +} + +impl BoundedGasAdjuster { + pub fn new(max_gas_price: u64, default_gas_adjuster: Arc) -> Self { + Self { + max_gas_price, + default_gas_adjuster, + } + } +} + +impl L1GasPriceProvider for BoundedGasAdjuster { + fn estimate_effective_gas_price(&self) -> u64 { + let default_gas_price = self.default_gas_adjuster.estimate_effective_gas_price(); + if default_gas_price > self.max_gas_price { + vlog::warn!( + "Effective gas price is too high: {}, using max allowed: {}", + default_gas_price, + self.max_gas_price + ); + metrics::increment_counter!("server.state_keeper.gas_price_too_high"); + return self.max_gas_price; + } + default_gas_price + } +} diff --git a/core/bin/zksync_core/src/gas_adjuster/mod.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs similarity index 89% rename from core/bin/zksync_core/src/gas_adjuster/mod.rs rename to core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs index 867861002eb0..8abeba5be7cb 100644 --- a/core/bin/zksync_core/src/gas_adjuster/mod.rs +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs @@ -4,12 +4,13 @@ use std::collections::VecDeque; use std::sync::{Arc, RwLock}; use tokio::sync::watch::Receiver; -use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; use zksync_config::GasAdjusterConfig; -use zksync_eth_client::{clients::http_client::Error, EthInterface}; -use zksync_mempool::L2TxFilter; +use zksync_eth_client::{types::Error, EthInterface}; +use super::{L1GasPriceProvider, L1TxParamsProvider}; + +pub mod bounded_gas_adjuster; #[cfg(test)] mod tests; @@ -42,58 +43,6 @@ impl GasAdjuster { }) } - /// Returns the sum of base and priority fee, in wei, not considering time in mempool. - /// Can be used to get an estimate of current gas price. - pub fn estimate_effective_gas_price(&self) -> u64 { - if let Some(price) = self.config.internal_enforced_l1_gas_price { - return price; - } - - let effective_gas_price = self.get_base_fee(0) + self.get_priority_fee(); - - (self.config.internal_l1_pricing_multiplier * effective_gas_price as f64) as u64 - } - - // This is the method where we decide how much we are ready to pay for the - // base_fee based on the number of L1 blocks the transaction has been in the mempool. - // This is done in order to avoid base_fee spikes (e.g. during NFT drops) and - // smooth out base_fee increases in general. - // In other words, in order to pay less fees, we are ready to wait longer. - // But the longer we wait, the more we are ready to pay. - pub fn get_base_fee(&self, time_in_mempool: u32) -> u64 { - let a = self.config.pricing_formula_parameter_a; - let b = self.config.pricing_formula_parameter_b; - - // Currently we use an exponential formula. 
- // The alternative is a linear one: - // let scale_factor = a + b * time_in_mempool as f64; - let scale_factor = a * b.powf(time_in_mempool as f64); - let median = self.statistics.median(); - - metrics::gauge!("server.gas_adjuster.median_base_fee_per_gas", median as f64); - - let new_fee = median as f64 * scale_factor; - new_fee as u64 - } - - pub fn get_next_block_minimal_base_fee(&self) -> u64 { - let last_block_base_fee = self.statistics.last_added_value(); - - // The next block's base fee will decrease by a maximum of 12.5%. - last_block_base_fee * 875 / 1000 - } - - // Priority fee is set to constant, sourced from config. - // Reasoning behind this is the following: - // High priority_fee means high demand for block space, - // which means base_fee will increase, which means priority_fee - // will decrease. The EIP-1559 mechanism is designed such that - // base_fee will balance out priority_fee in such a way that - // priority_fee will be a small fraction of the overall fee. - pub fn get_priority_fee(&self) -> u64 { - self.config.default_priority_fee_per_gas - } - /// Performs an actualization routine for `GasAdjuster`. /// This method is intended to be invoked periodically. pub async fn keep_updated(&self) -> Result<(), Error> { @@ -130,18 +79,6 @@ impl GasAdjuster { Ok(()) } - pub fn l2_tx_filter(&self, fair_l2_gas_price: u64) -> L2TxFilter { - let effective_gas_price = self.estimate_effective_gas_price(); - - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(effective_gas_price, fair_l2_gas_price); - L2TxFilter { - l1_gas_price: effective_gas_price, - fee_per_gas: base_fee, - gas_per_pubdata: gas_per_pubdata as u32, - } - } - pub async fn run(self: Arc, stop_receiver: Receiver) { loop { if *stop_receiver.borrow() { @@ -158,6 +95,62 @@ impl GasAdjuster { } } +impl L1GasPriceProvider for GasAdjuster { + /// Returns the sum of base and priority fee, in wei, not considering time in mempool. + /// Can be used to get an estimate of current gas price. + fn estimate_effective_gas_price(&self) -> u64 { + if let Some(price) = self.config.internal_enforced_l1_gas_price { + return price; + } + + let effective_gas_price = self.get_base_fee(0) + self.get_priority_fee(); + + (self.config.internal_l1_pricing_multiplier * effective_gas_price as f64) as u64 + } +} + +impl L1TxParamsProvider for GasAdjuster { + // This is the method where we decide how much we are ready to pay for the + // base_fee based on the number of L1 blocks the transaction has been in the mempool. + // This is done in order to avoid base_fee spikes (e.g. during NFT drops) and + // smooth out base_fee increases in general. + // In other words, in order to pay less fees, we are ready to wait longer. + // But the longer we wait, the more we are ready to pay. + fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + let a = self.config.pricing_formula_parameter_a; + let b = self.config.pricing_formula_parameter_b; + + // Currently we use an exponential formula. + // The alternative is a linear one: + // let scale_factor = a + b * time_in_mempool as f64; + let scale_factor = a * b.powf(time_in_mempool as f64); + let median = self.statistics.median(); + + metrics::gauge!("server.gas_adjuster.median_base_fee_per_gas", median as f64); + + let new_fee = median as f64 * scale_factor; + new_fee as u64 + } + + fn get_next_block_minimal_base_fee(&self) -> u64 { + let last_block_base_fee = self.statistics.last_added_value(); + + // The next block's base fee will decrease by a maximum of 12.5%. 
+ last_block_base_fee * 875 / 1000 + } + + // Priority fee is set to constant, sourced from config. + // Reasoning behind this is the following: + // High priority_fee means high demand for block space, + // which means base_fee will increase, which means priority_fee + // will decrease. The EIP-1559 mechanism is designed such that + // base_fee will balance out priority_fee in such a way that + // priority_fee will be a small fraction of the overall fee. + fn get_priority_fee(&self) -> u64 { + self.config.default_priority_fee_per_gas + } +} + /// Helper structure responsible for collecting the data about recent transactions, /// calculating the median base fee. #[derive(Debug, Clone, Default)] diff --git a/core/bin/zksync_core/src/gas_adjuster/tests.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs similarity index 93% rename from core/bin/zksync_core/src/gas_adjuster/tests.rs rename to core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs index 40b948a6ef9c..2c2164f8fb13 100644 --- a/core/bin/zksync_core/src/gas_adjuster/tests.rs +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,12 +1,8 @@ -// Built-in uses +use super::{GasAdjuster, GasStatisticsInner}; use std::collections::VecDeque; use std::sync::Arc; -// Workspace uses use zksync_config::GasAdjusterConfig; use zksync_eth_client::clients::mock::MockEthereum; -// Local uses -use super::GasAdjuster; -use crate::gas_adjuster::GasStatisticsInner; /// Check that we compute the median correctly #[test] diff --git a/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs new file mode 100644 index 000000000000..b9b584172f2b --- /dev/null +++ b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs @@ -0,0 +1,71 @@ +use std::{ + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + +use tokio::sync::watch::Receiver; + +use zksync_web3_decl::{ + jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + namespaces::ZksNamespaceClient, +}; + +use super::L1GasPriceProvider; + +const SLEEP_INTERVAL: Duration = Duration::from_secs(5); + +/// This structure maintains the known L1 gas price by periodically querying +/// the main node. +/// It is required since the main node doesn't only observe the current L1 gas price, +/// but also applies adjustments to it in order to smooth out the spikes. +/// The same algorithm cannot be consistently replicated on the external node side, +/// since it relies on the configuration, which may change. +#[derive(Debug)] +pub struct MainNodeGasPriceFetcher { + client: HttpClient, + gas_price: AtomicU64, +} + +impl MainNodeGasPriceFetcher { + pub fn new(main_node_url: &str) -> Self { + Self { + client: Self::build_client(main_node_url), + gas_price: AtomicU64::new(1u64), // Start with 1 wei until the first update. 
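A worked example of the pricing formulas that moved into the `L1TxParamsProvider` impl above: the base fee scales exponentially with the time a transaction has spent in the mempool, and the next block's base fee can drop by at most 12.5%. The parameter values here are made up; the real `a`, `b`, and median come from `GasAdjusterConfig` and observed L1 blocks.

// Worked example of the fee formulas above, with made-up parameter values.
fn base_fee(a: f64, b: f64, median_base_fee: u64, time_in_mempool: u32) -> u64 {
    // Exponential scaling: the longer the tx has waited, the more we are willing to pay.
    let scale_factor = a * b.powf(time_in_mempool as f64);
    (median_base_fee as f64 * scale_factor) as u64
}

fn next_block_minimal_base_fee(last_block_base_fee: u64) -> u64 {
    // EIP-1559: the base fee can drop by at most 12.5% from one block to the next.
    last_block_base_fee * 875 / 1000
}

fn main() {
    let (a, b) = (1.5, 1.0005); // hypothetical pricing parameters
    let median = 30_000_000_000; // 30 gwei observed median base fee

    for blocks_in_mempool in [0u32, 100, 1000] {
        println!(
            "after {} L1 blocks in mempool: max_fee_per_gas = {} wei",
            blocks_in_mempool,
            base_fee(a, b, median, blocks_in_mempool)
        );
    }
    // If the last block's base fee was 30 gwei, the next one cannot go below 26.25 gwei.
    assert_eq!(next_block_minimal_base_fee(30_000_000_000), 26_250_000_000);
}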
+ } + } + + fn build_client(main_node_url: &str) -> HttpClient { + HttpClientBuilder::default() + .build(main_node_url) + .expect("Unable to create a main node client") + } + + pub async fn run(self: Arc, stop_receiver: Receiver) { + loop { + if *stop_receiver.borrow() { + vlog::info!("Stop signal received, MainNodeGasPriceFetcher is shutting down"); + break; + } + + let main_node_gas_price = match self.client.get_l1_gas_price().await { + Ok(price) => price, + Err(err) => { + vlog::warn!("Unable to get the gas price: {}", err); + continue; + } + }; + self.gas_price + .store(main_node_gas_price.as_u64(), Ordering::Relaxed); + tokio::time::sleep(SLEEP_INTERVAL).await; + } + } +} + +impl L1GasPriceProvider for MainNodeGasPriceFetcher { + fn estimate_effective_gas_price(&self) -> u64 { + self.gas_price.load(Ordering::Relaxed) + } +} diff --git a/core/bin/zksync_core/src/l1_gas_price/mod.rs b/core/bin/zksync_core/src/l1_gas_price/mod.rs new file mode 100644 index 000000000000..10cdf71940de --- /dev/null +++ b/core/bin/zksync_core/src/l1_gas_price/mod.rs @@ -0,0 +1,31 @@ +//! This module determines the fees to pay in txs containing blocks submitted to the L1. + +pub use gas_adjuster::bounded_gas_adjuster::BoundedGasAdjuster; +pub use gas_adjuster::GasAdjuster; +pub use main_node_fetcher::MainNodeGasPriceFetcher; + +mod gas_adjuster; +mod main_node_fetcher; + +/// Abstraction that provides information about the L1 gas price currently +/// observed by the application. +pub trait L1GasPriceProvider { + /// Returns a best guess of a realistic value for the L1 gas price. + /// Return value is in wei. + fn estimate_effective_gas_price(&self) -> u64; +} + +/// Extended version of `L1GasPriceProvider` that can provide parameters +/// to set the fee for an L1 transaction, taking the desired mining time into account. +/// +/// This trait, as a bound, should only be used in components that actually sign and send transactions. +pub trait L1TxParamsProvider: L1GasPriceProvider { + /// Returns the recommended `max_fee_per_gas` value (EIP1559). + fn get_base_fee(&self, time_in_mempool: u32) -> u64; + + /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). + fn get_priority_fee(&self) -> u64; + + /// Returns a lower bound for the `base_fee` value for the next L1 block. 
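A condensed, standalone sketch of how the pieces above compose through the `L1GasPriceProvider` trait: any provider can be wrapped by a bounding decorator, which is how `BoundedGasAdjuster` caps what the state keeper sees. `GasPriceProvider`, `FixedProvider`, and `BoundedProvider` are simplified stand-ins; the real decorator also logs a warning and bumps a metric when it clamps.

use std::sync::Arc;

// Simplified stand-in for the `L1GasPriceProvider` trait defined above.
trait GasPriceProvider {
    fn estimate_effective_gas_price(&self) -> u64;
}

// Stand-in for a "real" adjuster that watches L1.
struct FixedProvider(u64);

impl GasPriceProvider for FixedProvider {
    fn estimate_effective_gas_price(&self) -> u64 {
        self.0
    }
}

// Decorator that caps whatever the inner provider reports, like `BoundedGasAdjuster`.
struct BoundedProvider<G> {
    max_gas_price: u64,
    inner: Arc<G>,
}

impl<G: GasPriceProvider> GasPriceProvider for BoundedProvider<G> {
    fn estimate_effective_gas_price(&self) -> u64 {
        let price = self.inner.estimate_effective_gas_price();
        price.min(self.max_gas_price)
    }
}

fn main() {
    let spiky = Arc::new(FixedProvider(500_000_000_000)); // 500 gwei spike
    let bounded = BoundedProvider { max_gas_price: 100_000_000_000, inner: spiky };
    // The state keeper sees at most the configured maximum.
    assert_eq!(bounded.estimate_effective_gas_price(), 100_000_000_000);
}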
+ fn get_next_block_minimal_base_fee(&self) -> u64; +} diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index cb3938a49b87..6d0164eb5884 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -1,14 +1,15 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] +use std::future::Future; use std::str::FromStr; use std::sync::{Arc, Mutex}; +use std::time::Instant; use futures::channel::oneshot; use futures::future; -use std::time::Instant; +use tokio::runtime::Builder; use tokio::sync::watch; use tokio::task::JoinHandle; -use zksync_config::configs::WitnessGeneratorConfig; use house_keeper::periodic_job::PeriodicJob; use prometheus_exporter::run_prometheus_exporter; @@ -16,48 +17,64 @@ use zksync_circuit_breaker::{ facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, vks::VksChecker, CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, }; -use zksync_config::ZkSyncConfig; + +use zksync_config::configs::house_keeper::HouseKeeperConfig; +use zksync_config::configs::{ProverGroupConfig, WitnessGeneratorConfig}; +use zksync_config::{ProverConfigs, ZkSyncConfig}; +use zksync_dal::healthcheck::ConnectionPoolHealthCheck; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::clients::http_client::EthereumClient; -use zksync_eth_client::EthInterface; +use zksync_eth_client::clients::http::PKSigningClient; +use zksync_eth_client::BoundEthInterface; +use zksync_health_check::CheckHealth; use zksync_mempool::MempoolStore; -use zksync_object_store::object_store::create_object_store_from_env; +use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; +use zksync_types::proofs::AggregationRound; +use zksync_types::L2ChainId; +use crate::api_server::healthcheck; +use crate::api_server::tx_sender::{TxSender, TxSenderBuilder}; use crate::eth_sender::{Aggregator, EthTxManager}; use crate::fee_monitor::FeeMonitor; use crate::house_keeper::blocks_state_reporter::L1BatchMetricsReporter; use crate::house_keeper::gcs_blob_cleaner::GcsBlobCleaner; use crate::house_keeper::gpu_prover_queue_monitor::GpuProverQueueMonitor; use crate::house_keeper::{ - prover_queue_monitor::ProverStatsReporter, - witness_generator_misc_reporter::WitnessGeneratorMetricsReporter, + prover_job_retry_manager::ProverJobRetryManager, prover_queue_monitor::ProverStatsReporter, + waiting_to_queued_witness_job_mover::WaitingToQueuedWitnessJobMover, witness_generator_queue_monitor::WitnessGeneratorStatsReporter, }; -use crate::metadata_calculator::{MetadataCalculator, MetadataCalculatorMode}; +use crate::l1_gas_price::BoundedGasAdjuster; +use crate::l1_gas_price::L1GasPriceProvider; +use crate::metadata_calculator::{MetadataCalculator, TreeHealthCheck, TreeImplementation}; use crate::state_keeper::mempool_actor::MempoolFetcher; use crate::state_keeper::MempoolGuard; -use crate::witness_generator::WitnessGenerator; +use crate::witness_generator::basic_circuits::BasicWitnessGenerator; +use crate::witness_generator::leaf_aggregation::LeafAggregationWitnessGenerator; +use crate::witness_generator::node_aggregation::NodeAggregationWitnessGenerator; +use crate::witness_generator::scheduler::SchedulerWitnessGenerator; use crate::{ api_server::{explorer, web3}, data_fetchers::run_data_fetchers, eth_sender::EthTxAggregator, eth_watch::start_eth_watch, - gas_adjuster::GasAdjuster, + l1_gas_price::GasAdjuster, }; pub mod api_server; +pub mod block_reverter; +pub mod consistency_checker; pub 
mod data_fetchers; -pub mod db_storage_provider; pub mod eth_sender; pub mod eth_watch; pub mod fee_monitor; pub mod fee_ticker; -pub mod gas_adjuster; pub mod gas_tracker; pub mod genesis; pub mod house_keeper; +pub mod l1_gas_price; pub mod metadata_calculator; +pub mod reorg_detector; pub mod state_keeper; pub mod sync_layer; pub mod witness_generator; @@ -72,13 +89,13 @@ pub async fn wait_for_tasks(task_futures: Vec>, tasks_allowed_to_ if tasks_allowed_to_finish { vlog::info!("One of the actors finished its run. Finishing execution."); } else { - vlog::info!( + vlog::error!( "One of the actors finished its run, while it wasn't expected to do it" ); } } Err(error) => { - vlog::info!( + vlog::error!( "One of the tokio actors unexpectedly finished, shutting down: {:?}", error ); @@ -88,11 +105,37 @@ pub async fn wait_for_tasks(task_futures: Vec>, tasks_allowed_to_ /// Inserts the initial information about zkSync tokens into the database. pub async fn genesis_init(config: ZkSyncConfig) { - let mut storage = StorageProcessor::establish_connection(true).await; - genesis::ensure_genesis_state(&mut storage, &config).await; + let mut storage = StorageProcessor::establish_connection_blocking(true); + genesis::ensure_genesis_state( + &mut storage, + L2ChainId(config.chain.eth.zksync_network_id), + genesis::GenesisParams::MainNode { + // We consider the operator to be the first validator for now. + first_validator: config.eth_sender.sender.operator_commit_eth_addr, + }, + ) + .await; } -#[derive(Clone, Debug, PartialEq)] +/// Sets up an interrupt handler and returns a future that resolves once an interrupt signal +/// is received. +pub fn setup_sigint_handler() -> oneshot::Receiver<()> { + let (sigint_sender, sigint_receiver) = oneshot::channel(); + let mut sigint_sender = Some(sigint_sender); + ctrlc::set_handler(move || { + if let Some(sigint_sender) = sigint_sender.take() { + sigint_sender.send(()).ok(); + // ^ The send fails if `sigint_receiver` is dropped. We're OK with this, + // since at this point the node should be stopping anyway, or is not interested + // in listening to interrupt signals. + } + }) + .expect("Error setting Ctrl+C handler"); + + sigint_receiver +} + +#[derive(Debug, Clone, Copy, PartialEq)] pub enum Component { // Public Web3 API running on HTTP server. HttpApi, @@ -102,7 +145,9 @@ pub enum Component { ExplorerApi, // Metadata Calculator. Tree, + TreeNew, TreeLightweight, + TreeLightweightNew, TreeBackup, EthWatcher, // Eth tx generator @@ -113,8 +158,9 @@ pub enum Component { DataFetcher, // State keeper. StateKeeper, - // Witness Generator. The argument is a number of jobs to process. If None, runs indefinitely. - WitnessGenerator(Option), + // Witness Generator. The first argument is a number of jobs to process. If None, runs indefinitely. + // The second argument is the type of the witness-generation performed + WitnessGenerator(Option, AggregationRound), // Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. 
Housekeeper, } @@ -136,14 +182,44 @@ impl FromStr for Components { "ws_api" => Ok(Components(vec![Component::WsApi])), "explorer_api" => Ok(Components(vec![Component::ExplorerApi])), "tree" => Ok(Components(vec![Component::Tree])), + "tree_new" => Ok(Components(vec![Component::TreeNew])), "tree_lightweight" => Ok(Components(vec![Component::TreeLightweight])), + "tree_lightweight_new" => Ok(Components(vec![Component::TreeLightweightNew])), "tree_backup" => Ok(Components(vec![Component::TreeBackup])), "data_fetcher" => Ok(Components(vec![Component::DataFetcher])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "witness_generator" => Ok(Components(vec![Component::WitnessGenerator(None)])), - "one_shot_witness_generator" => { - Ok(Components(vec![Component::WitnessGenerator(Some(1))])) + "witness_generator" => Ok(Components(vec![ + Component::WitnessGenerator(None, AggregationRound::BasicCircuits), + Component::WitnessGenerator(None, AggregationRound::LeafAggregation), + Component::WitnessGenerator(None, AggregationRound::NodeAggregation), + Component::WitnessGenerator(None, AggregationRound::Scheduler), + ])), + "one_shot_witness_generator" => Ok(Components(vec![ + Component::WitnessGenerator(Some(1), AggregationRound::BasicCircuits), + Component::WitnessGenerator(Some(1), AggregationRound::LeafAggregation), + Component::WitnessGenerator(Some(1), AggregationRound::NodeAggregation), + Component::WitnessGenerator(Some(1), AggregationRound::Scheduler), + ])), + "one_shot_basic_witness_generator" => { + Ok(Components(vec![Component::WitnessGenerator( + Some(1), + AggregationRound::BasicCircuits, + )])) + } + "one_shot_leaf_witness_generator" => Ok(Components(vec![Component::WitnessGenerator( + Some(1), + AggregationRound::LeafAggregation, + )])), + "one_shot_node_witness_generator" => Ok(Components(vec![Component::WitnessGenerator( + Some(1), + AggregationRound::NodeAggregation, + )])), + "one_shot_scheduler_witness_generator" => { + Ok(Components(vec![Component::WitnessGenerator( + Some(1), + AggregationRound::Scheduler, + )])) } "eth" => Ok(Components(vec![ Component::EthWatcher, @@ -170,7 +246,7 @@ pub async fn initialize_components( vlog::info!("Starting the components: {:?}", components); let connection_pool = ConnectionPool::new(None, true); let replica_connection_pool = ConnectionPool::new(None, false); - + let mut healthchecks: Vec> = Vec::new(); let circuit_breaker_checker = CircuitBreakerChecker::new( circuit_breakers_for_components(&components, config), &config.chain.circuit_breaker, @@ -200,7 +276,7 @@ pub async fn initialize_components( .await, ); vlog::info!("initialized HTTP API in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "http_api"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "http_api"); } if components.contains(&Component::WsApi) { @@ -216,23 +292,25 @@ pub async fn initialize_components( .await, ); vlog::info!("initialized WS API in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "ws_api"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "ws_api"); } if components.contains(&Component::ExplorerApi) { let started_at = Instant::now(); vlog::info!("initializing explorer REST API"); task_futures.push(explorer::start_server_thread_detached( - config, + config.api.explorer.clone(), + 
config.contracts.l2_erc20_bridge_addr, + config.chain.state_keeper.fee_account_addr, connection_pool.clone(), - replica_connection_pool, + replica_connection_pool.clone(), stop_receiver.clone(), )); vlog::info!( "initialized explorer REST API in {:?}", started_at.elapsed() ); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "explorer_api"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "explorer_api"); } if components.contains(&Component::StateKeeper) { @@ -240,29 +318,31 @@ pub async fn initialize_components( vlog::info!("initializing State Keeper"); let state_keeper_pool = ConnectionPool::new(Some(1), true); let next_priority_id = state_keeper_pool - .access_storage() - .await + .access_storage_blocking() .transactions_dal() .next_priority_id(); let mempool = MempoolGuard(Arc::new(Mutex::new(MempoolStore::new( next_priority_id, config.chain.mempool.capacity, )))); - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let gas_adjuster = Arc::new( GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster) .await .unwrap(), ); - task_futures.push(tokio::task::spawn( - gas_adjuster.clone().run(stop_receiver.clone()), + + let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new( + config.chain.state_keeper.max_l1_gas_price(), + gas_adjuster.clone(), )); + task_futures.push(tokio::task::spawn(gas_adjuster.run(stop_receiver.clone()))); - let state_keeper_actor = crate::state_keeper::start_state_keeper( + let state_keeper_actor = state_keeper::start_state_keeper( config, &state_keeper_pool, mempool.clone(), - gas_adjuster.clone(), + bounded_gas_adjuster.clone(), stop_receiver.clone(), ); @@ -271,7 +351,7 @@ pub async fn initialize_components( })); let mempool_fetcher_pool = ConnectionPool::new(Some(1), true); - let mempool_fetcher_actor = MempoolFetcher::new(mempool, gas_adjuster, config); + let mempool_fetcher_actor = MempoolFetcher::new(mempool, bounded_gas_adjuster, config); task_futures.push(tokio::spawn(mempool_fetcher_actor.run( mempool_fetcher_pool, config.chain.mempool.remove_stuck_txs, @@ -282,20 +362,19 @@ pub async fn initialize_components( // Fee monitor is normally tied to a single instance of server, and it makes most sense to keep it together // with state keeper (since without state keeper running there should be no balance changes). 
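The `FromStr` impl for `Components` earlier in this file now fans a single CLI name out into several components, e.g. `witness_generator` becomes four `WitnessGenerator` entries, one per aggregation round. A condensed standalone sketch of that parsing, with simplified stand-in enums and only a couple of the real match arms:

use std::str::FromStr;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Round {
    BasicCircuits,
    LeafAggregation,
    NodeAggregation,
    Scheduler,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum Component {
    Housekeeper,
    WitnessGenerator(Option<usize>, Round),
}

struct Components(Vec<Component>);

impl FromStr for Components {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use Round::*;
        let all_rounds = [BasicCircuits, LeafAggregation, NodeAggregation, Scheduler];
        match s {
            "housekeeper" => Ok(Components(vec![Component::Housekeeper])),
            // Running "witness_generator" spawns all four rounds indefinitely.
            "witness_generator" => Ok(Components(
                all_rounds
                    .iter()
                    .map(|&round| Component::WitnessGenerator(None, round))
                    .collect(),
            )),
            // "one_shot_*" variants process a single job for one specific round.
            "one_shot_basic_witness_generator" => Ok(Components(vec![
                Component::WitnessGenerator(Some(1), BasicCircuits),
            ])),
            other => Err(format!("unknown component: {other}")),
        }
    }
}

fn main() {
    let parsed: Components = "witness_generator".parse().unwrap();
    assert_eq!(parsed.0.len(), 4);
}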
- let fee_monitor_eth_gateway = EthereumClient::from_config(config); + let fee_monitor_eth_gateway = PKSigningClient::from_config(config); let fee_monitor_pool = ConnectionPool::new(Some(1), true); - let fee_monitor_actor = - FeeMonitor::new(config, fee_monitor_pool, fee_monitor_eth_gateway).await; + let fee_monitor_actor = FeeMonitor::new(config, fee_monitor_pool, fee_monitor_eth_gateway); task_futures.push(tokio::spawn(fee_monitor_actor.run())); vlog::info!("initialized State Keeper in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "state_keeper"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "state_keeper"); } if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); vlog::info!("initializing ETH-Watcher"); - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let eth_watch_pool = ConnectionPool::new(Some(1), true); task_futures.push( start_eth_watch( @@ -307,14 +386,14 @@ pub async fn initialize_components( .await, ); vlog::info!("initialized ETH-Watcher in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "eth_watcher"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_watcher"); } if components.contains(&Component::EthTxAggregator) { let started_at = Instant::now(); vlog::info!("initializing ETH-TxAggregator"); let eth_sender_storage = ConnectionPool::new(Some(1), true); - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let nonce = eth_gateway.pending_nonce("eth_sender").await.unwrap(); let eth_tx_aggregator_actor = EthTxAggregator::new( config.eth_sender.sender.clone(), @@ -328,14 +407,14 @@ pub async fn initialize_components( stop_receiver.clone(), ))); vlog::info!("initialized ETH-TxAggregator in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "eth_tx_aggregator"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); } if components.contains(&Component::EthTxManager) { let started_at = Instant::now(); vlog::info!("initializing ETH-TxManager"); let eth_sender_storage = ConnectionPool::new(Some(1), true); - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let gas_adjuster = Arc::new( GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster) .await @@ -353,7 +432,7 @@ pub async fn initialize_components( tokio::spawn(gas_adjuster.run(stop_receiver.clone())), ]); vlog::info!("initialized ETH-TxManager in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "eth_tx_aggregator"); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); } if components.contains(&Component::DataFetcher) { @@ -365,97 +444,263 @@ pub async fn initialize_components( stop_receiver.clone(), )); vlog::info!("initialized data fetchers in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "data_fetchers"); - } - - if components.contains(&Component::Tree) { - let started_at = Instant::now(); - vlog::info!("initializing the tree"); - task_futures.extend(run_tree( - config, - stop_receiver.clone(), - 
MetadataCalculatorMode::Full, - )); - vlog::info!("initialized tree in {:?}", started_at.elapsed()); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "tree"); - } - - if components.contains(&Component::TreeLightweight) { - task_futures.extend(run_tree( - config, - stop_receiver.clone(), - MetadataCalculatorMode::Lightweight, - )); - } - - if components.contains(&Component::TreeBackup) { - task_futures.extend(run_tree( - config, - stop_receiver.clone(), - MetadataCalculatorMode::Backup, - )); + metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "data_fetchers"); } - // We don't want witness generator to run on local nodes, as it's CPU heavy and is not stable yet - let is_local_setup = std::env::var("ZKSYNC_LOCAL_SETUP") == Ok("true".to_owned()); - if let Some(Component::WitnessGenerator(batch_size)) = components - .iter() - .find(|c| matches!(c, Component::WitnessGenerator(_))) - { - if !is_local_setup { - let started_at = Instant::now(); - vlog::info!( - "initializing the witness generator, batch size: {:?}", - batch_size - ); - let config = WitnessGeneratorConfig::from_env(); - let witness_generator = WitnessGenerator::new(config); - task_futures.push(tokio::spawn(witness_generator.run( - connection_pool.clone(), - stop_receiver.clone(), - *batch_size, - ))); - vlog::info!( - "initialized witness generator in {:?}", - started_at.elapsed() - ); - metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "witness_generator"); - } - } + let store_factory = ObjectStoreFactory::from_env(); + add_trees_to_task_futures( + &components, + config, + &store_factory, + &stop_receiver, + &mut task_futures, + &mut healthchecks, + ); + add_witness_generator_to_task_futures( + &components, + &connection_pool, + &store_factory, + &stop_receiver, + &mut task_futures, + ); if components.contains(&Component::Housekeeper) { - let witness_generator_misc_reporter = WitnessGeneratorMetricsReporter { - witness_generator_config: WitnessGeneratorConfig::from_env(), - prover_config: config.prover.non_gpu.clone(), - }; - let gcs_blob_cleaner = GcsBlobCleaner { - object_store: create_object_store_from_env(), - }; - let witness_generator_metrics = vec![ + let house_keeper_config = HouseKeeperConfig::from_env(); + let l1_batch_metrics_reporter = + L1BatchMetricsReporter::new(house_keeper_config.l1_batch_metrics_reporting_interval_ms); + let gcs_blob_cleaner = GcsBlobCleaner::new( + &store_factory, + house_keeper_config.blob_cleaning_interval_ms, + ); + let gpu_prover_queue = GpuProverQueueMonitor::new( + ProverGroupConfig::from_env().synthesizer_per_gpu, + house_keeper_config.gpu_prover_queue_reporting_interval_ms, + ); + let config = ProverConfigs::from_env().non_gpu; + let prover_job_retry_manager = ProverJobRetryManager::new( + config.max_attempts, + config.proof_generation_timeout(), + house_keeper_config.prover_job_retrying_interval_ms, + ); + let prover_stats_reporter = + ProverStatsReporter::new(house_keeper_config.prover_stats_reporting_interval_ms); + let waiting_to_queued_witness_job_mover = + WaitingToQueuedWitnessJobMover::new(house_keeper_config.witness_job_moving_interval_ms); + let witness_generator_stats_reporter = WitnessGeneratorStatsReporter::new( + house_keeper_config.witness_generator_stats_reporting_interval_ms, + ); + + let witness_generator_metrics = [ + tokio::spawn(witness_generator_stats_reporter.run(ConnectionPool::new(Some(1), true))), + tokio::spawn(gpu_prover_queue.run(ConnectionPool::new(Some(1), 
true))), + tokio::spawn(gcs_blob_cleaner.run(ConnectionPool::new(Some(1), true))), + tokio::spawn(l1_batch_metrics_reporter.run(ConnectionPool::new(Some(1), true))), + tokio::spawn(prover_stats_reporter.run(ConnectionPool::new(Some(1), true))), tokio::spawn( - WitnessGeneratorStatsReporter::default().run(ConnectionPool::new(Some(1), true)), + waiting_to_queued_witness_job_mover.run(ConnectionPool::new(Some(1), true)), ), - tokio::spawn(witness_generator_misc_reporter.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(GpuProverQueueMonitor::default().run(ConnectionPool::new(Some(1), true))), - tokio::spawn(gcs_blob_cleaner.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(L1BatchMetricsReporter::default().run(ConnectionPool::new(Some(1), true))), - tokio::spawn(ProverStatsReporter::default().run(ConnectionPool::new(Some(1), true))), + tokio::spawn(prover_job_retry_manager.run(ConnectionPool::new(Some(1), true))), ]; task_futures.extend(witness_generator_metrics); } + // Run healthcheck server for all components. + healthchecks.push(Box::new(ConnectionPoolHealthCheck::new( + replica_connection_pool, + ))); + task_futures.push(healthcheck::start_server_thread_detached( + config.api.healthcheck.bind_addr(), + healthchecks, + stop_receiver, + )); + Ok((task_futures, stop_sender, cb_receiver)) } +fn add_trees_to_task_futures( + components: &[Component], + config: &ZkSyncConfig, + store_factory: &ObjectStoreFactory, + stop_receiver: &watch::Receiver, + task_futures: &mut Vec>, + healthchecks: &mut Vec>, +) { + const COMPONENTS_TO_MODES: &[(Component, bool, TreeImplementation)] = &[ + (Component::Tree, true, TreeImplementation::Old), + (Component::TreeNew, true, TreeImplementation::New), + (Component::TreeLightweight, false, TreeImplementation::Old), + ( + Component::TreeLightweightNew, + false, + TreeImplementation::New, + ), + ]; + + if components.contains(&Component::TreeBackup) { + panic!("Tree backup mode is disabled"); + } + if components.contains(&Component::TreeNew) + && components.contains(&Component::TreeLightweightNew) + { + panic!( + "Cannot start a node with a new tree in both full and lightweight modes. \ + Since the storage layout is mode-independent, choose either of modes and run \ + the node with it." 
+ ); + } + + for &(component, is_full, implementation) in COMPONENTS_TO_MODES { + if components.contains(&component) { + let store_factory = is_full.then_some(store_factory); + let (future, tree_health_check) = + run_tree(config, store_factory, stop_receiver.clone(), implementation); + task_futures.push(future); + healthchecks.push(Box::new(tree_health_check)); + } + } +} + fn run_tree( config: &ZkSyncConfig, + store_factory: Option<&ObjectStoreFactory>, stop_receiver: watch::Receiver, - mode: MetadataCalculatorMode, -) -> Vec> { - let metadata_calculator = MetadataCalculator::new(config, mode); - let pool = ConnectionPool::new(Some(1), true); - vec![tokio::spawn(metadata_calculator.run(pool, stop_receiver))] + implementation: TreeImplementation, +) -> (JoinHandle<()>, TreeHealthCheck) { + let started_at = Instant::now(); + vlog::info!( + "initializing Merkle tree with {:?} implementation in {} mode", + implementation, + if store_factory.is_some() { + "full" + } else { + "lightweight" + } + ); + + let metadata_calculator = if let Some(factory) = store_factory { + MetadataCalculator::full(config, factory, implementation) + } else { + MetadataCalculator::lightweight(config, implementation) + }; + let tree_health_check = metadata_calculator.tree_health_check(); + let tree_tag = metadata_calculator.tree_tag(); + let future = tokio::task::spawn_blocking(|| { + let pool = ConnectionPool::new(Some(1), true); + metadata_calculator.run(&pool, stop_receiver); + }); + + vlog::info!( + "initialized `{}` tree in {:?}", + tree_tag, + started_at.elapsed() + ); + metrics::gauge!( + "server.init.latency", + started_at.elapsed(), + "stage" => "tree", + "tree" => tree_tag + ); + (future, tree_health_check) +} + +fn add_witness_generator_to_task_futures( + components: &[Component], + connection_pool: &ConnectionPool, + store_factory: &ObjectStoreFactory, + stop_receiver: &watch::Receiver, + task_futures: &mut Vec>, +) { + // We don't want witness generator to run on local nodes, as it's CPU heavy. 
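A standalone sketch of the table-driven dispatch used by `add_trees_to_task_futures` above: each tree component maps to a (full or lightweight, old or new implementation) pair, and `bool::then_some` turns the full/lightweight flag into an optional store factory. `TreeComponent`, `StoreFactory`, and `spawn_tree` are simplified stand-ins.

#[derive(Debug, Clone, Copy, PartialEq)]
enum TreeComponent {
    Tree,
    TreeNew,
    TreeLightweight,
    TreeLightweightNew,
}

#[derive(Debug, Clone, Copy)]
enum TreeImplementation {
    Old,
    New,
}

// Hypothetical stand-in for `ObjectStoreFactory`; only full trees need one.
struct StoreFactory;

fn spawn_tree(store_factory: Option<&StoreFactory>, implementation: TreeImplementation) {
    println!(
        "starting {:?} tree in {} mode",
        implementation,
        if store_factory.is_some() { "full" } else { "lightweight" }
    );
}

fn main() {
    const COMPONENTS_TO_MODES: &[(TreeComponent, bool, TreeImplementation)] = &[
        (TreeComponent::Tree, true, TreeImplementation::Old),
        (TreeComponent::TreeNew, true, TreeImplementation::New),
        (TreeComponent::TreeLightweight, false, TreeImplementation::Old),
        (TreeComponent::TreeLightweightNew, false, TreeImplementation::New),
    ];

    let requested = [TreeComponent::TreeLightweightNew];
    let store_factory = StoreFactory;

    for &(component, is_full, implementation) in COMPONENTS_TO_MODES {
        if requested.contains(&component) {
            // `bool::then_some` turns the flag into `Option<&StoreFactory>`.
            let factory = is_full.then_some(&store_factory);
            spawn_tree(factory, implementation);
        }
    }
}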
+ if std::env::var("ZKSYNC_LOCAL_SETUP") == Ok("true".to_owned()) { + return; + } + + let generator_params = components.iter().filter_map(|component| { + if let Component::WitnessGenerator(batch_size, component_type) = component { + Some((*batch_size, *component_type)) + } else { + None + } + }); + + for (batch_size, component_type) in generator_params { + let started_at = Instant::now(); + vlog::info!( + "initializing the {:?} witness generator, batch size: {:?}", + component_type, + batch_size + ); + + let config = WitnessGeneratorConfig::from_env(); + let task = match component_type { + AggregationRound::BasicCircuits => { + let witness_generator = BasicWitnessGenerator::new(config, store_factory); + tokio::spawn(witness_generator.run( + connection_pool.clone(), + stop_receiver.clone(), + batch_size, + )) + } + AggregationRound::LeafAggregation => { + let witness_generator = LeafAggregationWitnessGenerator::new(config, store_factory); + tokio::spawn(witness_generator.run( + connection_pool.clone(), + stop_receiver.clone(), + batch_size, + )) + } + AggregationRound::NodeAggregation => { + let witness_generator = NodeAggregationWitnessGenerator::new(config, store_factory); + tokio::spawn(witness_generator.run( + connection_pool.clone(), + stop_receiver.clone(), + batch_size, + )) + } + AggregationRound::Scheduler => { + let witness_generator = SchedulerWitnessGenerator::new(config, store_factory); + tokio::spawn(witness_generator.run( + connection_pool.clone(), + stop_receiver.clone(), + batch_size, + )) + } + }; + task_futures.push(task); + + vlog::info!( + "initialized {:?} witness generator in {:?}", + component_type, + started_at.elapsed() + ); + metrics::gauge!( + "server.init.latency", + started_at.elapsed(), + "stage" => format!("witness_generator_{:?}", component_type) + ); + } +} + +fn build_tx_sender( + config: &ZkSyncConfig, + replica_pool: ConnectionPool, + master_pool: ConnectionPool, + l1_gas_price_provider: Arc, +) -> TxSender { + let mut tx_sender_builder = TxSenderBuilder::new(config.clone().into(), replica_pool) + .with_main_connection_pool(master_pool) + .with_state_keeper_config(config.chain.state_keeper.clone()); + + // Add rate limiter if enabled. 
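A toy sketch of the builder pattern `build_tx_sender` relies on, including the conditional rate-limiter step that follows just below: mandatory pieces are chained unconditionally, optional ones only when the config enables them. `ToyTxSenderBuilder` and friends are hypothetical stand-ins, not the real `TxSenderBuilder` API.

#[derive(Debug, Default)]
struct TxSenderConfig {
    transactions_per_sec_limit: Option<u32>,
}

#[derive(Debug, Default)]
struct ToyTxSenderBuilder {
    rate_limit: Option<u32>,
}

#[derive(Debug)]
struct ToyTxSender {
    rate_limit: Option<u32>,
}

impl ToyTxSenderBuilder {
    fn new() -> Self {
        Self::default()
    }

    fn with_rate_limiter(mut self, transactions_per_sec: u32) -> Self {
        self.rate_limit = Some(transactions_per_sec);
        self
    }

    fn build(self) -> ToyTxSender {
        ToyTxSender { rate_limit: self.rate_limit }
    }
}

fn main() {
    let config = TxSenderConfig { transactions_per_sec_limit: Some(100) };

    let mut builder = ToyTxSenderBuilder::new();
    // Only attach the limiter when the config enables it, as in the diff above.
    if let Some(limit) = config.transactions_per_sec_limit {
        builder = builder.with_rate_limiter(limit);
    }
    let tx_sender = builder.build();
    assert_eq!(tx_sender.rate_limit, Some(100));
}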
+ if let Some(transactions_per_sec_limit) = config.api.web3_json_rpc.transactions_per_sec_limit { + tx_sender_builder = tx_sender_builder.with_rate_limiter(transactions_per_sec_limit); + }; + + tx_sender_builder.build( + l1_gas_price_provider, + config.chain.state_keeper.default_aa_hash, + ) } async fn run_http_api( @@ -464,22 +709,45 @@ async fn run_http_api( replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> Vec> { - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let gas_adjuster = Arc::new( GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster) .await .unwrap(), ); - vec![ - web3::start_http_rpc_server_old( - master_connection_pool, - replica_connection_pool, - config, - stop_receiver.clone(), - gas_adjuster.clone(), - ), - tokio::spawn(gas_adjuster.run(stop_receiver)), - ] + let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new( + config.chain.state_keeper.max_l1_gas_price(), + gas_adjuster.clone(), + )); + + let tx_sender = build_tx_sender( + config, + replica_connection_pool.clone(), + master_connection_pool.clone(), + bounded_gas_adjuster, + ); + + let mut handles = { + let mut builder = + web3::ApiBuilder::jsonrpc_backend(config.clone().into(), replica_connection_pool) + .http(config.api.web3_json_rpc.http_port) + .with_filter_limit(config.api.web3_json_rpc.filters_limit()) + .with_threads(config.api.web3_json_rpc.threads_per_server as usize) + .with_tx_sender(tx_sender); + + if config.chain.state_keeper.save_call_traces { + builder = builder.enable_debug_namespace( + config.chain.state_keeper.base_system_contracts_hashes(), + config.chain.state_keeper.fair_l2_gas_price, + config.api.web3_json_rpc.vm_execution_cache_misses_limit, + ) + } + + builder.build(stop_receiver.clone()) + }; + + handles.push(tokio::spawn(gas_adjuster.run(stop_receiver))); + handles } async fn run_ws_api( @@ -488,19 +756,36 @@ async fn run_ws_api( replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> Vec> { - let eth_gateway = EthereumClient::from_config(config); + let eth_gateway = PKSigningClient::from_config(config); let gas_adjuster = Arc::new( GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster) .await .unwrap(), ); - web3::start_ws_rpc_server_old( - master_connection_pool, - replica_connection_pool, + + let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new( + config.chain.state_keeper.max_l1_gas_price(), + gas_adjuster.clone(), + )); + + let tx_sender = build_tx_sender( config, - stop_receiver, - gas_adjuster, - ) + replica_connection_pool.clone(), + master_connection_pool.clone(), + bounded_gas_adjuster, + ); + + let mut tasks = + web3::ApiBuilder::jsonrpc_backend(config.clone().into(), replica_connection_pool) + .ws(config.api.web3_json_rpc.ws_port) + .with_filter_limit(config.api.web3_json_rpc.filters_limit()) + .with_subscriptions_limit(config.api.web3_json_rpc.subscriptions_limit()) + .with_polling_interval(config.api.web3_json_rpc.pubsub_interval()) + .with_tx_sender(tx_sender) + .build(stop_receiver.clone()); + + tasks.push(tokio::spawn(gas_adjuster.run(stop_receiver))); + tasks } fn circuit_breakers_for_components( @@ -529,15 +814,50 @@ fn circuit_breakers_for_components( | Component::TreeBackup ) }) { - circuit_breakers.push(Box::new(VksChecker::new(config))); + let eth_client = PKSigningClient::from_config(config); + circuit_breakers.push(Box::new(VksChecker::new( + &config.chain.circuit_breaker, + eth_client, + ))); } if components 
.iter() .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager)) { - circuit_breakers.push(Box::new(FacetSelectorsChecker::new(config))); + let eth_client = PKSigningClient::from_config(config); + circuit_breakers.push(Box::new(FacetSelectorsChecker::new( + &config.chain.circuit_breaker, + eth_client, + ))); } circuit_breakers } + +pub fn block_on(future: F) -> F::Output +where + F::Output: Send, +{ + std::thread::spawn(move || { + let runtime = Builder::new_current_thread() + .enable_all() + .build() + .expect("tokio runtime creation failed"); + runtime.block_on(future) + }) + .join() + .unwrap() +} + +#[tokio::test] +async fn test_house_keeper_components_get_added() { + let config = ZkSyncConfig::from_env(); + let (core_task_handles, _, _) = + initialize_components(&config, vec![Component::Housekeeper], false) + .await + .unwrap(); + // circuit-breaker, prometheus-exporter, healthcheck components are run, irrespective of other components. + let always_running_component_count = 3; + assert_eq!(7, core_task_handles.len() - always_running_component_count); +} diff --git a/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs b/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs new file mode 100644 index 000000000000..32feec5fafd2 --- /dev/null +++ b/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs @@ -0,0 +1,37 @@ +use tokio::sync::watch; +use zksync_health_check::{CheckHealth, CheckHealthStatus}; + +use super::{MetadataCalculatorMode, MetadataCalculatorStatus}; + +/// HealthCheck used to verify if the tree(MetadataCalculator) is ready. +/// This guarantees that we mark a tree as ready only when it can start processing blocks. +/// Used in the /health endpoint +#[derive(Clone, Debug)] +pub struct TreeHealthCheck { + receiver: watch::Receiver, + tree_mode: MetadataCalculatorMode, +} + +impl TreeHealthCheck { + pub(super) fn new( + receiver: watch::Receiver, + tree_mode: MetadataCalculatorMode, + ) -> TreeHealthCheck { + TreeHealthCheck { + receiver, + tree_mode, + } + } +} + +impl CheckHealth for TreeHealthCheck { + fn check_health(&self) -> CheckHealthStatus { + match *self.receiver.borrow() { + MetadataCalculatorStatus::Ready => CheckHealthStatus::Ready, + MetadataCalculatorStatus::NotReady => CheckHealthStatus::NotReady(format!( + "{} tree is not ready", + self.tree_mode.as_tag() + )), + } + } +} diff --git a/core/bin/zksync_core/src/metadata_calculator/helpers.rs b/core/bin/zksync_core/src/metadata_calculator/helpers.rs new file mode 100644 index 000000000000..b7eec35dd926 --- /dev/null +++ b/core/bin/zksync_core/src/metadata_calculator/helpers.rs @@ -0,0 +1,194 @@ +//! Various helpers for the metadata calculator. + +#[cfg(test)] +use std::sync::mpsc; +use std::{collections::BTreeMap, thread, time::Duration}; + +use zksync_dal::StorageProcessor; +use zksync_merkle_tree::{TreeMetadata, TreeMode, ZkSyncTree as OldTree}; +use zksync_merkle_tree2::domain::{TreeMetadata as NewTreeMetadata, ZkSyncTree as NewTree}; +use zksync_types::{ + block::WitnessBlockWithLogs, L1BatchNumber, StorageKey, StorageLog, StorageLogKind, + WitnessStorageLog, H256, +}; + +/// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. 
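+///
+/// Both variants expose the same API, so the rest of the calculator can stay
+/// implementation-agnostic. An illustrative construction sketch (mirroring how `TreeUpdater::new`
+/// builds the wrapper later in this diff; `db` is a `RocksDB` handle and `implementation` a
+/// `TreeImplementation`):
+///
+/// ```ignore
+/// let tree = match implementation {
+///     TreeImplementation::Old => ZkSyncTree::Old(OldTree::new(db)),
+///     TreeImplementation::New => ZkSyncTree::New(NewTree::new(db)),
+/// };
+/// let root_hash = tree.root_hash(); // forwards to the wrapped implementation
+/// ```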
+#[derive(Debug)] +pub(super) enum ZkSyncTree { + Old(OldTree), + New(NewTree), +} + +impl ZkSyncTree { + pub fn map_metadata(new_metadata: NewTreeMetadata) -> TreeMetadata { + TreeMetadata { + root_hash: new_metadata.root_hash, + rollup_last_leaf_index: new_metadata.rollup_last_leaf_index, + initial_writes: new_metadata.initial_writes, + repeated_writes: new_metadata.repeated_writes, + witness_input: new_metadata.witness, + } + } + + pub fn is_empty(&self) -> bool { + match self { + Self::Old(tree) => tree.is_empty(), + Self::New(tree) => tree.is_empty(), + } + } + + pub fn block_number(&self) -> u32 { + match self { + Self::Old(tree) => tree.block_number(), + Self::New(tree) => tree.block_number(), + } + } + + pub fn root_hash(&self) -> H256 { + match self { + Self::Old(tree) => tree.root_hash(), + Self::New(tree) => tree.root_hash(), + } + } + + pub fn process_block(&mut self, block: &[WitnessStorageLog]) -> TreeMetadata { + match self { + Self::Old(tree) => tree.process_block(block), + Self::New(tree) => { + tree.reset(); // For compatibility with the old implementation + let new_metadata = tree.process_block(block); + Self::map_metadata(new_metadata) + } + } + } + + pub fn process_blocks<'a>( + &mut self, + blocks: impl Iterator, + ) -> Vec { + match self { + Self::Old(tree) => { + let mode = tree.mode(); + let blocks = blocks.map(|logs| Self::filter_block_logs(logs, mode)); + tree.process_blocks(blocks) + } + Self::New(tree) => { + tree.reset(); // For compatibility with the old implementation + blocks + .map(|block| Self::map_metadata(tree.process_block(block))) + .collect() + } + } + } + + fn filter_block_logs( + logs: &[WitnessStorageLog], + mode: TreeMode, + ) -> impl Iterator + '_ { + logs.iter().filter(move |log| { + matches!(mode, TreeMode::Full) || log.storage_log.kind == StorageLogKind::Write + }) + } + + pub fn save(&mut self) { + match self { + Self::Old(tree) => tree.save().expect("failed saving Merkle tree"), + Self::New(tree) => tree.save(), + } + } +} + +/// Component implementing the delay policy in [`MetadataCalculator`] when there are no +/// blocks to seal. +#[derive(Debug, Clone)] +pub(super) struct Delayer { + delay_interval: Duration, + // Notifies the tests about the block count and tree root hash when the calculator + // runs out of blocks to process. (Since RocksDB is exclusive, we cannot just create + // another instance to check these params on the test side without stopping the calc.) + #[cfg(test)] + pub delay_notifier: mpsc::Sender<(u32, H256)>, +} + +impl Delayer { + pub fn new(delay_interval: Duration) -> Self { + Self { + delay_interval, + #[cfg(test)] + delay_notifier: mpsc::channel().0, + } + } + + #[cfg_attr(not(test), allow(unused))] // `tree` is only used in test mode + pub fn wait(&self, tree: &ZkSyncTree) { + #[cfg(test)] + self.delay_notifier + .send((tree.block_number(), tree.root_hash())) + .ok(); + + thread::sleep(self.delay_interval); + } +} + +pub(crate) fn get_logs_for_l1_batch( + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, +) -> Option { + let header = storage.blocks_dal().get_block_header(l1_batch_number)?; + + // `BTreeMap` is used because tree needs to process slots in lexicographical order. 
+ let mut storage_logs: BTreeMap = BTreeMap::new(); + + let protective_reads = storage + .storage_logs_dedup_dal() + .get_protective_reads_for_l1_batch(l1_batch_number); + let touched_slots = storage + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number); + + let hashed_keys = protective_reads + .iter() + .chain(touched_slots.keys()) + .map(StorageKey::hashed_key) + .collect(); + let previous_values = storage + .storage_logs_dal() + .get_previous_storage_values(hashed_keys, l1_batch_number); + + for storage_key in protective_reads { + let previous_value = previous_values[&storage_key.hashed_key()]; + // Sanity check: value must not change for slots that require protective reads. + if let Some(value) = touched_slots.get(&storage_key) { + assert_eq!( + previous_value, *value, + "Value was changed for slot that requires protective read" + ); + } + + storage_logs.insert( + storage_key, + WitnessStorageLog { + storage_log: StorageLog::new_read_log(storage_key, previous_value), + previous_value, + }, + ); + } + + for (storage_key, value) in touched_slots { + let previous_value = previous_values[&storage_key.hashed_key()]; + if previous_value != value { + storage_logs.insert( + storage_key, + WitnessStorageLog { + storage_log: StorageLog::new_write_log(storage_key, value), + previous_value, + }, + ); + } + } + + Some(WitnessBlockWithLogs { + header, + storage_logs: storage_logs.into_values().collect(), + }) +} diff --git a/core/bin/zksync_core/src/metadata_calculator/metrics.rs b/core/bin/zksync_core/src/metadata_calculator/metrics.rs new file mode 100644 index 000000000000..ebf6ae7216dc --- /dev/null +++ b/core/bin/zksync_core/src/metadata_calculator/metrics.rs @@ -0,0 +1,143 @@ +//! Metrics for `MetadataCalculator`. + +use std::time::Instant; + +use zksync_types::block::L1BatchHeader; +use zksync_utils::time::seconds_since_epoch; + +use super::{MetadataCalculator, MetadataCalculatorMode}; + +#[derive(Debug, Clone, Copy)] +pub(super) enum TreeUpdateStage { + LoadChanges, + Compute, + PrepareResults, + ReestimateGasCost, + SavePostgres, + SaveRocksDB, + SaveWitnesses, + _Backup, +} + +impl TreeUpdateStage { + pub fn as_str(self) -> &'static str { + match self { + Self::LoadChanges => "load_changes", + Self::Compute => "compute", + Self::PrepareResults => "prepare_results", + Self::ReestimateGasCost => "reestimate_block_commit_gas_cost", + Self::SavePostgres => "save_postgres", + Self::SaveRocksDB => "save_rocksdb", + Self::SaveWitnesses => "save_gcs", + Self::_Backup => "backup_tree", + } + } + + pub fn start(self) -> UpdateTreeLatency { + UpdateTreeLatency { + stage: self, + start: Instant::now(), + } + } + + pub fn run(self, action: impl FnOnce() -> T) -> T { + let latency = self.start(); + let output = action(); + latency.report(); + output + } +} + +/// Latency metric for a certain stage of the tree update. 
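+///
+/// Illustrative usage sketch (the `TreeUpdateStage::run` helper above wraps exactly this
+/// start/report pattern around a closure):
+///
+/// ```ignore
+/// let latency = TreeUpdateStage::SavePostgres.start();
+/// // ... write block metadata to Postgres ...
+/// latency.report();
+/// ```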
+#[derive(Debug)] +#[must_use = "Tree latency should be `report`ed"] +pub(super) struct UpdateTreeLatency { + stage: TreeUpdateStage, + start: Instant, +} + +impl UpdateTreeLatency { + pub fn report(self) { + metrics::histogram!( + "server.metadata_calculator.update_tree.latency.stage", + self.start.elapsed(), + "stage" => self.stage.as_str() + ); + } +} + +impl MetadataCalculator { + pub(super) fn update_metrics( + mode: MetadataCalculatorMode, + block_headers: &[L1BatchHeader], + total_logs: usize, + start: Instant, + ) { + let mode_tag = mode.as_tag(); + let tree_implementation = mode.tree_implementation(); + let tree_tag = tree_implementation.as_tag(); + + metrics::histogram!( + "server.metadata_calculator.update_tree.latency", + start.elapsed(), + "tree" => tree_tag + ); + if total_logs > 0 { + metrics::histogram!( + "server.metadata_calculator.update_tree.per_log.latency", + start.elapsed().div_f32(total_logs as f32), + "tree" => tree_tag + ); + } + + let total_tx: usize = block_headers.iter().map(|block| block.tx_count()).sum(); + let total_l1_tx: u64 = block_headers + .iter() + .map(|block| u64::from(block.l1_tx_count)) + .sum(); + metrics::counter!( + "server.processed_txs", + total_tx as u64, + "stage" => "tree", + "tree" => tree_tag + ); + metrics::counter!( + "server.processed_l1_txs", + total_l1_tx, + "stage" => "tree", + "tree" => tree_tag + ); + metrics::histogram!( + "server.metadata_calculator.log_batch", + total_logs as f64, + "tree" => tree_tag + ); + metrics::histogram!( + "server.metadata_calculator.blocks_batch", + block_headers.len() as f64, + "tree" => tree_tag + ); + + let last_block_number = block_headers.last().unwrap().number.0; + vlog::info!( + "block {:?} processed in {} tree", + last_block_number, + tree_tag + ); + metrics::gauge!( + "server.block_number", + last_block_number as f64, + "stage" => format!("tree_{}_mode", mode_tag), + "tree" => tree_tag + ); + + let latency = + seconds_since_epoch().saturating_sub(block_headers.first().unwrap().timestamp); + metrics::histogram!( + "server.block_latency", + latency as f64, + "stage" => format!("tree_{}_mode", mode_tag), + "tree" => tree_tag + ); + } +} diff --git a/core/bin/zksync_core/src/metadata_calculator/mod.rs b/core/bin/zksync_core/src/metadata_calculator/mod.rs index 2a0c5013426e..bb685b25ad5e 100644 --- a/core/bin/zksync_core/src/metadata_calculator/mod.rs +++ b/core/bin/zksync_core/src/metadata_calculator/mod.rs @@ -1,359 +1,181 @@ //! This module applies updates to the ZkSyncTree, calculates metadata for sealed blocks, and //! stores them in the DB. 
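+//!
+//! The logic is split across the `healthcheck`, `helpers`, `metrics` and `updater` submodules;
+//! the public entry point is `MetadataCalculator`. An illustrative setup sketch (mirroring
+//! `run_tree` in the server binary; `stop_receiver` is assumed to be a `watch::Receiver<bool>`):
+//!
+//! ```ignore
+//! let config = ZkSyncConfig::from_env();
+//! let calculator = MetadataCalculator::lightweight(&config, TreeImplementation::Old);
+//! let health_check = calculator.tree_health_check();
+//! let pool = ConnectionPool::new(Some(1), true);
+//! calculator.run(&pool, stop_receiver);
+//! ```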
-use std::collections::BTreeMap; -use std::time::Instant; - use tokio::sync::watch; +use std::time::Duration; + use zksync_config::{DBConfig, ZkSyncConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_merkle_tree::{TreeMetadata, TreeMode, ZkSyncTree}; -use zksync_object_store::gcs_utils::merkle_tree_paths_blob_url; -use zksync_object_store::object_store::{ - create_object_store_from_env, DynamicObjectStore, WITNESS_INPUT_BUCKET_PATH, +use zksync_merkle_tree::TreeMetadata; +use zksync_object_store::ObjectStoreFactory; +use zksync_storage::{ + db::Database, + rocksdb::{ + backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, + Options, DB, + }, + RocksDB, }; -use zksync_storage::db::Database; -use zksync_storage::rocksdb::backup::{BackupEngine, BackupEngineOptions, RestoreOptions}; -use zksync_storage::rocksdb::{Options, DB}; -use zksync_storage::RocksDB; -use zksync_types::block::L1BatchHeader; -use zksync_types::commitment::{BlockCommitment, BlockMetadata, BlockWithMetadata}; use zksync_types::{ - block::WitnessBlockWithLogs, L1BatchNumber, StorageKey, StorageLog, StorageLogKind, - WitnessStorageLog, H256, + block::L1BatchHeader, + commitment::{BlockCommitment, BlockMetadata, BlockWithMetadata}, }; -use zksync_utils::time::seconds_since_epoch; +mod healthcheck; +mod helpers; +mod metrics; #[cfg(test)] mod tests; +mod updater; -#[derive(Debug)] -pub struct MetadataCalculator { - #[cfg_attr(test, allow(dead_code))] - delay_interval: std::time::Duration, - tree: ZkSyncTree, - config: DBConfig, - mode: MetadataCalculatorMode, - object_store: DynamicObjectStore, -} +pub use self::healthcheck::TreeHealthCheck; +pub(crate) use self::helpers::get_logs_for_l1_batch; +use self::{helpers::Delayer, metrics::TreeUpdateStage, updater::TreeUpdater}; -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum MetadataCalculatorMode { - Full, - Lightweight, - Backup, +#[derive(Debug, Copy, Clone)] +pub enum TreeImplementation { + Old, + New, } -impl From for TreeMode { - fn from(mode: MetadataCalculatorMode) -> Self { - match mode { - MetadataCalculatorMode::Lightweight => TreeMode::Lightweight, - _ => TreeMode::Full, +impl TreeImplementation { + fn as_tag(self) -> &'static str { + match self { + Self::Old => "old", + Self::New => "new", } } } -impl MetadataCalculator { - pub fn new(config: &ZkSyncConfig, mode: MetadataCalculatorMode) -> Self { - { - let db = RocksDB::new( - Database::MerkleTree, - Self::rocksdb_path(&config.db, mode), - false, - ); - let tree = ZkSyncTree::new(db); - if tree.is_empty() { - Self::restore_from_backup(&config.db); - } - } - let db = RocksDB::new( - Database::MerkleTree, - Self::rocksdb_path(&config.db, mode), - true, - ); - let tree = ZkSyncTree::new_with_mode(db, mode.into()); - Self { - delay_interval: config.chain.operations_manager.delay_interval(), - tree, - config: config.db.clone(), - mode, - object_store: create_object_store_from_env(), +#[derive(Debug, Copy, Clone)] +enum MetadataCalculatorMode { + Full(TreeImplementation), + Lightweight(TreeImplementation), +} + +impl MetadataCalculatorMode { + fn as_tag(self) -> &'static str { + match self { + Self::Full(TreeImplementation::Old) => "full", + // ^ chosen for backward compatibility + Self::Full(TreeImplementation::New) => "full_new", + Self::Lightweight(TreeImplementation::Old) => "lightweight", + // ^ chosen for backward compatibility + Self::Lightweight(TreeImplementation::New) => "lightweight_new", } } - pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver) { - let 
mut storage = pool.access_storage().await; - - // ensure genesis creation - if self.tree.is_empty() { - let storage_logs = get_logs_for_l1_batch(&mut storage, L1BatchNumber(0)); - self.tree.process_block(storage_logs.unwrap().storage_logs); - self.tree.save().expect("Unable to update tree state"); + fn tree_implementation(self) -> TreeImplementation { + match self { + Self::Full(implementation) | Self::Lightweight(implementation) => implementation, } - let mut next_block_number_to_seal_in_tree = self.get_current_rocksdb_block_number(); - - let current_db_block = storage.blocks_dal().get_sealed_block_number() + 1; - let last_block_number_with_metadata = - storage.blocks_dal().get_last_block_number_with_metadata() + 1; - drop(storage); - - vlog::info!( - "initialized metadata calculator. Current rocksDB block: {}. Current Postgres block: {}", - next_block_number_to_seal_in_tree, - current_db_block - ); - metrics::gauge!( - "server.metadata_calculator.backup_lag", - (last_block_number_with_metadata - *next_block_number_to_seal_in_tree).0 as f64, - ); - - loop { - if *stop_receiver.borrow() { - vlog::info!("Stop signal received, metadata_calculator is shutting down"); - break; - } - - let query_started_at = Instant::now(); - - let mut storage = pool.access_storage().await; - - match self.mode { - MetadataCalculatorMode::Full => { - let last_sealed_block = storage.blocks_dal().get_sealed_block_number(); - let new_blocks: Vec<_> = (next_block_number_to_seal_in_tree.0 - ..=last_sealed_block.0) - .take(self.config.max_block_batch) - .flat_map(|block_number| { - get_logs_for_l1_batch(&mut storage, L1BatchNumber(block_number)) - }) - .collect(); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - query_started_at.elapsed(), - "stage" => "load_changes" - ); - - if new_blocks.is_empty() { - // we don't have any new data to process. Waiting... - #[cfg(not(test))] - tokio::time::sleep(self.delay_interval).await; - - #[cfg(test)] - return; - } else { - next_block_number_to_seal_in_tree = - new_blocks.last().unwrap().header.number + 1; - - self.process_multiple_blocks(&mut storage, new_blocks).await; - } - } - MetadataCalculatorMode::Lightweight => { - let new_block_logs = - get_logs_for_l1_batch(&mut storage, next_block_number_to_seal_in_tree); + } +} - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - query_started_at.elapsed(), - "stage" => "load_changes" - ); +#[derive(Debug, PartialEq)] +pub enum MetadataCalculatorStatus { + Ready, + NotReady, +} - match new_block_logs { - None => { - // we don't have any new data to process. Waiting... - #[cfg(not(test))] - tokio::time::sleep(self.delay_interval).await; +#[derive(Debug)] +pub struct MetadataCalculator { + updater: TreeUpdater, + delayer: Delayer, + throttler: Delayer, + status_sender: watch::Sender, +} - #[cfg(test)] - return; - } - Some(block) => { - next_block_number_to_seal_in_tree = block.header.number + 1; +impl MetadataCalculator { + /// Creates a calculator operating in the lightweight sync mode. In this mode, the calculator + /// computes Merkle tree root hashes and some auxiliary information for blocks, but not + /// witness inputs. 
+ pub fn lightweight(config: &ZkSyncConfig, implementation: TreeImplementation) -> Self { + let mode = MetadataCalculatorMode::Lightweight(implementation); + Self::new(config, None, mode) + } - self.process_block(&mut storage, block).await; - } - } - } - MetadataCalculatorMode::Backup => { - unreachable!("Backup mode is disabled"); - } - } - } + /// Creates a calculator operating in the full sync mode. In this mode, the calculator + /// will compute witness inputs for all storage operations and put them into the object store + /// as provided by `store_factory` (e.g., GCS). + pub fn full( + config: &ZkSyncConfig, + store_factory: &ObjectStoreFactory, + implementation: TreeImplementation, + ) -> Self { + let mode = MetadataCalculatorMode::Full(implementation); + Self::new(config, Some(store_factory), mode) } - fn rocksdb_path(config: &DBConfig, mode: MetadataCalculatorMode) -> &str { - match mode { - MetadataCalculatorMode::Full => config.path(), - _ => config.merkle_tree_fast_ssd_path(), + fn new( + config: &ZkSyncConfig, + store_factory: Option<&ObjectStoreFactory>, + mode: MetadataCalculatorMode, + ) -> Self { + use self::TreeImplementation::New; + + let db_path = Self::db_path(&config.db, mode); + let db = Self::create_db(db_path); + let object_store = store_factory.map(ObjectStoreFactory::create_store); + let updater = TreeUpdater::new(mode, db, &config.db, object_store); + let delay_interval = config.chain.operations_manager.delay_interval(); + let throttle_interval = if matches!(mode, MetadataCalculatorMode::Lightweight(New)) { + config.db.new_merkle_tree_throttle_interval() + } else { + Duration::ZERO + }; + let (status_sender, _) = watch::channel(MetadataCalculatorStatus::NotReady); + Self { + updater, + delayer: Delayer::new(delay_interval), + throttler: Delayer::new(throttle_interval), + status_sender, } } - pub(crate) fn get_current_rocksdb_block_number(&mut self) -> L1BatchNumber { - L1BatchNumber(self.tree.block_number()) + /// Returns a health check for this calculator. 
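+    ///
+    /// Illustrative usage sketch (the `/health` endpoint relies on checks like this one;
+    /// `CheckHealth` and `CheckHealthStatus` come from `zksync_health_check`):
+    ///
+    /// ```ignore
+    /// let health_check = calculator.tree_health_check();
+    /// match health_check.check_health() {
+    ///     CheckHealthStatus::Ready => { /* the tree can process blocks */ }
+    ///     CheckHealthStatus::NotReady(reason) => vlog::info!("tree is not ready: {}", reason),
+    /// }
+    /// ```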
+ pub fn tree_health_check(&self) -> TreeHealthCheck { + let receiver = self.status_sender.subscribe(); + TreeHealthCheck::new(receiver, self.updater.mode()) } - // Applies the sealed block to the tree and returns the new root hash - #[tracing::instrument(skip(self, storage, block))] - async fn process_block( - &mut self, - storage: &mut StorageProcessor<'_>, - block: WitnessBlockWithLogs, - ) -> H256 { - let start = Instant::now(); - let mut start_stage = Instant::now(); - - assert_eq!(self.mode, MetadataCalculatorMode::Lightweight); - - let storage_logs = get_filtered_storage_logs(&block.storage_logs, self.mode); - let total_logs: usize = storage_logs.len(); - let previous_root_hash = self.tree.root_hash(); - let metadata_at_block = self.tree.process_block(storage_logs); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "compute" - ); - start_stage = Instant::now(); - - let metadata = Self::build_block_metadata(&metadata_at_block, &block.header); - let root_hash = metadata.root_hash; - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "prepare_results" - ); - - let block_with_metadata = - Self::reestimate_block_commit_gas(storage, block.header, metadata); - - start_stage = Instant::now(); - - // for consistency it's important to save to postgres before rocksDB - storage.blocks_dal().save_blocks_metadata( - block_with_metadata.header.number, - block_with_metadata.metadata, - H256::from_slice(&previous_root_hash), - ); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "save_postgres" - ); - - start_stage = Instant::now(); - - self.tree.save().expect("Unable to update tree state"); - - // only metrics after this point - self.update_metrics( - &[block_with_metadata.header], - total_logs, - start_stage, - start, - ); - root_hash + /// Returns the tag for this calculator usable in metrics reporting. 
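+    ///
+    /// The tag corresponds to `MetadataCalculatorMode::as_tag`, i.e. one of `"full"`, `"full_new"`,
+    /// `"lightweight"` or `"lightweight_new"`. Illustrative sketch (assuming an env-based config
+    /// and a mock object store, as in the unit tests):
+    ///
+    /// ```ignore
+    /// let store_factory = ObjectStoreFactory::mock();
+    /// let calculator = MetadataCalculator::full(&config, &store_factory, TreeImplementation::New);
+    /// assert_eq!(calculator.tree_tag(), "full_new");
+    /// ```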
+ pub fn tree_tag(&self) -> &'static str { + self.updater.mode().as_tag() } - #[tracing::instrument(skip(self, storage, blocks))] - async fn process_multiple_blocks( - &mut self, - storage: &mut StorageProcessor<'_>, - blocks: Vec, - ) -> H256 { - let start = Instant::now(); - - assert_eq!( - self.mode, - MetadataCalculatorMode::Full, - "Lightweight tree shouldn't process multiple blocks" - ); - - let mut start_stage = Instant::now(); - - let total_logs: usize = blocks.iter().map(|block| block.storage_logs.len()).sum(); - let storage_logs = blocks.iter().map(|block| block.storage_logs.iter()); - let previous_root_hash = self.tree.root_hash(); - let metadata = self.tree.process_blocks(storage_logs); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "compute" - ); - - let root_hashes: Vec<_> = std::iter::once(&previous_root_hash) - .chain(metadata.iter().map(|metadata| &metadata.root_hash)) - .map(|hash| H256::from_slice(hash)) - .collect(); - let last_root_hash = *root_hashes.last().unwrap(); - let mut block_headers = Vec::with_capacity(blocks.len()); - - for ((metadata_at_block, block), previous_root_hash) in - metadata.into_iter().zip(blocks).zip(root_hashes) - { - start_stage = Instant::now(); - - let metadata = Self::build_block_metadata(&metadata_at_block, &block.header); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "prepare_results" - ); - - let block_with_metadata = - Self::reestimate_block_commit_gas(storage, block.header, metadata); - - start_stage = Instant::now(); - - // Save witness input only when running in Full mode. - self.object_store - .put( - WITNESS_INPUT_BUCKET_PATH, - merkle_tree_paths_blob_url(block_with_metadata.header.number), - metadata_at_block.witness_input, - ) - .unwrap(); - - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "save_gcs" - ); - - start_stage = Instant::now(); - - // Save the metadata in case the lightweight tree is behind / not running - storage.blocks_dal().save_blocks_metadata( - block_with_metadata.header.number, - block_with_metadata.metadata, - previous_root_hash, - ); - - storage - .witness_generator_dal() - .save_witness_inputs(block_with_metadata.header.number); + fn db_path(config: &DBConfig, mode: MetadataCalculatorMode) -> &str { + use self::TreeImplementation::{New, Old}; - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "save_postgres" - ); - - block_headers.push(block_with_metadata.header); + match mode { + MetadataCalculatorMode::Full(Old) => config.path(), + MetadataCalculatorMode::Lightweight(Old) => config.merkle_tree_fast_ssd_path(), + MetadataCalculatorMode::Full(New) | MetadataCalculatorMode::Lightweight(New) => { + &config.new_merkle_tree_ssd_path + } } - start_stage = Instant::now(); - - self.tree.save().expect("Unable to update tree state"); + } - // only metrics after this point - self.update_metrics(&block_headers, total_logs, start_stage, start); + fn create_db(path: &str) -> RocksDB { + let db = RocksDB::new(Database::MerkleTree, path, true); + if cfg!(test) { + // We need sync writes for the unit tests to execute reliably. With the default config, + // some writes to RocksDB may occur, but not be visible to the test code. 
+ db.with_sync_writes() + } else { + db + } + } - last_root_hash + pub fn run(self, pool: &ConnectionPool, stop_receiver: watch::Receiver) { + self.updater.loop_updating_tree( + self.delayer, + self.throttler, + pool, + stop_receiver, + self.status_sender, + ); } /// This is used to improve L1 gas estimation for the commit operation. The estimations are computed @@ -364,84 +186,34 @@ impl MetadataCalculator { block_header: L1BatchHeader, metadata: BlockMetadata, ) -> BlockWithMetadata { - let start_stage = Instant::now(); - let unsorted_factory_deps = storage - .blocks_dal() - .get_l1_batch_factory_deps(block_header.number); - let block_with_metadata = - BlockWithMetadata::new(block_header, metadata, unsorted_factory_deps); - let commit_gas_cost = crate::gas_tracker::commit_gas_count_for_block(&block_with_metadata); - storage - .blocks_dal() - .update_predicted_block_commit_gas(block_with_metadata.header.number, commit_gas_cost); - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "reestimate_block_commit_gas_cost" - ); - block_with_metadata - } - - fn update_metrics( - &self, - block_headers: &[L1BatchHeader], - total_logs: usize, - start_stage: Instant, - start: Instant, - ) { - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - start_stage.elapsed(), - "stage" => "save_rocksdb" - ); - metrics::histogram!( - "server.metadata_calculator.update_tree.latency", - start.elapsed() - ); - - if total_logs > 0 { - metrics::histogram!( - "server.metadata_calculator.update_tree.per_log.latency", - start.elapsed().div_f32(total_logs as f32) + TreeUpdateStage::ReestimateGasCost.run(|| { + let unsorted_factory_deps = storage + .blocks_dal() + .get_l1_batch_factory_deps(block_header.number); + let block_with_metadata = + BlockWithMetadata::new(block_header, metadata, unsorted_factory_deps); + let commit_gas_cost = + crate::gas_tracker::commit_gas_count_for_block(&block_with_metadata); + storage.blocks_dal().update_predicted_block_commit_gas( + block_with_metadata.header.number, + commit_gas_cost, ); - } - - let total_tx: usize = block_headers.iter().map(|block| block.tx_count()).sum(); - let total_l1_tx: u16 = block_headers.iter().map(|block| block.l1_tx_count).sum(); - metrics::counter!("server.processed_txs", total_tx as u64, "stage" => "tree"); - metrics::counter!("server.processed_l1_txs", total_l1_tx as u64, "stage" => "tree"); - metrics::histogram!("server.metadata_calculator.log_batch", total_logs as f64); - metrics::histogram!( - "server.metadata_calculator.blocks_batch", - block_headers.len() as f64 - ); - - let last_block_number = block_headers.last().unwrap().number.0; - vlog::info!("block {:?} processed in tree", last_block_number); - metrics::gauge!( - "server.block_number", - last_block_number as f64, - "stage" => format!("tree_{:?}_mode", self.mode).to_lowercase() - ); - metrics::histogram!( - "server.block_latency", - (seconds_since_epoch() - block_headers.first().unwrap().timestamp) as f64, - "stage" => format!("tree_{:?}_mode", self.mode).to_lowercase() - ); + block_with_metadata + }) } fn build_block_metadata( - tree_metadata_at_block: &TreeMetadata, + tree_metadata_at_block: TreeMetadata, l1_batch_header: &L1BatchHeader, ) -> BlockMetadata { - let merkle_root_hash = H256::from_slice(&tree_metadata_at_block.root_hash); + let merkle_root_hash = tree_metadata_at_block.root_hash; let block_commitment = BlockCommitment::new( l1_batch_header.l2_to_l1_logs.clone(), 
tree_metadata_at_block.rollup_last_leaf_index, merkle_root_hash, - tree_metadata_at_block.initial_writes.clone(), - tree_metadata_at_block.repeated_writes.clone(), + tree_metadata_at_block.initial_writes, + tree_metadata_at_block.repeated_writes, l1_batch_header.base_system_contracts_hashes.bootloader, l1_batch_header.base_system_contracts_hashes.default_aa, ); @@ -467,133 +239,32 @@ impl MetadataCalculator { metadata } - /// Encodes storage key using the pre-defined zkSync hasher. - pub fn key_hash_fn(key: &StorageKey) -> Vec { - key.hashed_key().to_fixed_bytes().to_vec() - } - - fn restore_from_backup(db_config: &DBConfig) { - let mut engine = BackupEngine::open( - &BackupEngineOptions::default(), - db_config.merkle_tree_backup_path(), - ) - .expect("failed to initialize restore engine"); + fn _restore_from_backup(db_config: &DBConfig) { + let backup_path = db_config.merkle_tree_backup_path(); + let mut engine = BackupEngine::open(&BackupEngineOptions::default(), backup_path) + .expect("failed to initialize restore engine"); + let rocksdb_path = db_config.path(); if let Err(err) = engine.restore_from_latest_backup( - db_config.path(), - db_config.path(), + rocksdb_path, + rocksdb_path, &RestoreOptions::default(), ) { vlog::warn!("can't restore tree from backup {:?}", err); } } - fn _backup(&mut self) { - let started_at = Instant::now(); - let mut engine = BackupEngine::open( - &BackupEngineOptions::default(), - self.config.merkle_tree_backup_path(), - ) - .expect("failed to create backup engine"); - let rocksdb_path = Self::rocksdb_path(&self.config, self.mode); + fn _backup(config: &DBConfig, mode: MetadataCalculatorMode) { + let backup_latency = TreeUpdateStage::_Backup.start(); + let backup_path = config.merkle_tree_backup_path(); + let mut engine = BackupEngine::open(&BackupEngineOptions::default(), backup_path) + .expect("failed to create backup engine"); + let rocksdb_path = Self::db_path(config, mode); let db = DB::open_for_read_only(&Options::default(), rocksdb_path, false) .expect("failed to open db for backup"); engine.create_new_backup(&db).unwrap(); engine - .purge_old_backups(self.config.backup_count()) + .purge_old_backups(config.backup_count()) .expect("failed to purge old backups"); - metrics::histogram!( - "server.metadata_calculator.update_tree.latency.stage", - started_at.elapsed(), - "stage" => "backup_tree" - ); - } -} - -/// Filters the storage log based on the MetadataCalculatorMode and StorageLogKind. -/// | MetadataCalculatorMode | Processing | -/// |------------------------------|------------------------| -/// | Full | Read + Write | -/// | Lightweight | Write | -/// | Backup | Write | -fn get_filtered_storage_logs( - storage_logs: &[WitnessStorageLog], - mode: MetadataCalculatorMode, -) -> Vec<&WitnessStorageLog> { - storage_logs - .iter() - .filter(|log| { - mode == MetadataCalculatorMode::Full || log.storage_log.kind == StorageLogKind::Write - }) - .collect() -} - -pub(crate) fn get_logs_for_l1_batch( - storage: &mut StorageProcessor<'_>, - l1_batch_number: L1BatchNumber, -) -> Option { - let header = storage.blocks_dal().get_block_header(l1_batch_number)?; - - // `BTreeMap` is used because tree needs to process slots in lexicographical order. 
- let mut storage_logs: BTreeMap = BTreeMap::new(); - - let protective_reads = storage - .storage_logs_dedup_dal() - .get_protective_reads_for_l1_batch(l1_batch_number); - let touched_slots = storage - .storage_logs_dedup_dal() - .get_touched_slots_for_l1_batch(l1_batch_number); - - let hashed_keys = protective_reads - .iter() - .chain(touched_slots.keys()) - .map(|key| key.hashed_key()) - .collect(); - let previous_values = storage - .storage_logs_dedup_dal() - .get_previous_storage_values(hashed_keys, l1_batch_number); - - for storage_key in protective_reads { - let previous_value = previous_values - .get(&storage_key.hashed_key()) - .cloned() - .unwrap(); - - // Sanity check: value must not change for slots that require protective reads. - if let Some(value) = touched_slots.get(&storage_key) { - assert_eq!( - previous_value, *value, - "Value was changed for slot that requires protective read" - ); - } - - storage_logs.insert( - storage_key, - WitnessStorageLog { - storage_log: StorageLog::new_read_log(storage_key, previous_value), - previous_value, - }, - ); - } - - for (storage_key, value) in touched_slots { - let previous_value = previous_values - .get(&storage_key.hashed_key()) - .cloned() - .unwrap(); - - if previous_value != value { - storage_logs.insert( - storage_key, - WitnessStorageLog { - storage_log: StorageLog::new_write_log(storage_key, value), - previous_value, - }, - ); - } + backup_latency.report(); } - - Some(WitnessBlockWithLogs { - header, - storage_logs: storage_logs.into_values().collect(), - }) } diff --git a/core/bin/zksync_core/src/metadata_calculator/tests.rs b/core/bin/zksync_core/src/metadata_calculator/tests.rs index 8294cc1efdec..e23fc4d4c317 100644 --- a/core/bin/zksync_core/src/metadata_calculator/tests.rs +++ b/core/bin/zksync_core/src/metadata_calculator/tests.rs @@ -1,163 +1,328 @@ -use itertools::Itertools; -use std::path::Path; -use std::str::FromStr; - +use assert_matches::assert_matches; use db_test_macro::db_test; use tempfile::TempDir; use tokio::sync::watch; -use crate::genesis::{chain_schema_genesis, operations_schema_genesis}; -use crate::metadata_calculator::MetadataCalculator; +use std::{ + ops, panic, + path::Path, + sync::mpsc, + thread, + time::{Duration, Instant}, +}; -use crate::MetadataCalculatorMode; +use crate::genesis::{create_genesis_block, save_genesis_block_metadata}; +use crate::metadata_calculator::{MetadataCalculator, MetadataCalculatorMode, TreeImplementation}; use zksync_config::ZkSyncConfig; use zksync_contracts::BaseSystemContracts; -use zksync_dal::ConnectionPool; -use zksync_merkle_tree::ZkSyncTree; -use zksync_storage::db::Database; -use zksync_storage::RocksDB; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_health_check::{CheckHealth, CheckHealthStatus}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{ - block::{L1BatchHeader, MiniblockHeader}, + block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, commitment::BlockCommitment, - AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, H256, + proofs::PrepareBasicCircuitsJob, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageKey, StorageLog, + H256, }; use zksync_utils::{miniblock_hash, u32_to_h256}; +const RUN_TIMEOUT: Duration = Duration::from_secs(5); + +fn run_with_timeout(timeout: Duration, action: F) -> T +where + T: Send + 'static, + F: FnOnce() -> T + Send + 'static, +{ + let (termination_sx, termination_rx) = mpsc::channel(); + let join_handle = 
thread::spawn(move || { + termination_sx.send(action()).ok(); + }); + let output = termination_rx + .recv_timeout(timeout) + .expect("timed out waiting for metadata calculator"); + match join_handle.join() { + Ok(()) => output, + Err(panic_object) => panic::resume_unwind(panic_object), + } +} + +fn test_genesis_creation(pool: &ConnectionPool, implementation: TreeImplementation) { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + + let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation); + assert!(calculator.tree_tag().starts_with("full")); + run_calculator(calculator, pool.clone()); + let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation); + assert_eq!(calculator.updater.tree().block_number(), 1); +} + +#[db_test] +async fn genesis_creation(pool: ConnectionPool) { + test_genesis_creation(&pool, TreeImplementation::Old); +} + #[db_test] -async fn genesis_creation(connection_pool: ConnectionPool) { +async fn genesis_creation_with_new_tree(pool: ConnectionPool) { + test_genesis_creation(&pool, TreeImplementation::New); +} + +fn test_basic_workflow( + pool: &ConnectionPool, + implementation: TreeImplementation, +) -> PrepareBasicCircuitsJob { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - { - let metadata_calculator = - setup_metadata_calculator(temp_dir.path(), connection_pool.clone()).await; - metadata_calculator - .run(connection_pool.clone(), watch::channel(false).1) - .await; - } + let (calculator, object_store) = setup_calculator(temp_dir.path(), pool, implementation); + reset_db_state(pool, 1); + run_calculator(calculator, pool.clone()); - let mut metadata_calculator = setup_metadata_calculator(temp_dir.path(), connection_pool).await; - assert_eq!( - metadata_calculator.get_current_rocksdb_block_number(), - L1BatchNumber(1) - ); + let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).unwrap(); + assert!(job.next_enumeration_index() > 0); + let merkle_paths: Vec<_> = job.clone().into_merkle_paths().collect(); + assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 100); + // ^ The exact values depend on ops in genesis block + assert!(merkle_paths.iter().all(|log| log.is_write)); + + let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation); + assert_eq!(calculator.updater.tree().block_number(), 2); + job } -#[ignore] #[db_test] -async fn backup_recovery(connection_pool: ConnectionPool) { - let backup_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let backup_path = backup_dir.path().to_str().unwrap().to_string(); - - { - let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let metadata_calculator = setup_metadata_calculator_with_options( - temp_dir.path(), - connection_pool.clone(), - MetadataCalculatorMode::Backup, - Some(backup_path.clone()), - ) - .await; - reset_db_state(connection_pool.clone(), 1).await; - metadata_calculator - .run(connection_pool.clone(), watch::channel(false).1) - .await; +async fn basic_workflow(pool: ConnectionPool) { + let old_job = test_basic_workflow(&pool, TreeImplementation::Old); + let new_job = test_basic_workflow(&pool, TreeImplementation::New); + assert_jobs_eq(old_job, new_job); +} + +fn assert_jobs_eq(old_job: PrepareBasicCircuitsJob, new_job: PrepareBasicCircuitsJob) { + assert_eq!( + old_job.next_enumeration_index(), + new_job.next_enumeration_index() + ); + let old_merkle_paths = old_job.into_merkle_paths(); + let 
new_merkle_paths = new_job.into_merkle_paths(); + assert_eq!(old_merkle_paths.len(), new_merkle_paths.len()); + for (old_path, new_path) in old_merkle_paths.zip(new_merkle_paths) { + assert_eq!(old_path, new_path); } +} +#[db_test] +async fn status_receiver_has_correct_states(pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let mut metadata_calculator = setup_metadata_calculator_with_options( - temp_dir.path(), - connection_pool, - MetadataCalculatorMode::Full, - Some(backup_path), - ) - .await; + + let (calculator, _) = setup_calculator(temp_dir.path(), &pool, TreeImplementation::Old); + let tree_health_check = calculator.tree_health_check(); + assert_matches!( + tree_health_check.check_health(), + CheckHealthStatus::NotReady(msg) if msg.contains("full") + ); + let other_tree_health_check = calculator.tree_health_check(); + assert_matches!( + other_tree_health_check.check_health(), + CheckHealthStatus::NotReady(msg) if msg.contains("full") + ); + reset_db_state(&pool, 1); + run_calculator(calculator, pool); + assert_eq!(tree_health_check.check_health(), CheckHealthStatus::Ready); assert_eq!( - metadata_calculator.get_current_rocksdb_block_number(), - L1BatchNumber(2) + other_tree_health_check.check_health(), + CheckHealthStatus::Ready ); } -#[db_test] -async fn basic_workflow(connection_pool: ConnectionPool) { +fn test_multi_block_workflow( + pool: ConnectionPool, + implementation: TreeImplementation, +) -> Box { + // Run all transactions as a single block let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let (calculator, _) = setup_calculator(temp_dir.path(), &pool, implementation); + reset_db_state(&pool, 1); + let root_hash = run_calculator(calculator, pool.clone()); + + // Run the same transactions as multiple blocks + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool, implementation); + reset_db_state(&pool, 10); + let multi_block_root_hash = run_calculator(calculator, pool); + assert_eq!(multi_block_root_hash, root_hash); - { - let metadata_calculator = - setup_metadata_calculator(temp_dir.path(), connection_pool.clone()).await; - reset_db_state(connection_pool.clone(), 1).await; - metadata_calculator - .run(connection_pool.clone(), watch::channel(false).1) - .await; + let mut prev_index = None; + for block_number in 1..=10 { + let block_number = L1BatchNumber(block_number); + let job: PrepareBasicCircuitsJob = object_store.get(block_number).unwrap(); + let next_enumeration_index = job.next_enumeration_index(); + let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); + assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 10); + + if let Some(prev_index) = prev_index { + assert_eq!(next_enumeration_index, prev_index + 1); + } + let max_leaf_index_in_block = merkle_paths + .iter() + .filter_map(|log| log.first_write.then_some(log.leaf_enumeration_index)) + .max(); + prev_index = max_leaf_index_in_block.or(prev_index); } + object_store +} - let mut metadata_calculator = setup_metadata_calculator(temp_dir.path(), connection_pool).await; - assert_eq!( - metadata_calculator.get_current_rocksdb_block_number(), - L1BatchNumber(2) - ); +#[db_test] +async fn multi_block_workflow(pool: ConnectionPool) { + let old_store = test_multi_block_workflow(pool.clone(), TreeImplementation::Old); + let new_store = test_multi_block_workflow(pool, TreeImplementation::New); + + for 
block_number in 1..=10 { + let old_job: PrepareBasicCircuitsJob = old_store.get(L1BatchNumber(block_number)).unwrap(); + let new_job: PrepareBasicCircuitsJob = new_store.get(L1BatchNumber(block_number)).unwrap(); + assert_jobs_eq(old_job, new_job); + } +} + +fn test_switch_from_old_to_new_tree_without_catchup(pool: ConnectionPool, block_count: usize) { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + + let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old); + assert!(calculator.tree_tag().starts_with("lightweight")); + reset_db_state(&pool, block_count); + let old_root_hash = run_calculator(calculator, pool.clone()); + + let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::New); + let new_root_hash = run_calculator(calculator, pool); + assert_eq!(new_root_hash, old_root_hash); +} + +#[db_test] +async fn switching_from_old_to_new_tree_without_catchup(pool: ConnectionPool) { + test_switch_from_old_to_new_tree_without_catchup(pool, 1); } #[db_test] -async fn multi_block_workflow(connection_pool: ConnectionPool) { - // run all transactions as single block +async fn switching_from_old_to_new_tree_in_multiple_blocks_without_catchup(pool: ConnectionPool) { + test_switch_from_old_to_new_tree_without_catchup(pool, 10); +} + +#[db_test] +async fn switching_between_tree_impls_with_additional_blocks(pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - { - let metadata_calculator = - setup_metadata_calculator(temp_dir.path(), connection_pool.clone()).await; - reset_db_state(connection_pool.clone(), 1).await; - metadata_calculator - .run(connection_pool.clone(), watch::channel(false).1) - .await; - } + let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old); + reset_db_state(&pool, 5); + run_calculator(calculator, pool.clone()); + + let mut calculator = + setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::New); + let (stop_sx, stop_rx) = watch::channel(false); + let (delay_sx, delay_rx) = mpsc::channel(); + calculator.delayer.delay_notifier = delay_sx; + + let calculator_handle = { + let pool = pool.clone(); + thread::spawn(move || calculator.run(&pool, stop_rx)) + }; + // Wait until the calculator has processed initial blocks. + let (block_count, _) = delay_rx + .recv_timeout(RUN_TIMEOUT) + .expect("metadata calculator timed out processing initial blocks"); + assert_eq!(block_count, 6); - let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); - let root_hash = { - let tree = ZkSyncTree::new(db); - tree.root_hash() + // Add some new blocks to the storage. + let new_logs = gen_storage_logs(100..200, 10); + extend_db_state(&mut pool.access_storage_blocking(), new_logs); + + // Wait until these blocks are processed. The calculator may have spurious delays, + // thus we wait in a loop. + let updated_root_hash = loop { + let (block_count, root_hash) = delay_rx + .recv_timeout(RUN_TIMEOUT) + .expect("metadata calculator shut down prematurely"); + if block_count == 16 { + stop_sx.send(true).unwrap(); // Shut down the calculator. + break root_hash; + } }; + run_with_timeout(RUN_TIMEOUT, || calculator_handle.join()).unwrap(); + + // Switch back to the old implementation. It should process new blocks independently + // and result in the same tree root hash. 
+ let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old); + let root_hash_for_old_tree = run_calculator(calculator, pool); + assert_eq!(root_hash_for_old_tree, updated_root_hash); +} - // run same transactions as multiple blocks +#[db_test] +async fn throttling_new_tree(pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - { - let metadata_calculator = - setup_metadata_calculator(temp_dir.path(), connection_pool.clone()).await; - reset_db_state(connection_pool.clone(), 10).await; - metadata_calculator - .run(connection_pool.clone(), watch::channel(false).1) - .await; - } - let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); - let tree = ZkSyncTree::new(db); - let multi_block_root_hash = tree.root_hash(); + let mut config = create_config(temp_dir.path()); + config.db.new_merkle_tree_throttle_ms = 100; + let mut calculator = setup_calculator_with_options( + &config, + &pool, + &ObjectStoreFactory::mock(), + MetadataCalculatorMode::Lightweight(TreeImplementation::New), + ); + let (delay_sx, delay_rx) = mpsc::channel(); + calculator.throttler.delay_notifier = delay_sx; + reset_db_state(&pool, 5); - // verify that hashes match - assert_eq!(multi_block_root_hash, root_hash); + let start = Instant::now(); + run_calculator(calculator, pool); + let elapsed = start.elapsed(); + assert!(elapsed >= Duration::from_millis(100), "{:?}", elapsed); + + // Throttling should be enabled only once, when we have no more blocks to process + let (block_count, _) = delay_rx.try_recv().unwrap(); + assert_eq!(block_count, 6); + delay_rx.try_recv().unwrap_err(); } -async fn setup_metadata_calculator(db_path: &Path, pool: ConnectionPool) -> MetadataCalculator { - setup_metadata_calculator_with_options(db_path, pool, MetadataCalculatorMode::Full, None).await +fn setup_calculator( + db_path: &Path, + pool: &ConnectionPool, + implementation: TreeImplementation, +) -> (MetadataCalculator, Box) { + let store_factory = ObjectStoreFactory::mock(); + let config = create_config(db_path); + let mode = MetadataCalculatorMode::Full(implementation); + let calculator = setup_calculator_with_options(&config, pool, &store_factory, mode); + (calculator, store_factory.create_store()) } -async fn setup_metadata_calculator_with_options( +fn setup_lightweight_calculator( db_path: &Path, - pool: ConnectionPool, - mode: MetadataCalculatorMode, - backup_directory: Option, + pool: &ConnectionPool, + implementation: TreeImplementation, ) -> MetadataCalculator { - let backup_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let mut config = ZkSyncConfig::from_env().clone(); - config.db.path = db_path.to_str().unwrap().to_string(); - config.db.merkle_tree_fast_ssd_path = config.db.path.clone(); - config.db.merkle_tree_backup_path = - backup_directory.unwrap_or_else(|| backup_dir.path().to_str().unwrap().to_string()); + let mode = MetadataCalculatorMode::Lightweight(implementation); + let config = create_config(db_path); + setup_calculator_with_options(&config, pool, &ObjectStoreFactory::mock(), mode) +} + +fn create_config(db_path: &Path) -> ZkSyncConfig { + let mut config = ZkSyncConfig::from_env(); + config.chain.operations_manager.delay_interval = 50; // ms + config.db.path = path_to_string(db_path); + config.db.merkle_tree_fast_ssd_path = path_to_string(&db_path.join("old")); + config.db.new_merkle_tree_ssd_path = path_to_string(&db_path.join("new")); config.db.backup_interval_ms = 0; - let fee_address 
= Address::repeat_byte(0x01); - let mut storage = pool.access_storage().await; - let metadata_calculator = MetadataCalculator::new(&config, mode); + config +} + +fn setup_calculator_with_options( + config: &ZkSyncConfig, + pool: &ConnectionPool, + store_factory: &ObjectStoreFactory, + mode: MetadataCalculatorMode, +) -> MetadataCalculator { + let store_factory = matches!(mode, MetadataCalculatorMode::Full(_)).then_some(store_factory); + let metadata_calculator = MetadataCalculator::new(config, store_factory, mode); + let mut storage = pool.access_storage_blocking(); if storage.blocks_dal().is_genesis_needed() { - let chain_id = H256::from_low_u64_be(config.chain.eth.zksync_network_id as u64); + let chain_id = L2ChainId(config.chain.eth.zksync_network_id); let base_system_contracts = BaseSystemContracts::load_from_disk(); let block_commitment = BlockCommitment::new( vec![], @@ -169,42 +334,76 @@ async fn setup_metadata_calculator_with_options( base_system_contracts.default_aa.hash, ); - chain_schema_genesis(&mut storage, fee_address, chain_id, base_system_contracts).await; - operations_schema_genesis( + let fee_address = Address::repeat_byte(0x01); + create_genesis_block(&mut storage, fee_address, chain_id, base_system_contracts); + save_genesis_block_metadata( &mut storage, &block_commitment, - H256::from_slice(&metadata_calculator.tree.root_hash()), + metadata_calculator.updater.tree().root_hash(), 1, ); } metadata_calculator } -async fn reset_db_state(pool: ConnectionPool, num_blocks: usize) { - let mut storage = pool.access_storage().await; - // Drops all blocks (except the block with number = 0) and theirs storage logs. +fn path_to_string(path: &Path) -> String { + path.to_str().unwrap().to_owned() +} + +fn run_calculator(mut calculator: MetadataCalculator, pool: ConnectionPool) -> H256 { + let (stop_sx, stop_rx) = watch::channel(false); + let (delay_sx, delay_rx) = mpsc::channel(); + calculator.delayer.delay_notifier = delay_sx; + let delayer_handle = thread::spawn(move || { + // Wait until the calculator has processed all initially available blocks, + // then stop it via signal. + let (_, root_hash) = delay_rx + .recv() + .expect("metadata calculator shut down prematurely"); + stop_sx.send(true).unwrap(); + root_hash + }); + + run_with_timeout(RUN_TIMEOUT, move || calculator.run(&pool, stop_rx)); + delayer_handle.join().unwrap() +} + +fn reset_db_state(pool: &ConnectionPool, num_blocks: usize) { + let mut storage = pool.access_storage_blocking(); + // Drops all blocks (except the block with number = 0) and their storage logs. storage .storage_logs_dal() .rollback_storage_logs(MiniblockNumber(0)); storage.blocks_dal().delete_miniblocks(MiniblockNumber(0)); storage.blocks_dal().delete_l1_batches(L1BatchNumber(0)); + let logs = gen_storage_logs(0..100, num_blocks); + extend_db_state(&mut storage, logs); +} + +fn extend_db_state( + storage: &mut StorageProcessor<'_>, + new_logs: impl IntoIterator>, +) { + let next_block = storage.blocks_dal().get_sealed_block_number().0 + 1; + let base_system_contracts = BaseSystemContracts::load_from_disk(); - let all_logs = gen_storage_logs(num_blocks); - for (block_number, block_logs) in (1..=(num_blocks as u32)).zip(all_logs) { + for (idx, block_logs) in (next_block..).zip(new_logs) { + let block_number = L1BatchNumber(idx); let mut header = L1BatchHeader::new( - L1BatchNumber(block_number), + block_number, 0, Address::default(), base_system_contracts.hashes(), ); header.is_finished = true; - // Assumes that L1 batch consists of only one miniblock. 
+ // Assumes that L1 batch consists of only one miniblock. + let miniblock_number = MiniblockNumber(idx); let miniblock_header = MiniblockHeader { - number: MiniblockNumber(block_number), + number: miniblock_number, timestamp: header.timestamp, - hash: miniblock_hash(MiniblockNumber(block_number)), + hash: miniblock_hash(miniblock_number), l1_tx_count: header.l1_tx_count, l2_tx_count: header.l2_tx_count, base_fee_per_gas: header.base_fee_per_gas, @@ -215,44 +414,47 @@ async fn reset_db_state(pool: ConnectionPool, num_blocks: usize) { storage .blocks_dal() - .insert_l1_batch(header.clone(), Default::default()); + .insert_l1_batch(header, BlockGasCount::default()); storage.blocks_dal().insert_miniblock(miniblock_header); - storage.storage_logs_dal().insert_storage_logs( - MiniblockNumber(block_number), - &[(H256::default(), block_logs)], - ); + storage + .storage_logs_dal() + .insert_storage_logs(miniblock_number, &[(H256::zero(), block_logs)]); storage .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(header.number); + .mark_miniblocks_as_executed_in_l1_batch(block_number); } } -fn gen_storage_logs(num_blocks: usize) -> Vec> { - // Note, addresses and keys of storage logs must be sorted for the multi_block_workflow test. - let addrs = vec![ +fn gen_storage_logs(indices: ops::Range, num_blocks: usize) -> Vec> { + // Addresses and keys of storage logs must be sorted for the `multi_block_workflow` test. + let mut accounts = [ "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", "89b8988a018f5348f52eeac77155a793adf03ecc", "782806db027c08d36b2bed376b4271d1237626b3", "b2b57b76717ee02ae1327cc3cf1f40e76f692311", ] - .into_iter() - .map(|s| Address::from_str(s).unwrap()) - .sorted(); - - let proof_keys: Vec<_> = addrs - .flat_map(|addr| { - (0..20).map(move |i| StorageKey::new(AccountTreeId::new(addr), u32_to_h256(i))) - }) + .map(|s| AccountTreeId::new(s.parse::
().unwrap())); + accounts.sort_unstable(); + + let account_keys = (indices.start / 5)..(indices.end / 5); + let proof_keys = accounts.iter().flat_map(|&account| { + account_keys + .clone() + .map(move |i| StorageKey::new(account, u32_to_h256(i))) + }); + let proof_values = indices.map(u32_to_h256); + + let logs: Vec<_> = proof_keys + .zip(proof_values) + .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)) .collect(); - let proof_values: Vec<_> = (0..100).map(u32_to_h256).collect(); + for window in logs.windows(2) { + let [prev, next] = window else { unreachable!() }; + assert!(prev.key < next.key); + } - let logs = proof_keys - .iter() - .zip(proof_values.iter()) - .map(|(proof_key, &proof_value)| StorageLog::new_write_log(*proof_key, proof_value)) - .collect::>(); logs.chunks(logs.len() / num_blocks) - .map(|v| v.into()) + .map(<[_]>::to_vec) .collect() } diff --git a/core/bin/zksync_core/src/metadata_calculator/updater.rs b/core/bin/zksync_core/src/metadata_calculator/updater.rs new file mode 100644 index 000000000000..d7917c69c852 --- /dev/null +++ b/core/bin/zksync_core/src/metadata_calculator/updater.rs @@ -0,0 +1,223 @@ +//! Tree updater trait and its implementations. + +use tokio::sync::watch; + +use std::time::Instant; + +use zksync_config::DBConfig; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_merkle_tree::ZkSyncTree as OldTree; +use zksync_merkle_tree2::domain::ZkSyncTree as NewTree; +use zksync_object_store::ObjectStore; +use zksync_storage::RocksDB; +use zksync_types::{block::WitnessBlockWithLogs, L1BatchNumber}; + +use super::{ + get_logs_for_l1_batch, + helpers::{Delayer, ZkSyncTree}, + metrics::TreeUpdateStage, + MetadataCalculator, MetadataCalculatorMode, MetadataCalculatorStatus, TreeImplementation, +}; + +#[derive(Debug)] +pub(super) struct TreeUpdater { + mode: MetadataCalculatorMode, + tree: ZkSyncTree, + max_block_batch: usize, + object_store: Option>, +} + +impl TreeUpdater { + pub fn new( + mode: MetadataCalculatorMode, + db: RocksDB, + config: &DBConfig, + object_store: Option>, + ) -> Self { + use self::TreeImplementation::{New, Old}; + + let tree = match mode { + MetadataCalculatorMode::Full(Old) => ZkSyncTree::Old(OldTree::new(db)), + MetadataCalculatorMode::Full(New) => ZkSyncTree::New(NewTree::new(db)), + MetadataCalculatorMode::Lightweight(Old) => { + ZkSyncTree::Old(OldTree::new_lightweight(db)) + } + MetadataCalculatorMode::Lightweight(New) => { + ZkSyncTree::New(NewTree::new_lightweight(db)) + } + }; + + let max_block_batch = if matches!(mode, MetadataCalculatorMode::Lightweight(Old)) { + // The old tree implementation does not support processing multiple blocks + // in the lightweight mode. 
+ 1 + } else { + config.max_block_batch + }; + Self { + mode, + tree, + max_block_batch, + object_store, + } + } + + #[cfg(test)] + pub fn tree(&self) -> &ZkSyncTree { + &self.tree + } + + pub fn mode(&self) -> MetadataCalculatorMode { + self.mode + } + + #[tracing::instrument(skip(self, storage, blocks))] + fn process_multiple_blocks( + &mut self, + storage: &mut StorageProcessor<'_>, + blocks: Vec, + ) { + let start = Instant::now(); + + let compute_latency = TreeUpdateStage::Compute.start(); + let total_logs: usize = blocks.iter().map(|block| block.storage_logs.len()).sum(); + let storage_logs = blocks.iter().map(|block| block.storage_logs.as_slice()); + + let mut previous_root_hash = self.tree.root_hash(); + let metadata = self.tree.process_blocks(storage_logs); + compute_latency.report(); + + let mut block_headers = Vec::with_capacity(blocks.len()); + for (mut metadata_at_block, block) in metadata.into_iter().zip(blocks) { + let prepare_results_latency = TreeUpdateStage::PrepareResults.start(); + let witness_input = metadata_at_block.witness_input.take(); + + let next_root_hash = metadata_at_block.root_hash; + let metadata = + MetadataCalculator::build_block_metadata(metadata_at_block, &block.header); + prepare_results_latency.report(); + + let block_with_metadata = + MetadataCalculator::reestimate_block_commit_gas(storage, block.header, metadata); + let block_number = block_with_metadata.header.number; + + let object_key = self.object_store.as_ref().map(|object_store| { + let witness_input = + witness_input.expect("No witness input provided by tree; this is a bug"); + + TreeUpdateStage::SaveWitnesses + .run(|| object_store.put(block_number, &witness_input).unwrap()) + }); + + // Save the metadata in case the lightweight tree is behind / not running + let metadata = block_with_metadata.metadata; + TreeUpdateStage::SavePostgres.run(|| { + storage.blocks_dal().save_blocks_metadata( + block_number, + metadata, + previous_root_hash, + ); + // ^ Note that `save_blocks_metadata()` will not blindly overwrite changes if the block + // metadata already exists; instead, it'll check that the old an new metadata match. + // That is, if we run both tree implementations, we'll get metadata correspondence + // right away without having to implement dedicated code. + + if let Some(object_key) = &object_key { + storage + .witness_generator_dal() + .save_witness_inputs(block_number, object_key); + } + }); + + previous_root_hash = next_root_hash; + block_headers.push(block_with_metadata.header); + } + + TreeUpdateStage::SaveRocksDB.run(|| self.tree.save()); + MetadataCalculator::update_metrics(self.mode, &block_headers, total_logs, start); + } + + fn tree_implementation(&self) -> TreeImplementation { + match &self.tree { + ZkSyncTree::Old(_) => TreeImplementation::Old, + ZkSyncTree::New(_) => TreeImplementation::New, + } + } + + fn step(&mut self, mut storage: StorageProcessor<'_>, next_block_to_seal: &mut L1BatchNumber) { + let new_blocks: Vec<_> = TreeUpdateStage::LoadChanges.run(|| { + let last_sealed_block = storage.blocks_dal().get_sealed_block_number(); + (next_block_to_seal.0..=last_sealed_block.0) + .map(L1BatchNumber) + .take(self.max_block_batch) + .flat_map(|block_number| get_logs_for_l1_batch(&mut storage, block_number)) + .collect() + }); + + if let Some(last_block) = new_blocks.last() { + *next_block_to_seal = last_block.header.number + 1; + self.process_multiple_blocks(&mut storage, new_blocks); + } + } + + /// The processing loop for this updater. 
+ pub fn loop_updating_tree( + mut self, + delayer: Delayer, + throttler: Delayer, + pool: &ConnectionPool, + mut stop_receiver: watch::Receiver, + status_sender: watch::Sender, + ) { + let mut storage = pool.access_storage_blocking(); + + // Ensure genesis creation + let tree = &mut self.tree; + if tree.is_empty() { + let Some(logs) = get_logs_for_l1_batch(&mut storage, L1BatchNumber(0)) else { + panic!("Missing storage logs for the genesis block"); + }; + tree.process_block(&logs.storage_logs); + tree.save(); + } + let mut next_block_to_seal = L1BatchNumber(tree.block_number()); + + let current_db_block = storage.blocks_dal().get_sealed_block_number() + 1; + let last_block_number_with_metadata = + storage.blocks_dal().get_last_block_number_with_metadata() + 1; + drop(storage); + + let tree_tag = self.tree_implementation().as_tag(); + vlog::info!( + "Initialized metadata calculator with {} tree implementation. \ + Current RocksDB block: {}. Current Postgres block: {}", + tree_tag, + next_block_to_seal, + current_db_block + ); + metrics::gauge!( + "server.metadata_calculator.backup_lag", + (last_block_number_with_metadata - *next_block_to_seal).0 as f64, + "tree" => tree_tag + ); + status_sender.send_replace(MetadataCalculatorStatus::Ready); + + loop { + if *stop_receiver.borrow_and_update() { + vlog::info!("Stop signal received, metadata_calculator is shutting down"); + break; + } + let storage = pool.access_storage_blocking(); + + let next_block_snapshot = *next_block_to_seal; + self.step(storage, &mut next_block_to_seal); + if next_block_snapshot == *next_block_to_seal { + // We didn't make any progress. + delayer.wait(&self.tree); + } else { + // We've made some progress; apply throttling if necessary. + throttler.wait(&self.tree); + } + } + } +} diff --git a/core/bin/zksync_core/src/reorg_detector/mod.rs b/core/bin/zksync_core/src/reorg_detector/mod.rs new file mode 100644 index 000000000000..ab1ab015930e --- /dev/null +++ b/core/bin/zksync_core/src/reorg_detector/mod.rs @@ -0,0 +1,188 @@ +use std::time::Duration; +use zksync_web3_decl::{ + jsonrpsee::core::{error::Error as RpcError, RpcResult}, + jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + namespaces::ZksNamespaceClient, +}; + +use zksync_dal::ConnectionPool; +use zksync_types::L1BatchNumber; + +const SLEEP_INTERVAL: Duration = Duration::from_secs(5); + +/// This is a component that is responsible for detecting the batch reorgs. +/// Batch reorg is a rare event of manual intervention, when the node operator +/// decides to revert some of the not yet finalized batches for some reason +/// (e.g. inability to generate a proof), and then potentially +/// re-organize transactions in them to fix the problem. +/// +/// To detect them, we constantly check the latest sealed batch root hash, +/// and in the event of mismatch, we know that there has been a reorg. +/// We then perform a binary search to find the latest correct block +/// and revert all batches after it, to keep being consistent with the main node. +/// +/// This is the only component that is expected to finish its execution +/// in the even of reorg, since we have to restart the node after a rollback is performed, +/// and is special-cased in the `zksync_external_node` crate. 
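The module comment above describes localizing a reorg by binary search over batch numbers, using "root hashes match" as the predicate; the `binary_search_with` helper added at the end of this file implements exactly that over an async predicate. A synchronous sketch of the same idea with a simulated divergence point (standalone, not the node's RPC-backed predicate):

```rust
/// Finds the last block number in `(left, right)` for which `hashes_match` holds,
/// assuming matches form a prefix: true, true, ..., true, false, false, ...
/// Mirrors the `binary_search_with` helper added later in this file, minus async.
fn last_matching_block(mut left: u32, mut right: u32, hashes_match: impl Fn(u32) -> bool) -> u32 {
    while left + 1 < right {
        let middle = (left + right) / 2;
        if hashes_match(middle) {
            left = middle; // `middle` is still consistent with the main node
        } else {
            right = middle; // divergence is at `middle` or earlier
        }
    }
    left
}

fn main() {
    // Pretend batches below 57 agree with the main node and everything above diverged.
    let divergence_point = 57;
    let last_correct = last_matching_block(0, 100, |n| n < divergence_point);
    assert_eq!(last_correct, divergence_point - 1);
    println!("last correct batch: #{last_correct}");
}
```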
+#[derive(Debug)] +pub struct ReorgDetector { + client: HttpClient, + pool: ConnectionPool, +} + +impl ReorgDetector { + pub fn new(url: &str, pool: ConnectionPool) -> Self { + let client = HttpClientBuilder::default() + .build(url) + .expect("Failed to create HTTP client"); + Self { client, pool } + } + + /// Compares root hashes of the latest local batch and of the same batch from the main node. + async fn root_hashes_match(&self, block_number: L1BatchNumber) -> RpcResult { + // Unwrapping is fine since the caller always checks that these root hashes exist. + let local_hash = self + .pool + .access_storage_blocking() + .blocks_dal() + .get_block_state_root(block_number) + .unwrap_or_else(|| { + panic!("Root hash does not exist for local batch #{}", block_number) + }); + let Some(hash) = self + .client + .get_l1_batch_details(block_number) + .await? + .and_then(|b| b.root_hash) + else { + // Due to reorg, locally we may be ahead of the main node. + // Lack of the root hash on the main node is treated as a hash mismatch, + // so we can continue searching for the last correct block. + return Ok(false); + }; + Ok(hash == local_hash) + } + + /// Localizes a reorg: performs binary search to determine the last non-diverged block. + async fn detect_reorg(&self, diverged_block: L1BatchNumber) -> RpcResult { + binary_search_with(1, diverged_block.0, |block_number| { + self.root_hashes_match(L1BatchNumber(block_number)) + }) + .await + .map(L1BatchNumber) + } + + pub async fn run(self) -> L1BatchNumber { + loop { + match self.run_inner().await { + Ok(batch_number) => return batch_number, + Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { + vlog::warn!("Following transport error occurred: {}", err); + vlog::info!("Trying again after a delay"); + tokio::time::sleep(SLEEP_INTERVAL).await; + } + Err(err) => { + panic!("Unexpected error in the reorg detector: {}", err); + } + } + } + } + + /// Checks if the external node is ahead of the main node *NOT* because of a reorg. + /// In such an event, we should not do anything. + /// + /// Theoretically, external node might calculate batch root hash before the main + /// node. Therefore, we need to be sure that we check a batch which has root hashes + /// both on the main node and on the external node. + async fn is_legally_ahead_of_main_node( + &self, + sealed_block_number: L1BatchNumber, + ) -> RpcResult { + // We must know the latest batch on the main node *before* we ask it for a root hash + // to prevent a race condition (asked for root hash, batch sealed on main node, we've got + // inconsistent results). + let last_main_node_batch = self.client.get_l1_batch_number().await?; + let main_node_batch_root_hash = self + .client + .get_l1_batch_details(sealed_block_number) + .await? + .and_then(|b| b.root_hash); + + let en_ahead_for = sealed_block_number + .0 + .checked_sub(last_main_node_batch.as_u32()); + // Theoretically it's possible that the EN would not only calculate the root hash, but also seal the batch + // quicker than the main node. So, we allow us to be at most one batch ahead of the main node. + // If the gap is bigger, it's certainly a reorg. + // Allowing the gap is safe: if reorg has happened, it'll be detected anyway in the future iterations. 
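The `checked_sub` plus `<= Some(1)` check described above is what tolerates the external node legitimately running one batch ahead. A tiny standalone sketch of just that arithmetic, with plain integers standing in for `L1BatchNumber` (in the real check this is additionally combined with the main node not yet exposing a root hash for the batch):

```rust
/// Returns true if the local batch number is at most one ahead of the main node's
/// latest batch; being behind (or level) also counts as "legal".
fn en_legally_ahead(local_sealed: u32, main_node_latest: u32) -> bool {
    // `checked_sub` yields `None` when we are behind, so the comparison stays well-defined:
    // `None` orders before any `Some(_)`.
    local_sealed.checked_sub(main_node_latest) <= Some(1)
}

fn main() {
    assert!(en_legally_ahead(10, 10)); // same height
    assert!(en_legally_ahead(11, 10)); // one ahead: the EN may have sealed faster
    assert!(!en_legally_ahead(12, 10)); // two ahead: certainly a reorg
    assert!(en_legally_ahead(8, 10)); // behind the main node
}
```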
+ Ok(main_node_batch_root_hash.is_none() && en_ahead_for <= Some(1)) + } + + async fn run_inner(&self) -> RpcResult { + loop { + let sealed_block_number = self + .pool + .access_storage_blocking() + .blocks_dal() + .get_last_block_number_with_metadata(); + + // If the main node has to catch up with us, we should not do anything just yet. + if self + .is_legally_ahead_of_main_node(sealed_block_number) + .await? + { + vlog::trace!( + "Local state was updated ahead of the main node. Waiting for the main node to seal the batch" + ); + tokio::time::sleep(SLEEP_INTERVAL).await; + continue; + } + + // At this point we're certain that if we detect a reorg, it's real. + vlog::trace!("Checking for reorgs - batch number {}", sealed_block_number); + if self.root_hashes_match(sealed_block_number).await? { + metrics::gauge!( + "external_node.last_correct_batch", + sealed_block_number.0 as f64, + "component" => "reorg_detector", + ); + tokio::time::sleep(SLEEP_INTERVAL).await; + } else { + vlog::warn!("Reorg detected: last state hash doesn't match the state hash from main node (batch #{sealed_block_number})"); + vlog::info!("Searching for the first diverged batch"); + let last_correct_block = self.detect_reorg(sealed_block_number).await?; + vlog::info!("Reorg localized: last correct batch is #{last_correct_block}",); + return Ok(last_correct_block); + } + } + } +} + +async fn binary_search_with(mut left: u32, mut right: u32, mut f: F) -> Result +where + F: FnMut(u32) -> Fut, + Fut: std::future::Future>, +{ + while left + 1 < right { + let middle = (left + right) / 2; + if f(middle).await? { + left = middle; + } else { + right = middle; + } + } + Ok(left) +} + +#[cfg(test)] +mod tests { + /// Tests the binary search algorithm. + #[tokio::test] + async fn test_binary_search() { + for divergence_point in [1, 50, 51, 100] { + let mut f = |x| async move { Ok::<_, ()>(x < divergence_point) }; + let result = super::binary_search_with(0, 100, &mut f).await; + assert_eq!(result, Ok(divergence_point - 1)); + } + } +} diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs index 169eb60b7aaf..83caa96ca9e3 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -7,7 +7,7 @@ use vm::{ init_vm, init_vm_with_gas_limit, push_transaction_to_bootloader_memory, BootloaderJobType, TxExecutionMode, }, - TxRevertReason, VmBlockResult, VmInstance, + HistoryEnabled, HistoryMode, TxRevertReason, VmBlockResult, VmInstance, }; use zksync_dal::ConnectionPool; use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; @@ -15,6 +15,8 @@ use zksync_storage::{db::Database, RocksDB}; use zksync_types::{tx::ExecutionMetrics, Transaction, U256}; use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; +use itertools::Itertools; + use crate::gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}; use crate::state_keeper::io::L1BatchParams; @@ -24,81 +26,35 @@ use crate::state_keeper::types::ExecutionMetricsForCriteria; mod tests; /// Representation of a transaction executed in the virtual machine. -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct TxExecutionResult { - /// `Ok(_)` represents a transaction that was executed (even if it reverted), while - /// `Err(_)` represents a rejected transaction (one that can't be applied to the state). 
- pub(super) tx_result: Result, - /// Result of dry run executing the bootloader tip. Will be `None` if the transaction was rejected - /// (`tx_result` field is `err). - pub(super) bootloader_dry_run_result: Option>, - /// Execution metrics for the transaction itself. - /// Will be `None` if the transaction was rejected. - pub(super) tx_metrics: Option, - /// Execution metrics for the bootloader tip dry run. - /// Will be `None` if either the transaction was rejected or if bootloader tip dry run failed. - pub(super) bootloader_dry_run_metrics: Option, - /// Bytecodes that are to be published as compressed with this transaction - pub(super) compressed_bytecodes: Vec, +#[derive(Debug, Clone)] +pub(crate) enum TxExecutionResult { + /// Successful execution of the tx and the block tip dry run. + Success { + tx_result: Box, + tx_metrics: ExecutionMetricsForCriteria, + bootloader_dry_run_metrics: ExecutionMetricsForCriteria, + bootloader_dry_run_result: Box, + compressed_bytecodes: Vec, + }, + /// The VM rejected the tx for some reason. + RejectedByVm { rejection_reason: TxRevertReason }, + /// Bootloader gas limit is not enough to execute the tx. + BootloaderOutOfGasForTx, + /// Bootloader gas limit is enough to run the tx but not enough to execute block tip. + BootloaderOutOfGasForBlockTip, } impl TxExecutionResult { - pub(crate) fn new( - tx_result: Result<(VmTxExecutionResult, Vec), TxRevertReason>, - ) -> Self { - let (tx_result, compressed_bytecodes) = match tx_result { - Ok((result, compressed_bytecodes)) => (Ok(result), compressed_bytecodes), - Err(err) => (Err(err), vec![]), - }; - - Self { - tx_result, - bootloader_dry_run_result: None, - tx_metrics: None, - bootloader_dry_run_metrics: None, - compressed_bytecodes, - } - } - - pub(crate) fn add_tx_metrics(&mut self, tx_metrics: ExecutionMetricsForCriteria) { - self.tx_metrics = Some(tx_metrics); - } - - pub(crate) fn add_bootloader_result( - &mut self, - bootloader_dry_run_result: Result, - ) { - self.bootloader_dry_run_result = Some(bootloader_dry_run_result); - } - - pub(crate) fn add_bootloader_metrics( - &mut self, - bootloader_dry_run_metrics: ExecutionMetricsForCriteria, - ) { - self.bootloader_dry_run_metrics = Some(bootloader_dry_run_metrics); - } - - /// Returns `true` if both transaction and bootloader tip dry run were successful. - pub(super) fn success(&self) -> bool { - self.tx_result.is_ok() - && self - .bootloader_dry_run_result - .as_ref() - .map(|res| res.is_ok()) - .unwrap_or(false) - } - - /// Returns a revert reason if either transaction was rejected or bootloader dry run tip failed. + /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. pub(super) fn err(&self) -> Option { - self.tx_result - .as_ref() - .err() - .or_else(|| { - self.bootloader_dry_run_result - .as_ref() - .and_then(|res| res.as_ref().err()) - }) - .cloned() + match self { + TxExecutionResult::Success { .. } => None, + TxExecutionResult::RejectedByVm { rejection_reason } => Some(rejection_reason.clone()), + TxExecutionResult::BootloaderOutOfGasForTx + | TxExecutionResult::BootloaderOutOfGasForBlockTip { .. } => { + Some(TxRevertReason::BootloaderOutOfGas) + } + } } } @@ -112,26 +68,26 @@ pub trait L1BatchExecutorBuilder: 'static + std::fmt::Debug + Send { /// The default implementation of the `BatchExecutorBuilder`. /// Creates a "real" batch executor which maintains the VM (as opposed to the test builder which doesn't use the VM). 
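The refactoring above replaces a struct of `Option` fields with an enum whose variants carry exactly the data that can exist for each outcome, so `err()` collapses into one `match`. A reduced sketch of that shape with simplified stand-in types (not the actual VM result types):

```rust
#[derive(Debug, Clone, PartialEq)]
enum RevertReason {
    BootloaderOutOfGas,
    Other(String),
}

/// Simplified analogue of `TxExecutionResult`: each outcome carries only the data
/// that exists for it, instead of a struct full of `Option`s that must stay in sync.
#[derive(Debug)]
enum ExecutionOutcome {
    Success { gas_used: u32 },
    RejectedByVm { rejection_reason: RevertReason },
    BootloaderOutOfGasForTx,
    BootloaderOutOfGasForBlockTip,
}

impl ExecutionOutcome {
    /// Revert reason, if the transaction did not make it into the block.
    fn err(&self) -> Option<RevertReason> {
        match self {
            Self::Success { .. } => None,
            Self::RejectedByVm { rejection_reason } => Some(rejection_reason.clone()),
            Self::BootloaderOutOfGasForTx | Self::BootloaderOutOfGasForBlockTip => {
                Some(RevertReason::BootloaderOutOfGas)
            }
        }
    }
}

fn main() {
    let ok = ExecutionOutcome::Success { gas_used: 21_000 };
    let oog = ExecutionOutcome::BootloaderOutOfGasForTx;
    assert_eq!(ok.err(), None);
    assert_eq!(oog.err(), Some(RevertReason::BootloaderOutOfGas));
}
```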
#[derive(Debug, Clone)] -pub(crate) struct MainBatchExecutorBuilder { +pub struct MainBatchExecutorBuilder { state_keeper_db_path: String, pool: ConnectionPool, - reexecute_each_tx: bool, + save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, } impl MainBatchExecutorBuilder { - pub(crate) fn new( + pub fn new( state_keeper_db_path: String, pool: ConnectionPool, - reexecute_each_tx: bool, max_allowed_tx_gas_limit: U256, + save_call_traces: bool, validation_computational_gas_limit: u32, ) -> Self { Self { state_keeper_db_path, pool, - reexecute_each_tx, + save_call_traces, max_allowed_tx_gas_limit, validation_computational_gas_limit, } @@ -163,7 +119,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { secondary_storage.get_estimated_map_size() as f64, ); BatchExecutorHandle::new( - self.reexecute_each_tx, + self.save_call_traces, self.max_allowed_tx_gas_limit, self.validation_computational_gas_limit, secondary_storage, @@ -184,7 +140,7 @@ pub struct BatchExecutorHandle { impl BatchExecutorHandle { pub(super) fn new( - reexecute_each_tx: bool, + save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, secondary_storage: SecondaryStateStorage, @@ -193,7 +149,7 @@ impl BatchExecutorHandle { ) -> Self { let (commands_sender, commands_receiver) = mpsc::channel(); let executor = BatchExecutor { - reexecute_each_tx, + save_call_traces, max_allowed_tx_gas_limit, validation_computational_gas_limit, commands: commands_receiver, @@ -262,7 +218,7 @@ impl BatchExecutorHandle { /// be constructed. #[derive(Debug)] pub(super) struct BatchExecutor { - reexecute_each_tx: bool, + save_call_traces: bool, max_allowed_tx_gas_limit: U256, validation_computational_gas_limit: u32, commands: mpsc::Receiver, @@ -292,7 +248,8 @@ impl BatchExecutor { ); let mut storage_view = StorageView::new(&secondary_storage); - let mut oracle_tools = vm::OracleTools::new(&mut storage_view as &mut dyn Storage); + let mut oracle_tools = + vm::OracleTools::new(&mut storage_view as &mut dyn Storage, HistoryEnabled); let mut vm = match self.vm_gas_limit { Some(vm_gas_limit) => init_vm_with_gas_limit( @@ -332,7 +289,11 @@ impl BatchExecutor { vlog::info!("State keeper exited with an unfinished batch"); } - fn execute_tx(&self, tx: &Transaction, vm: &mut VmInstance) -> TxExecutionResult { + fn execute_tx( + &self, + tx: &Transaction, + vm: &mut VmInstance<'_, HistoryEnabled>, + ) -> TxExecutionResult { let gas_consumed_before_tx = vm.gas_consumed(); // Save pre-`execute_next_tx` VM snapshot. 
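The hunks in this file thread a history-mode type parameter (`HistoryEnabled`) through `VmInstance` and its helpers, so snapshot and rollback functionality is tied to the mode at the type level. A minimal sketch of that marker-type pattern under hypothetical types (not the `vm` crate's actual API):

```rust
use std::marker::PhantomData;

// Marker types selecting how much history the "VM" keeps.
trait HistoryMode {}
#[derive(Debug, Default)]
struct HistoryEnabled;
#[derive(Debug, Default)]
struct HistoryDisabled;
impl HistoryMode for HistoryEnabled {}
impl HistoryMode for HistoryDisabled {}

#[derive(Debug, Default)]
struct Vm<H: HistoryMode> {
    executed: Vec<&'static str>,
    snapshots: Vec<usize>,
    _mode: PhantomData<H>,
}

// Operations available in any history mode.
impl<H: HistoryMode> Vm<H> {
    fn execute(&mut self, tx: &'static str) {
        self.executed.push(tx);
    }
}

// Snapshot/rollback only exists when history is enabled, enforced at compile time.
impl Vm<HistoryEnabled> {
    fn save_snapshot(&mut self) {
        self.snapshots.push(self.executed.len());
    }
    fn rollback_to_latest_snapshot(&mut self) {
        let len = self.snapshots.pop().expect("no snapshot saved");
        self.executed.truncate(len);
    }
}

fn main() {
    let mut vm = Vm::<HistoryEnabled>::default();
    vm.execute("tx1");
    vm.save_snapshot();
    vm.execute("tx2");
    vm.rollback_to_latest_snapshot();
    assert_eq!(vm.executed, ["tx1"]);
    // A `Vm::<HistoryDisabled>` would compile for `execute`, but not for `save_snapshot`.
}
```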
@@ -347,12 +308,8 @@ impl BatchExecutor { tx.hash(), tx.gas_limit() ); - return TxExecutionResult { - tx_result: Err(TxRevertReason::TooBigGasLimit), - bootloader_dry_run_result: None, - tx_metrics: None, - bootloader_dry_run_metrics: None, - compressed_bytecodes: vec![], + return TxExecutionResult::RejectedByVm { + rejection_reason: TxRevertReason::TooBigGasLimit, }; } @@ -374,38 +331,35 @@ impl BatchExecutor { "stage" => "state_keeper" ); - if self.reexecute_each_tx { - self.reexecute_tx_in_vm(vm, tx, tx_result.clone()); - } + let (exec_result, compressed_bytecodes) = match tx_result { + Err(TxRevertReason::BootloaderOutOfGas) => { + return TxExecutionResult::BootloaderOutOfGasForTx + } + Err(rejection_reason) => return TxExecutionResult::RejectedByVm { rejection_reason }, + Ok((exec_result, compressed_bytecodes)) => (exec_result, compressed_bytecodes), + }; - let mut result = TxExecutionResult::new(tx_result.clone()); - if result.err().is_some() { - return result; - } - - let tx_metrics = Self::get_execution_metrics( - vm, - Some(tx), - &tx_result.as_ref().unwrap().0.result, - gas_consumed_before_tx, - ); - result.add_tx_metrics(tx_metrics); + let tx_metrics = + Self::get_execution_metrics(vm, Some(tx), &exec_result.result, gas_consumed_before_tx); match self.dryrun_block_tip(vm) { - Ok((exec_result, metrics)) => { - result.add_bootloader_result(Ok(exec_result)); - result.add_bootloader_metrics(metrics); + Ok((bootloader_dry_run_result, bootloader_dry_run_metrics)) => { + TxExecutionResult::Success { + tx_result: Box::new(exec_result), + tx_metrics, + bootloader_dry_run_metrics, + bootloader_dry_run_result: Box::new(bootloader_dry_run_result), + compressed_bytecodes, + } } Err(err) => { vlog::warn!("VM reverted while executing block tip: {}", err); - result.add_bootloader_result(Err(err)); + TxExecutionResult::BootloaderOutOfGasForBlockTip } } - - result } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut VmInstance<'_, HistoryEnabled>) { let stage_started_at = Instant::now(); vm.rollback_to_latest_snapshot_popping(); metrics::histogram!( @@ -415,7 +369,7 @@ impl BatchExecutor { ); } - fn finish_batch(&self, vm: &mut VmInstance) -> VmBlockResult { + fn finish_batch(&self, vm: &mut VmInstance<'_, H>) -> VmBlockResult { vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing) } @@ -426,7 +380,7 @@ impl BatchExecutor { fn execute_tx_in_vm( &self, tx: &Transaction, - vm: &mut VmInstance, + vm: &mut VmInstance<'_, HistoryEnabled>, ) -> Result<(VmTxExecutionResult, Vec), TxRevertReason> { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. @@ -435,7 +389,7 @@ impl BatchExecutor { // We try to execute the transaction with compressed bytecodes. // If it fails and the compressed bytecodes have not been published, // it means that there is no sense in pollutting the space of compressed bytecodes, - // and so we reeexecute the transaction, but without compressions. + // and so we reexecute the transaction, but without compressions. // Saving the snapshot before executing vm.save_current_vm_as_snapshot(); @@ -444,12 +398,16 @@ impl BatchExecutor { // For L1 transactions there are no compressed bytecodes vec![] } else { + // Deduplicate and filter factory deps preserving original order. 
tx.execute .factory_deps .as_ref() .unwrap() .iter() - .filter(|dep| { + .enumerate() + .sorted_by_key(|(_idx, dep)| *dep) + .dedup_by(|x, y| x.1 == y.1) + .filter(|(_idx, dep)| { !vm.state .storage .storage @@ -457,7 +415,8 @@ impl BatchExecutor { .borrow_mut() .is_bytecode_known(&hash_bytecode(dep)) }) - .filter_map(|dep| CompressedBytecodeInfo::from_original(dep.clone()).ok()) + .sorted_by_key(|(idx, _dep)| *idx) + .filter_map(|(_idx, dep)| CompressedBytecodeInfo::from_original(dep.clone()).ok()) .collect() }; @@ -467,8 +426,10 @@ impl BatchExecutor { TxExecutionMode::VerifyExecute, Some(compressed_bytecodes.clone()), ); - let result_with_compression = - vm.execute_next_tx(self.validation_computational_gas_limit)?; + let result_with_compression = vm.execute_next_tx( + self.validation_computational_gas_limit, + self.save_call_traces, + )?; let at_least_one_unpublished = compressed_bytecodes.iter().any(|info| { !vm.state @@ -480,7 +441,7 @@ impl BatchExecutor { }); if at_least_one_unpublished { - // Rollbacking and now trying to execute one more time. + // Rolling back and trying to execute one more time. vm.rollback_to_latest_snapshot_popping(); push_transaction_to_bootloader_memory( vm, @@ -489,8 +450,11 @@ impl BatchExecutor { Some(vec![]), ); - vm.execute_next_tx(self.validation_computational_gas_limit) - .map(|val| (val, vec![])) + vm.execute_next_tx( + self.validation_computational_gas_limit, + self.save_call_traces, + ) + .map(|val| (val, vec![])) } else { // Remove the snapshot taken at the start of this function as it is not needed anymore. vm.pop_snapshot_no_rollback(); @@ -499,31 +463,9 @@ impl BatchExecutor { } } - fn reexecute_tx_in_vm( - &self, - vm: &mut VmInstance<'_>, - tx: &Transaction, - expected_tx_result: Result< - (VmTxExecutionResult, Vec), - TxRevertReason, - >, - ) { - // Rollback to the pre-`execute_next_tx` VM snapshot. - // `rollback_to_latest_snapshot` (not `rollback_to_latest_snapshot_popping`) is used here because - // we will need this snapshot again if seal criteria will result in `ExcludeAndSeal`. 
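The bytecode-compression change above deduplicates factory deps while preserving their original order: tag each dep with its index, sort by value, drop equal neighbours with `dedup_by`, then sort back by index. A standalone sketch of that order-preserving dedup (it assumes `itertools` is available, as it is in this crate):

```rust
use itertools::Itertools;

/// Removes duplicates from `items` while keeping the first occurrence of each value
/// in its original position, mirroring the enumerate/sort/dedup/sort-back trick above.
fn dedup_preserving_order<T: Ord + Clone>(items: &[T]) -> Vec<T> {
    items
        .iter()
        .enumerate()
        .sorted_by_key(|(_idx, value)| *value)
        .dedup_by(|a, b| a.1 == b.1)
        .sorted_by_key(|(idx, _value)| *idx)
        .map(|(_idx, value)| value.clone())
        .collect()
}

fn main() {
    let deps = vec![b"bbb".to_vec(), b"aaa".to_vec(), b"bbb".to_vec(), b"ccc".to_vec()];
    assert_eq!(
        dedup_preserving_order(&deps),
        vec![b"bbb".to_vec(), b"aaa".to_vec(), b"ccc".to_vec()]
    );
}
```

Because the sort is stable, the first occurrence of each value leads its run of duplicates, so it is the occurrence that survives `dedup_by`.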
- vm.rollback_to_latest_snapshot(); - let alternative_result = self.execute_tx_in_vm(tx, vm); - assert_eq!( - alternative_result, - expected_tx_result, - "Failed to reexecute transaction {}", - tx.hash() - ); - } - fn dryrun_block_tip( &self, - vm: &mut VmInstance, + vm: &mut VmInstance<'_, HistoryEnabled>, ) -> Result<(VmPartialExecutionResult, ExecutionMetricsForCriteria), TxRevertReason> { let stage_started_at = Instant::now(); let gas_consumed_before = vm.gas_consumed(); @@ -555,8 +497,8 @@ impl BatchExecutor { result } - fn get_execution_metrics( - vm: &VmInstance, + fn get_execution_metrics( + vm: &VmInstance<'_, H>, tx: Option<&Transaction>, execution_result: &VmPartialExecutionResult, gas_consumed_before: u32, @@ -583,6 +525,7 @@ impl BatchExecutor { total_factory_deps, execution_result.contracts_used, execution_result.cycles_used, + execution_result.computational_gas_used, ); let l1_gas = match tx { diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 0901eef91b4e..928213b00178 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,38 +1,34 @@ -use crate::state_keeper::batch_executor::tests::tester::TestConfig; - -use self::tester::{Account, Tester}; use assert_matches::assert_matches; use db_test_macro::db_test; -use vm::TxRevertReason; + +use zksync_dal::ConnectionPool; use zksync_types::{tx::tx_execution_info::TxExecutionStatus, PriorityOpId}; mod tester; +use self::tester::{Account, Tester}; use super::TxExecutionResult; +use crate::state_keeper::batch_executor::tests::tester::TestConfig; /// Ensures that the transaction was executed successfully. fn assert_executed(execution_result: TxExecutionResult) { - assert_matches!(execution_result.tx_result, Ok(_)); - assert_matches!(execution_result.bootloader_dry_run_result, Some(Ok(_))); - assert_matches!(execution_result.tx_metrics, Some(_)); - assert_matches!(execution_result.bootloader_dry_run_metrics, Some(_)); + assert_matches!(execution_result, TxExecutionResult::Success { .. }); } -/// Ensures that the transaction was rejected. +/// Ensures that the transaction was rejected by the VM. fn assert_rejected(execution_result: TxExecutionResult) { - assert_matches!(execution_result.tx_result, Err(_)); - assert_matches!(execution_result.bootloader_dry_run_result, None); - assert_matches!(execution_result.tx_metrics, None); - assert_matches!(execution_result.bootloader_dry_run_metrics, None); + assert_matches!(execution_result, TxExecutionResult::RejectedByVm { .. }); } /// Ensures that the transaction was executed successfully but reverted by the VM. fn assert_reverted(execution_result: TxExecutionResult) { assert_executed(execution_result.clone()); - assert_matches!( - execution_result.tx_result.unwrap().status, - TxExecutionStatus::Failure - ); + + if let TxExecutionResult::Success { tx_result, .. } = execution_result { + assert_matches!(tx_result.status, TxExecutionStatus::Failure); + } else { + unreachable!(); + } } /// Checks that we can successfully execute a single L2 tx in batch executor. 
@@ -41,7 +37,7 @@ async fn execute_l2_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -56,7 +52,7 @@ async fn execute_l1_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -71,7 +67,7 @@ async fn execute_l2_and_l1_txs(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -90,7 +86,7 @@ async fn rollback(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -104,8 +100,21 @@ async fn rollback(connection_pool: ConnectionPool) { let res_new = executor.execute_tx(tx); assert_executed(res_new.clone()); + let ( + TxExecutionResult::Success { + tx_metrics: tx_metrics_old, + .. + }, + TxExecutionResult::Success { + tx_metrics: tx_metrics_new, + .. + }, + ) = (res_old, res_new) else { + unreachable!(); + }; + assert_eq!( - res_old.tx_metrics, res_new.tx_metrics, + tx_metrics_old, tx_metrics_new, "Execution results must be the same" ); @@ -118,7 +127,7 @@ async fn reject_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); let executor = tester.create_batch_executor(); // Wallet is not funded, it can't pay for fees. @@ -133,7 +142,7 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -146,7 +155,22 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) { let res2 = executor.execute_tx(bad_tx); assert_rejected(res2.clone()); - assert_eq!(res1, res2); + let ( + TxExecutionResult::RejectedByVm { + rejection_reason: rejection_reason_old, + .. + }, + TxExecutionResult::RejectedByVm { + rejection_reason: rejection_reason_new, + .. + }, + ) = (res1, res2) else { + unreachable!(); + }; + assert_eq!( + rejection_reason_old, rejection_reason_new, + "Rejection reasons must be the same" + ); // Ensure that now we can execute a valid tx. alice.nonce -= 1; // Reset the nonce. 
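The reworked tests above pull fields out of two results at once with a `let ... else` binding instead of comparing whole structs. A compact sketch of that assertion style with a toy enum (not the real `TxExecutionResult`):

```rust
enum Outcome {
    Success { gas_used: u64 },
    Rejected { reason: String },
}

fn main() {
    let first = Outcome::Success { gas_used: 21_000 };
    let second = Outcome::Success { gas_used: 21_000 };

    // Destructure both results at once; any other variant combination fails the test.
    let (Outcome::Success { gas_used: gas_first }, Outcome::Success { gas_used: gas_second }) =
        (first, second)
    else {
        unreachable!("both executions were expected to succeed");
    };
    assert_eq!(gas_first, gas_second, "executions must use the same gas");

    // `Rejected` is only here to make the pattern refutable in this toy example.
    let _ = Outcome::Rejected { reason: String::new() };
}
```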
@@ -161,7 +185,7 @@ async fn tx_cant_be_reexecuted(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -181,7 +205,7 @@ async fn deploy_and_call_loadtest(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -206,7 +230,7 @@ async fn execute_reverted_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -229,7 +253,7 @@ async fn execute_realistic_scenario(connection_pool: ConnectionPool) { let mut bob = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); tester.fund(&[bob.address()]); let executor = tester.create_batch_executor(); @@ -278,34 +302,33 @@ async fn bootloader_out_of_gas_for_any_tx(connection_pool: ConnectionPool) { let tester = Tester::with_config( connection_pool, TestConfig { - reexecute_each_tx: false, + save_call_traces: false, vm_gas_limit: Some(10), max_allowed_tx_gas_limit: u32::MAX, validation_computational_gas_limit: u32::MAX, }, ); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); let res = executor.execute_tx(alice.execute()); - assert_rejected(res.clone()); - assert_matches!(res.err().unwrap(), TxRevertReason::BootloaderOutOfGas); + assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); executor.finish_batch(); } /// Checks that we can handle the bootloader out of gas error on tip phase. #[db_test] -#[ignore] // This test is blocked by [PLA-50] as gas calculation are affected by the underflow. +#[ignore] // This test fails. async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { let mut alice = Account::random(); let mut tester = Tester::new(connection_pool); - tester.genesis().await; + tester.genesis(); tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); @@ -317,7 +340,7 @@ async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { // Just a bit below the gas used for the previous batch execution should be fine to execute the tx // but not enough to execute the block tip. 
tester.set_config(TestConfig { - reexecute_each_tx: false, + save_call_traces: false, vm_gas_limit: Some(vm_block_res.full_result.gas_used - 10), max_allowed_tx_gas_limit: u32::MAX, validation_computational_gas_limit: u32::MAX, @@ -326,11 +349,7 @@ async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { let second_executor = tester.create_batch_executor(); let res = second_executor.execute_tx(alice.execute()); - assert!(res.tx_result.is_ok()); - assert_matches!( - res.bootloader_dry_run_result, - Some(Err(TxRevertReason::BootloaderOutOfGas)) - ); + assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForBlockTip); second_executor.finish_batch(); } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index 6f97af77a872..a2f736b0ca16 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,7 +1,7 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. -use crate::genesis::chain_schema_genesis; +use crate::genesis::create_genesis_block; use crate::state_keeper::{ batch_executor::BatchExecutorHandle, io::L1BatchParams, @@ -42,7 +42,7 @@ const CHAIN_ID: L2ChainId = L2ChainId(270); /// Has sensible defaults for most tests, each of which can be overridden. #[derive(Debug)] pub(super) struct TestConfig { - pub(super) reexecute_each_tx: bool, + pub(super) save_call_traces: bool, pub(super) vm_gas_limit: Option, pub(super) max_allowed_tx_gas_limit: u32, pub(super) validation_computational_gas_limit: u32, @@ -54,8 +54,8 @@ impl TestConfig { let config = ZkSyncConfig::from_env(); Self { - reexecute_each_tx: true, vm_gas_limit: None, + save_call_traces: false, max_allowed_tx_gas_limit: config.chain.state_keeper.max_allowed_l2_tx_gas_limit, validation_computational_gas_limit: config .chain @@ -112,7 +112,7 @@ impl Tester { // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden // for the test pool (see the doc-comment on `TestPool` for details). BatchExecutorHandle::new( - self.config.reexecute_each_tx, + self.config.save_call_traces, self.config.max_allowed_tx_gas_limit.into(), self.config.validation_computational_gas_limit, secondary_storage, @@ -153,17 +153,15 @@ impl Tester { } /// Performs the genesis in the storage. 
- pub(super) async fn genesis(&self) { + pub(super) fn genesis(&self) { let mut storage = self.pool.access_storage_blocking(); if storage.blocks_dal().is_genesis_needed() { - let chain_id = H256::from_low_u64_be(CHAIN_ID.0 as u64); - chain_schema_genesis( + create_genesis_block( &mut storage, self.fee_account, - chain_id, + CHAIN_ID, BASE_SYSTEM_CONTRACTS.clone(), - ) - .await; + ); } } @@ -270,7 +268,7 @@ impl Account { priority_queue_type: PriorityQueueType::Deque, eth_hash: H256::random(), eth_block: 1, - gas_per_pubdata_limit: U256::from(1_000_000), + gas_per_pubdata_limit: U256::from(800), to_mint: gas_limit * max_fee_per_gas + execute.value, refund_recipient: self.address(), }; diff --git a/core/bin/zksync_core/src/state_keeper/extractors.rs b/core/bin/zksync_core/src/state_keeper/extractors.rs index b2ad2851d6f9..7f7587b91c6b 100644 --- a/core/bin/zksync_core/src/state_keeper/extractors.rs +++ b/core/bin/zksync_core/src/state_keeper/extractors.rs @@ -240,10 +240,6 @@ pub(crate) fn wait_for_l1_batch_state_root_unchecked( number.0, stage_started_at.elapsed() ); - metrics::histogram!( - "server.state_keeper.wait_for_prev_hash_time", - stage_started_at.elapsed() - ); return h256_to_u256(root); } diff --git a/core/bin/zksync_core/src/state_keeper/io/common.rs b/core/bin/zksync_core/src/state_keeper/io/common.rs index e52dee78e86f..a725d05ec570 100644 --- a/core/bin/zksync_core/src/state_keeper/io/common.rs +++ b/core/bin/zksync_core/src/state_keeper/io/common.rs @@ -5,10 +5,13 @@ use vm::{ zk_evm::block_properties::BlockProperties, }; use zksync_contracts::BaseSystemContracts; +use zksync_dal::StorageProcessor; use zksync_types::{Address, L1BatchNumber, U256, ZKPORTER_IS_AVAILABLE}; use zksync_utils::h256_to_u256; -use super::L1BatchParams; +use crate::state_keeper::extractors; + +use super::{L1BatchParams, PendingBatchData}; #[derive(Debug)] pub(crate) struct StateKeeperStats { @@ -62,3 +65,51 @@ pub(crate) fn poll_until Option>( } None } + +/// Loads the pending L1 block data from the database. +pub(crate) fn load_pending_batch( + storage: &mut StorageProcessor<'_>, + current_l1_batch_number: L1BatchNumber, + fee_account: Address, +) -> Option { + // If pending miniblock doesn't exist, it means that there is no unsynced state (i.e. no transaction + // were executed after the last sealed batch). 
+ let pending_miniblock_number = { + let (_, last_miniblock_number_included_in_l1_batch) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(current_l1_batch_number - 1) + .unwrap(); + last_miniblock_number_included_in_l1_batch + 1 + }; + let pending_miniblock_header = storage + .blocks_dal() + .get_miniblock_header(pending_miniblock_number)?; + + vlog::info!("Getting previous batch hash"); + let previous_l1_batch_hash = + extractors::wait_for_prev_l1_batch_state_root_unchecked(storage, current_l1_batch_number); + + let base_system_contracts = storage.storage_dal().get_base_system_contracts( + pending_miniblock_header + .base_system_contracts_hashes + .bootloader, + pending_miniblock_header + .base_system_contracts_hashes + .default_aa, + ); + + vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); + let params = l1_batch_params( + current_l1_batch_number, + fee_account, + pending_miniblock_header.timestamp, + previous_l1_batch_hash, + pending_miniblock_header.l1_gas_price, + pending_miniblock_header.l2_fair_gas_price, + base_system_contracts, + ); + + let txs = storage.transactions_dal().get_transactions_to_reexecute(); + + Some(PendingBatchData { params, txs }) +} diff --git a/core/bin/zksync_core/src/state_keeper/io/mempool.rs b/core/bin/zksync_core/src/state_keeper/io/mempool.rs index ee439b7ead28..351ef5bbd621 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mempool.rs @@ -1,35 +1,41 @@ -use std::sync::Arc; -use std::time::Duration; -use std::time::Instant; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; -use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; -use vm::vm_with_bootloader::DerivedBlockContext; -use vm::VmBlockResult; +use vm::{ + vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, DerivedBlockContext}, + VmBlockResult, +}; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; -use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::EthInterface; +use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; use zksync_utils::time::millis_since_epoch; -use crate::gas_adjuster::GasAdjuster; -use crate::state_keeper::{ - extractors, - io::{ - common::{l1_batch_params, poll_until, StateKeeperStats}, - seal_logic::{seal_l1_batch_impl, seal_miniblock_impl}, - L1BatchParams, PendingBatchData, StateKeeperIO, +use crate::state_keeper::mempool_actor::l2_tx_filter; +use crate::{ + l1_gas_price::L1GasPriceProvider, + state_keeper::{ + extractors, + io::{ + common::{l1_batch_params, poll_until, StateKeeperStats}, + seal_logic::{seal_l1_batch_impl, seal_miniblock_impl}, + L1BatchParams, PendingBatchData, StateKeeperIO, + }, + updates::UpdatesManager, + MempoolGuard, }, - updates::UpdatesManager, - MempoolGuard, }; +use super::common::load_pending_batch; + /// Mempool-based IO for the state keeper. /// Receives transactions from the database through the mempool filtering logic. /// Decides which batch parameters should be used for the new batch. /// This is an IO for the main server application. #[derive(Debug)] -pub(crate) struct MempoolIO { +pub(crate) struct MempoolIO { mempool: MempoolGuard, pool: ConnectionPool, filter: L2TxFilter, @@ -43,12 +49,14 @@ pub(crate) struct MempoolIO { statistics: StateKeeperStats, // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. 
- gas_adjuster: Arc>, + l1_gas_price_provider: Arc, base_system_contracts: BaseSystemContracts, } -impl StateKeeperIO for MempoolIO { +impl StateKeeperIO + for MempoolIO +{ fn current_l1_batch_number(&self) -> L1BatchNumber { self.current_l1_batch_number } @@ -60,49 +68,15 @@ impl StateKeeperIO fo fn load_pending_batch(&mut self) -> Option { let mut storage = self.pool.access_storage_blocking(); - // If pending miniblock doesn't exist, it means that there is no unsynced state (i.e. no transaction - // were executed after the last sealed batch). - let pending_miniblock_number = self.pending_miniblock_number(&mut storage); - let pending_miniblock_header = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number)?; - - vlog::info!("getting previous block hash"); - let previous_l1_batch_hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( - &mut storage, - self.current_l1_batch_number, - ); - - let base_system_contracts = storage.storage_dal().get_base_system_contracts( - pending_miniblock_header - .base_system_contracts_hashes - .bootloader, - pending_miniblock_header - .base_system_contracts_hashes - .default_aa, - ); - - vlog::info!("previous_l1_batch_hash: {}", previous_l1_batch_hash); - let params = l1_batch_params( - self.current_l1_batch_number, - self.fee_account, - pending_miniblock_header.timestamp, - previous_l1_batch_hash, - pending_miniblock_header.l1_gas_price, - pending_miniblock_header.l2_fair_gas_price, - base_system_contracts, - ); - - let txs = storage.transactions_dal().get_transactions_to_reexecute(); - + let PendingBatchData { params, txs } = + load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account)?; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( - pending_miniblock_header.l1_gas_price, - pending_miniblock_header.l2_fair_gas_price, - ); + let context = params.context_mode.inner_block_context().context; + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(context.l1_gas_price, context.fair_l2_gas_price); self.filter = L2TxFilter { - l1_gas_price: pending_miniblock_header.l1_gas_price, + l1_gas_price: context.l1_gas_price, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; @@ -116,16 +90,23 @@ impl StateKeeperIO fo poll_until(self.delay_interval, max_wait, || { // We create a new filter each time, since parameters may change and a previously // ignored transaction in the mempool may be scheduled for the execution. - self.filter = self.gas_adjuster.l2_tx_filter(self.fair_l2_gas_price); + self.filter = l2_tx_filter(self.l1_gas_price_provider.as_ref(), self.fair_l2_gas_price); self.mempool.has_next(&self.filter).then(|| { // We only need to get the root hash when we're certain that we have a new transaction. 
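`MempoolIO` is now generic over an `L1GasPriceProvider` rather than holding a concrete `GasAdjuster`, which is what lets the external node plug in a different gas price source. A minimal sketch of that dependency-injection shape, with a simplified hypothetical trait and method (the real trait lives in the `l1_gas_price` module):

```rust
use std::sync::Arc;

/// Hypothetical, simplified abstraction over "where L1 gas prices come from":
/// the main node can back it with a gas adjuster, the external node with another source.
trait L1GasPriceProvider {
    fn effective_gas_price(&self) -> u64;
}

struct FixedGasPrice(u64);
impl L1GasPriceProvider for FixedGasPrice {
    fn effective_gas_price(&self) -> u64 {
        self.0
    }
}

/// IO component generic over the provider, mirroring `MempoolIO<G: L1GasPriceProvider>`.
struct MempoolIo<G> {
    gas_price_provider: Arc<G>,
}

impl<G: L1GasPriceProvider> MempoolIo<G> {
    /// Hypothetical helper: whatever fee floor the current gas price implies.
    fn current_filter_floor(&self) -> u64 {
        self.gas_price_provider.effective_gas_price()
    }
}

fn main() {
    let io = MempoolIo {
        gas_price_provider: Arc::new(FixedGasPrice(30_000_000_000)),
    };
    assert_eq!(io.current_filter_floor(), 30_000_000_000);
}
```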
vlog::info!("getting previous block hash"); let previous_l1_batch_hash = { let mut storage = self.pool.access_storage_blocking(); - extractors::wait_for_prev_l1_batch_state_root_unchecked( + + let stage_started_at: Instant = Instant::now(); + let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( &mut storage, self.current_l1_batch_number, - ) + ); + metrics::histogram!( + "server.state_keeper.wait_for_prev_hash_time", + stage_started_at.elapsed() + ); + hash }; vlog::info!("previous_l1_batch_hash: {}", previous_l1_batch_hash); vlog::info!( @@ -227,7 +208,6 @@ impl StateKeeperIO fo self.current_miniblock_number, self.current_l1_batch_number, &mut self.statistics, - self.fee_account, &mut storage, block_result, updates_manager, @@ -238,14 +218,14 @@ impl StateKeeperIO fo } } -impl MempoolIO { +impl MempoolIO { pub(crate) fn new( mempool: MempoolGuard, pool: ConnectionPool, fee_account: Address, fair_l2_gas_price: u64, delay_interval: Duration, - gas_adjuster: Arc>, + l1_gas_price_provider: Arc, base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Self { let mut storage = pool.access_storage_blocking(); @@ -270,16 +250,16 @@ impl MempoolIO { fair_l2_gas_price, delay_interval, statistics: StateKeeperStats { num_contracts }, - gas_adjuster, + l1_gas_price_provider, base_system_contracts, } } +} - fn pending_miniblock_number(&self, storage: &mut StorageProcessor<'_>) -> MiniblockNumber { - let (_, last_miniblock_number_included_in_l1_batch) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(self.current_l1_batch_number - 1) - .unwrap(); - last_miniblock_number_included_in_l1_batch + 1 +/// Getters reqiored for testing the MempoolIO. +#[cfg(test)] +impl MempoolIO { + pub(super) fn filter(&self) -> &L2TxFilter { + &self.filter } } diff --git a/core/bin/zksync_core/src/state_keeper/io/mod.rs b/core/bin/zksync_core/src/state_keeper/io/mod.rs index 1b567e123012..a8eb6ac6ffb8 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mod.rs @@ -15,9 +15,12 @@ pub(crate) mod common; pub(crate) mod mempool; pub(crate) mod seal_logic; +#[cfg(test)] +mod tests; + /// System parameters for L1 batch. /// It includes system params such as Basic System Contracts and zkPorter configuration -/// And l1batch-specific parameters like timestamp, number, etc. +/// and l1batch-specific parameters like timestamp, number, etc. #[derive(Debug, Clone)] pub struct L1BatchParams { pub context_mode: BlockContextMode, @@ -57,7 +60,7 @@ pub trait StateKeeperIO: 'static + std::fmt::Debug + Send { /// Returns the data required to initialize the VM for the next batch. fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option; /// Blocks for up to `max_wait` until the parameters for the next miniblock are available. - /// Right now it's only a timetamp. + /// Right now it's only a timestamp. fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option; /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. diff --git a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs index 3fbe18360a39..b30b496c40ad 100644 --- a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs @@ -1,7 +1,7 @@ //! This module is a source-of-truth on what is expected to be done when sealing a block. //! 
It contains the logic of the block sealing, which is used by both the mempool-based and external node IO. -use std::time::Instant; +use std::time::{Duration, Instant}; use vm::vm_with_bootloader::BlockContextMode; use vm::vm_with_bootloader::DerivedBlockContext; @@ -12,7 +12,7 @@ use zksync_types::{ block::MiniblockHeader, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - Address, L1BatchNumber, MiniblockNumber, + L1BatchNumber, MiniblockNumber, }; use zksync_utils::{miniblock_hash, time::millis_since_epoch}; @@ -21,12 +21,10 @@ use crate::state_keeper::{extractors, io::common::StateKeeperStats, updates::Upd /// Persists an L1 batch in the storage. /// This action includes a creation of an empty "fictive" miniblock that contains the events /// generated during the bootloader "tip phase". -#[allow(clippy::too_many_arguments)] pub(crate) fn seal_l1_batch_impl( current_miniblock_number: MiniblockNumber, current_l1_batch_number: L1BatchNumber, statistics: &mut StateKeeperStats, - fee_account: Address, storage: &mut StorageProcessor<'_>, block_result: VmBlockResult, mut updates_manager: UpdatesManager, @@ -48,7 +46,7 @@ pub(crate) fn seal_l1_batch_impl( "VM must not revert when finalizing block. Revert reason: {:?}", full_result.revert_reason ); - track_l1_batch_execution_stage("vm_finalization", &mut stage_started_at); + track_l1_batch_execution_stage("vm_finalization", &mut stage_started_at, None); updates_manager.extend_from_fictive_transaction(block_tip_result.logs); // Seal fictive miniblock with last events and storage logs. @@ -60,7 +58,7 @@ pub(crate) fn seal_l1_batch_impl( &updates_manager, true, ); - track_l1_batch_execution_stage("fictive_miniblock", &mut stage_started_at); + track_l1_batch_execution_stage("fictive_miniblock", &mut stage_started_at, None); let (_, deduped_log_queries) = sort_storage_access_queries( full_result @@ -68,7 +66,11 @@ pub(crate) fn seal_l1_batch_impl( .iter() .map(|log| &log.log_query), ); - track_l1_batch_execution_stage("log_deduplication", &mut stage_started_at); + track_l1_batch_execution_stage( + "log_deduplication", + &mut stage_started_at, + Some(deduped_log_queries.len()), + ); let (l1_tx_count, l2_tx_count) = extractors::l1_l2_tx_count(&updates_manager.l1_batch.executed_transactions); @@ -94,7 +96,7 @@ pub(crate) fn seal_l1_batch_impl( number: current_l1_batch_number, is_finished: true, timestamp: block_context.context.block_timestamp, - fee_account_address: fee_account, + fee_account_address: block_context.context.operator_address, priority_ops_onchain_data: updates_manager.l1_batch.priority_ops_onchain_data.clone(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, @@ -115,12 +117,16 @@ pub(crate) fn seal_l1_batch_impl( transaction .blocks_dal() .insert_l1_batch(l1_batch, updates_manager.l1_batch.l1_gas_count); - track_l1_batch_execution_stage("insert_l1_batch_header", &mut stage_started_at); + track_l1_batch_execution_stage("insert_l1_batch_header", &mut stage_started_at, None); transaction .blocks_dal() .mark_miniblocks_as_executed_in_l1_batch(current_l1_batch_number); - track_l1_batch_execution_stage("set_l1_batch_number_for_miniblocks", &mut stage_started_at); + track_l1_batch_execution_stage( + "set_l1_batch_number_for_miniblocks", + &mut stage_started_at, + None, + ); transaction .transactions_dal() @@ -128,12 +134,11 @@ pub(crate) fn seal_l1_batch_impl( current_l1_batch_number, &updates_manager.l1_batch.executed_transactions, ); - 
track_l1_batch_execution_stage("mark_txs_as_executed_in_l1_batch", &mut stage_started_at); - - transaction - .storage_logs_dedup_dal() - .insert_storage_logs(current_l1_batch_number, &deduped_log_queries); - track_l1_batch_execution_stage("insert_storage_dedup_logs", &mut stage_started_at); + track_l1_batch_execution_stage( + "mark_txs_as_executed_in_l1_batch", + &mut stage_started_at, + None, + ); let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries .into_iter() @@ -141,15 +146,23 @@ pub(crate) fn seal_l1_batch_impl( transaction .storage_logs_dedup_dal() .insert_protective_reads(current_l1_batch_number, &protective_reads); - track_l1_batch_execution_stage("insert_protective_reads", &mut stage_started_at); + track_l1_batch_execution_stage( + "insert_protective_reads", + &mut stage_started_at, + Some(protective_reads.len()), + ); transaction .storage_logs_dedup_dal() .insert_initial_writes(current_l1_batch_number, &deduplicated_writes); - track_l1_batch_execution_stage("insert_initial_writes", &mut stage_started_at); + track_l1_batch_execution_stage( + "insert_initial_writes", + &mut stage_started_at, + Some(deduplicated_writes.len()), + ); transaction.commit_blocking(); - track_l1_batch_execution_stage("commit_l1_batch", &mut stage_started_at); + track_l1_batch_execution_stage("commit_l1_batch", &mut stage_started_at, None); let writes_metrics = updates_manager.storage_writes_deduplicator.metrics(); // Sanity check. @@ -234,7 +247,12 @@ pub(crate) fn seal_miniblock_impl( }; transaction.blocks_dal().insert_miniblock(miniblock_header); - track_miniblock_execution_stage("insert_miniblock_header", &mut stage_started_at); + track_miniblock_execution_stage( + "insert_miniblock_header", + &mut stage_started_at, + None, + is_fictive, + ); transaction .transactions_dal() @@ -243,7 +261,12 @@ pub(crate) fn seal_miniblock_impl( &updates_manager.miniblock.executed_transactions, updates_manager.base_fee_per_gas().into(), ); - track_miniblock_execution_stage("mark_transactions_in_miniblock", &mut stage_started_at); + track_miniblock_execution_stage( + "mark_transactions_in_miniblock", + &mut stage_started_at, + Some(updates_manager.miniblock.executed_transactions.len()), + is_fictive, + ); let storage_logs = extractors::log_queries_to_storage_logs( &updates_manager.miniblock.storage_logs, @@ -251,22 +274,39 @@ pub(crate) fn seal_miniblock_impl( is_fictive, ); let write_logs = extractors::write_logs_from_storage_logs(storage_logs); + let write_logs_len = write_logs.iter().flat_map(|(_, logs)| logs).count(); transaction .storage_logs_dal() .insert_storage_logs(current_miniblock_number, &write_logs); - track_miniblock_execution_stage("insert_storage_logs", &mut stage_started_at); + track_miniblock_execution_stage( + "insert_storage_logs", + &mut stage_started_at, + Some(write_logs_len), + is_fictive, + ); let unique_updates = transaction.storage_dal().apply_storage_logs(&write_logs); - track_miniblock_execution_stage("apply_storage_logs", &mut stage_started_at); + track_miniblock_execution_stage( + "apply_storage_logs", + &mut stage_started_at, + Some(write_logs_len), + is_fictive, + ); let new_factory_deps = updates_manager.miniblock.new_factory_deps.clone(); + let new_factory_deps_len = new_factory_deps.iter().flat_map(|(_, deps)| deps).count(); if !new_factory_deps.is_empty() { transaction .storage_dal() .insert_factory_deps(current_miniblock_number, new_factory_deps); } - track_miniblock_execution_stage("insert_factory_deps", &mut stage_started_at); + 
track_miniblock_execution_stage( + "insert_factory_deps", + &mut stage_started_at, + Some(new_factory_deps_len), + is_fictive, + ); // Factory deps should be inserted before using `contracts_deployed_this_miniblock`. let deployed_contracts = @@ -274,35 +314,91 @@ pub(crate) fn seal_miniblock_impl( if !deployed_contracts.is_empty() { statistics.num_contracts += deployed_contracts.len() as u64; } + let deployed_contracts_len = deployed_contracts + .iter() + .flat_map(|(_, contracts)| contracts) + .count(); + track_miniblock_execution_stage( + "extract_contracts_deployed", + &mut stage_started_at, + Some(deployed_contracts_len), + is_fictive, + ); let added_tokens = extract_added_tokens(&updates_manager.miniblock.events); + track_miniblock_execution_stage( + "extract_added_tokens", + &mut stage_started_at, + Some(added_tokens.len()), + is_fictive, + ); + let added_tokens_len = added_tokens.len(); if !added_tokens.is_empty() { transaction.tokens_dal().add_tokens(added_tokens); } - track_miniblock_execution_stage("insert_tokens", &mut stage_started_at); + track_miniblock_execution_stage( + "insert_tokens", + &mut stage_started_at, + Some(added_tokens_len), + is_fictive, + ); let events_this_miniblock = extractors::extract_events_this_block( &updates_manager.miniblock.events, updates_manager, is_fictive, ); + + let events_this_miniblock_len = events_this_miniblock + .iter() + .flat_map(|(_, events)| events.iter()) + .count(); + + track_miniblock_execution_stage( + "extract_events", + &mut stage_started_at, + Some(events_this_miniblock_len), + is_fictive, + ); transaction .events_dal() .save_events(current_miniblock_number, events_this_miniblock); - track_miniblock_execution_stage("insert_events", &mut stage_started_at); + track_miniblock_execution_stage( + "insert_events", + &mut stage_started_at, + Some(events_this_miniblock_len), + is_fictive, + ); let l2_to_l1_logs_this_miniblock = extractors::extract_l2_to_l1_logs_this_block( &updates_manager.miniblock.l2_to_l1_logs, updates_manager, is_fictive, ); + + let l2_to_l1_logs_this_miniblock_len = l2_to_l1_logs_this_miniblock + .iter() + .flat_map(|(_, l2_to_l1_logs)| l2_to_l1_logs.iter()) + .count(); + + track_miniblock_execution_stage( + "extract_l2_to_l1_logs", + &mut stage_started_at, + Some(l2_to_l1_logs_this_miniblock_len), + is_fictive, + ); transaction .events_dal() .save_l2_to_l1_logs(current_miniblock_number, l2_to_l1_logs_this_miniblock); - track_miniblock_execution_stage("insert_l2_to_l1_logs", &mut stage_started_at); + track_miniblock_execution_stage( + "insert_l2_to_l1_logs", + &mut stage_started_at, + Some(l2_to_l1_logs_this_miniblock_len), + is_fictive, + ); transaction.commit_blocking(); - track_miniblock_execution_stage("commit_miniblock", &mut stage_started_at); + track_miniblock_execution_stage("commit_miniblock", &mut stage_started_at, None, is_fictive); metrics::histogram!( "server.state_keeper.miniblock.transactions_in_miniblock", @@ -336,6 +432,8 @@ pub(crate) fn seal_miniblock_impl( track_miniblock_execution_stage( "apply_miniblock_updates_to_l1_batch_updates_accumulator", &mut stage_started_at, + None, + is_fictive, ); } @@ -365,20 +463,66 @@ fn miniblock_assertions(updates_manager: &UpdatesManager, is_fictive: bool) { } } -fn track_l1_batch_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { +fn track_l1_batch_execution_stage( + stage: &'static str, + stage_started_at: &mut Instant, + count: Option, +) { metrics::histogram!( "server.state_keeper.l1_batch.sealed_time_stage", stage_started_at.elapsed(), 
"stage" => stage ); + if let Some(count) = count { + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_entity_count", + count as f64, + "stage" => stage + ); + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_entity_per_unit", + stage_started_at.elapsed().div_f64(count as f64), + "stage" => stage + ); + } *stage_started_at = Instant::now(); } -fn track_miniblock_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { +fn track_miniblock_execution_stage( + stage: &'static str, + stage_started_at: &mut Instant, + count: Option, + is_fictive: bool, +) { + if stage_started_at.elapsed() > Duration::from_millis(10) { + vlog::debug!( + "miniblock execution stage {} took {:?} with count {:?}", + stage, + stage_started_at.elapsed(), + count + ); + } metrics::histogram!( "server.state_keeper.miniblock.sealed_time_stage", stage_started_at.elapsed(), - "stage" => stage + "stage" => stage, + "is_fictive" => is_fictive.to_string(), ); + if let Some(count) = count { + metrics::histogram!( + "server.state_keeper.miniblock.sealed_entity_count", + count as f64, + "stage" => stage, + "is_fictive" => is_fictive.to_string(), + ); + if count > 0 { + metrics::histogram!( + "server.state_keeper.miniblock.sealed_entity_per_unit", + stage_started_at.elapsed().div_f64(count as f64), + "stage" => stage, + "is_fictive" => is_fictive.to_string(), + ); + } + } *stage_started_at = Instant::now(); } diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs new file mode 100644 index 000000000000..98a689a22991 --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs @@ -0,0 +1,114 @@ +use std::time::Duration; + +use db_test_macro::db_test; +use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; +use zksync_dal::ConnectionPool; +use zksync_mempool::L2TxFilter; + +use self::tester::Tester; +use crate::state_keeper::{io::StateKeeperIO, mempool_actor::l2_tx_filter}; + +mod tester; + +/// Ensure that MempoolIO.filter is correctly initialized right after mempool initialization. +#[db_test] +async fn test_filter_initialization(connection_pool: ConnectionPool) { + let tester = Tester::new(); + + // Genesis is needed for proper mempool initialization. + tester.genesis(&connection_pool); + + let (mempool, _) = tester + .create_test_mempool_io(connection_pool) + .await + .unwrap(); + + // Upon initialization, the filter should be set to the default values. + assert_eq!(mempool.filter(), &L2TxFilter::default()); +} + +/// Ensure that MempoolIO.filter is modified correctly if there is a pending batch upon mempool initialization. +#[db_test] +async fn test_filter_with_pending_batch(connection_pool: ConnectionPool) { + let tester = Tester::new(); + + tester.genesis(&connection_pool); + + // Insert a sealed batch so there will be a prev_l1_batch_state_root. + // These gas values are random and don't matter for filter calculation as there will be a + // pending batch the filter will be based off of. + tester.insert_miniblock(&connection_pool, 1, 5, 55, 555); + + tester.insert_sealed_batch(&connection_pool, 1); + + // Inserting a pending miniblock that isn't included in a sealed batch means there is a pending batch. + // The gas values are randomly chosen but so affect filter values calculation. 
+ let (give_l1_gas_price, give_fair_l2_gas_price) = (100, 1000); + tester.insert_miniblock( + &connection_pool, + 2, + 10, + give_l1_gas_price, + give_fair_l2_gas_price, + ); + + let (mut mempool, _) = tester + .create_test_mempool_io(connection_pool) + .await + .unwrap(); + + // Before the mempool knows there is a pending batch, the filter is still set to the default values. + assert_eq!(mempool.filter(), &L2TxFilter::default()); + + mempool.load_pending_batch(); + + let (want_base_fee, want_gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(give_l1_gas_price, give_fair_l2_gas_price); + + let want_filter = L2TxFilter { + l1_gas_price: give_l1_gas_price, + fee_per_gas: want_base_fee, + gas_per_pubdata: want_gas_per_pubdata as u32, + }; + + assert_eq!(mempool.filter(), &want_filter); +} + +/// Ensure that MempoolIO.filter is modified correctly if there is no pending batch. +#[db_test] +async fn test_filter_with_no_pending_batch(connection_pool: ConnectionPool) { + let tester = Tester::new(); + tester.genesis(&connection_pool); + + // Insert a sealed batch so there will be a prev_l1_batch_state_root. + // These gas values are random and don't matter for filter calculation. + tester.insert_miniblock(&connection_pool, 1, 5, 55, 555); + tester.insert_sealed_batch(&connection_pool, 1); + + // Create a copy of the tx filter that the mempool will use. + let want_filter = l2_tx_filter( + &tester.create_gas_adjuster().await, + tester.fair_l2_gas_price(), + ); + + // Create a mempool without pending batch and ensure that filter is not initialized just yet. + let (mut mempool, mut guard) = tester + .create_test_mempool_io(connection_pool) + .await + .unwrap(); + assert_eq!(mempool.filter(), &L2TxFilter::default()); + + // Insert a transaction that matches the expected filter. + tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + ); + + // Now, given that there is a transaction matching the expected filter, waiting for the new batch params + // should succeed and initialize the filter. + mempool + .wait_for_new_batch_params(Duration::from_secs(10)) + .expect("No batch params in the test mempool"); + assert_eq!(mempool.filter(), &want_filter); +} diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs new file mode 100644 index 000000000000..7841cef8e907 --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs @@ -0,0 +1,174 @@ +//! Testing harness for the IO. 
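+//!
+//! Rough usage sketch, mirroring the tests in `mod.rs` (illustrative only):
+//!
+//! ```ignore
+//! let tester = Tester::new();
+//! tester.genesis(&connection_pool);
+//! let (mempool, _guard) = tester
+//!     .create_test_mempool_io(connection_pool)
+//!     .await
+//!     .unwrap();
+//! assert_eq!(mempool.filter(), &L2TxFilter::default());
+//! ```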
+ +use crate::genesis::create_genesis_block; +use crate::l1_gas_price::GasAdjuster; +use crate::state_keeper::{MempoolGuard, MempoolIO}; +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; +use zksync_config::GasAdjusterConfig; +use zksync_contracts::BaseSystemContracts; +use zksync_dal::ConnectionPool; +use zksync_eth_client::{clients::mock::MockEthereum, types::Error}; +use zksync_mempool::MempoolStore; +use zksync_types::fee::Fee; +use zksync_types::l2::L2Tx; +use zksync_types::{ + block::{L1BatchHeader, MiniblockHeader}, + Address, L1BatchNumber, MiniblockNumber, PriorityOpId, H256, +}; +use zksync_types::{L2ChainId, Nonce}; + +#[derive(Debug)] +pub(super) struct Tester { + base_system_contracts: BaseSystemContracts, +} + +impl Tester { + pub(super) fn new() -> Self { + let base_system_contracts = BaseSystemContracts::load_from_disk(); + Self { + base_system_contracts, + } + } + + pub(super) async fn create_gas_adjuster(&self) -> GasAdjuster { + let eth_client = + MockEthereum::default().with_fee_history(vec![0, 4, 6, 8, 7, 5, 5, 8, 10, 9]); + + let gas_adjuster_config = GasAdjusterConfig { + default_priority_fee_per_gas: 10, + max_base_fee_samples: 10, + pricing_formula_parameter_a: 1.0, + pricing_formula_parameter_b: 1.0, + internal_l1_pricing_multiplier: 1.0, + internal_enforced_l1_gas_price: None, + poll_period: 10, + }; + + GasAdjuster::new(eth_client, gas_adjuster_config) + .await + .unwrap() + } + + // Constant value to be used both in tests and inside of the IO. + pub(super) fn fair_l2_gas_price(&self) -> u64 { + 100 + } + + pub(super) async fn create_test_mempool_io( + &self, + pool: ConnectionPool, + ) -> Result<(MempoolIO>, MempoolGuard), Error> { + let gas_adjuster = Arc::new(self.create_gas_adjuster().await); + + let mempool = MempoolGuard(Arc::new(Mutex::new(MempoolStore::new( + PriorityOpId(0), + 100, + )))); + + Ok(( + MempoolIO::new( + mempool.clone(), + pool, + Address::default(), + self.fair_l2_gas_price(), + Duration::from_secs(1), + gas_adjuster, + self.base_system_contracts.hashes(), + ), + mempool, + )) + } + + pub(super) fn genesis(&self, pool: &ConnectionPool) { + let mut storage = pool.access_storage_blocking(); + if storage.blocks_dal().is_genesis_needed() { + create_genesis_block( + &mut storage, + Address::repeat_byte(0x01), + L2ChainId(270), + self.base_system_contracts.clone(), + ); + } + } + + pub(super) fn insert_miniblock( + &self, + pool: &ConnectionPool, + number: u32, + base_fee_per_gas: u64, + l1_gas_price: u64, + l2_fair_gas_price: u64, + ) { + let mut storage = pool.access_storage_blocking(); + storage.blocks_dal().insert_miniblock(MiniblockHeader { + number: MiniblockNumber(number), + timestamp: 0, + hash: Default::default(), + l1_tx_count: 0, + l2_tx_count: 0, + base_fee_per_gas, + l1_gas_price, + l2_fair_gas_price, + base_system_contracts_hashes: self.base_system_contracts.hashes(), + }); + } + + pub(super) fn insert_sealed_batch(&self, pool: &ConnectionPool, number: u32) { + let mut batch_header = L1BatchHeader::new( + L1BatchNumber(number), + 0, + Address::default(), + self.base_system_contracts.hashes(), + ); + batch_header.is_finished = true; + + let mut storage = pool.access_storage_blocking(); + + storage + .blocks_dal() + .insert_l1_batch(batch_header.clone(), Default::default()); + + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(batch_header.number); + + storage + .blocks_dal() + .set_l1_batch_hash(batch_header.number, H256::default()); + } + + pub(super) fn insert_tx( + &self, + guard: &mut MempoolGuard, + 
fee_per_gas: u64, + gas_per_pubdata: u32, + ) { + let fee = Fee { + gas_limit: 1000u64.into(), + max_fee_per_gas: fee_per_gas.into(), + max_priority_fee_per_gas: 0u64.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }; + let mut tx = L2Tx::new_signed( + Address::random(), + vec![], + Nonce(0), + fee, + Default::default(), + L2ChainId(271), + &H256::repeat_byte(0x11u8), + None, + Default::default(), + ) + .unwrap(); + // Input means all transaction data (NOT calldata, but all tx fields) that came from the API. + // This input will be used for the derivation of the tx hash, so put some random to it to be sure + // that the transaction hash is unique. + tx.set_input(H256::random().0.to_vec(), H256::random()); + + guard.insert(vec![tx.into()], Default::default()); + } +} diff --git a/core/bin/zksync_core/src/state_keeper/keeper.rs b/core/bin/zksync_core/src/state_keeper/keeper.rs index e8340c6fdd3e..c6c2d348037a 100644 --- a/core/bin/zksync_core/src/state_keeper/keeper.rs +++ b/core/bin/zksync_core/src/state_keeper/keeper.rs @@ -1,13 +1,11 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; use tokio::sync::watch::Receiver; -use vm::transaction_data::TransactionData; -use vm::TxRevertReason; +use vm::{transaction_data::TransactionData, TxRevertReason}; use zksync_types::{ storage_writes_deduplicator::StorageWritesDeduplicator, MiniblockNumber, Transaction, }; -use zksync_utils::time::millis_since_epoch; use crate::gas_tracker::gas_count_from_writes; use crate::state_keeper::{ @@ -112,7 +110,7 @@ impl ZkSyncStateKeeper { ); let mut batch_executor = self.batch_executor_base.init_batch(l1_batch_params.clone()); - self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute); + self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute)?; loop { self.check_if_cancelled()?; @@ -175,13 +173,16 @@ impl ZkSyncStateKeeper { /// Applies the "pending state" on the `UpdatesManager`. /// Pending state means transactions that were executed before the server restart. Before we continue processing the /// batch, we need to restore the state. We must ensure that every transaction is executed successfully. + /// + /// Additionally, it initialized the next miniblock timestamp. fn restore_state( &mut self, batch_executor: &BatchExecutorHandle, updates_manager: &mut UpdatesManager, txs_to_reexecute: Vec<(MiniblockNumber, Vec)>, - ) { - for (miniblock_number, txs) in txs_to_reexecute { + ) -> Result<(), Canceled> { + let miniblocks_count = txs_to_reexecute.len(); + for (idx, (miniblock_number, txs)) in txs_to_reexecute.into_iter().enumerate() { vlog::info!( "Starting to reexecute transactions from sealed miniblock {}", miniblock_number @@ -189,25 +190,30 @@ impl ZkSyncStateKeeper { for tx in txs { let result = batch_executor.execute_tx(tx.clone()); - if !result.success() { - let err = result.err().unwrap(); - panic!( - "Re-executing stored tx failed. Tx: {:?}. Err: {:?}", - tx, err - ) + let TxExecutionResult::Success { + tx_result, + tx_metrics, + compressed_bytecodes, + .. + } = result else { + panic!( + "Re-executing stored tx failed. Tx: {:?}. 
Err: {:?}", + tx, + result.err() + ); }; - let tx_execution_result = result.tx_result.unwrap(); - let tx_execution_status = tx_execution_result.status; let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = result.tx_metrics.unwrap(); + } = tx_metrics; + + let exec_result_status = tx_result.status; updates_manager.extend_from_executed_transaction( &tx, - tx_execution_result, - result.compressed_bytecodes, + *tx_result, + compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, ); @@ -222,7 +228,7 @@ impl ZkSyncStateKeeper { self.io.current_l1_batch_number().0, updates_manager.miniblock.executed_transactions.len(), miniblock_number, - tx_execution_status, + exec_result_status, tx_l1_gas_this_tx, updates_manager.pending_l1_gas_count(), &tx_execution_metrics, @@ -230,12 +236,17 @@ impl ZkSyncStateKeeper { ); } - // For old miniblocks that we reexecute the correct timestamps are already persisted in the DB and won't be overwritten. - // However, `seal_miniblock` method of `UpdatesManager` takes the only parameter `new_miniblock_timstamp` - // that will be used as a timestamp for the next sealed miniblock. - // So, we should care about passing the correct timestamp for miniblock that comes after the pending batch. - updates_manager.seal_miniblock((millis_since_epoch() / 1000) as u64); + if idx == miniblocks_count - 1 { + // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. + let new_timestamp = self.wait_for_new_miniblock_params()?; + updates_manager.seal_miniblock(new_timestamp); + } else { + // For all the blocks except the last one we pass 0 as a timestamp, since we don't expect it to be used + // anywhere. Using an obviously wrong value would make bugs easier to spot. + updates_manager.seal_miniblock(0); + } } + Ok(()) } fn process_l1_batch( @@ -256,39 +267,41 @@ impl ZkSyncStateKeeper { let new_timestamp = self.wait_for_new_miniblock_params()?; updates_manager.seal_miniblock(new_timestamp); } + let started_waiting = Instant::now(); let Some(tx) = self.io.wait_for_next_tx(POLL_WAIT_DURATION) else { + metrics::histogram!("server.state_keeper.waiting_for_tx", started_waiting.elapsed()); vlog::trace!("No new transactions. Waiting!"); continue; }; + metrics::histogram!( + "server.state_keeper.waiting_for_tx", + started_waiting.elapsed(), + ); let (seal_resolution, exec_result) = self.process_one_tx(batch_executor, updates_manager, &tx); match &seal_resolution { - SealResolution::NoSeal => { - let ExecutionMetricsForCriteria { - l1_gas: tx_l1_gas_this_tx, - execution_metrics: tx_execution_metrics, - .. - } = exec_result.tx_metrics.unwrap(); - updates_manager.extend_from_executed_transaction( - &tx, - exec_result.tx_result.unwrap(), - exec_result.compressed_bytecodes, - tx_l1_gas_this_tx, - tx_execution_metrics, - ); - } - SealResolution::IncludeAndSeal => { + SealResolution::NoSeal | SealResolution::IncludeAndSeal => { + let TxExecutionResult::Success { + tx_result, + tx_metrics, + compressed_bytecodes, + .. + } = exec_result else { + panic!( + "Tx inclusion seal resolution must be a result of a successful tx execution", + ); + }; let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, .. 
- } = exec_result.tx_metrics.unwrap(); + } = tx_metrics; updates_manager.extend_from_executed_transaction( &tx, - exec_result.tx_result.unwrap(), - exec_result.compressed_bytecodes, + *tx_result, + compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, ); @@ -323,16 +336,9 @@ impl ZkSyncStateKeeper { tx: &Transaction, ) -> (SealResolution, TxExecutionResult) { let exec_result = batch_executor.execute_tx(tx.clone()); - let TxExecutionResult { - tx_result, - bootloader_dry_run_result, - tx_metrics, - bootloader_dry_run_metrics, - .. - } = exec_result.clone(); - - match tx_result { - Err(TxRevertReason::BootloaderOutOfGas) => { + + match exec_result.clone() { + TxExecutionResult::BootloaderOutOfGasForTx => { metrics::increment_counter!( "server.tx_aggregation.reason", "criterion" => "bootloader_tx_out_of_gas", @@ -340,18 +346,42 @@ impl ZkSyncStateKeeper { ); (SealResolution::ExcludeAndSeal, exec_result) } - Err(rejection) => ( - SealResolution::Unexecutable(rejection.to_string()), - exec_result, - ), - Ok(tx_execution_result) => { - let tx_execution_status = tx_execution_result.status; + TxExecutionResult::BootloaderOutOfGasForBlockTip => { + metrics::increment_counter!( + "server.tx_aggregation.reason", + "criterion" => "bootloader_block_tip_failed", + "seal_resolution" => "exclude_and_seal", + ); + (SealResolution::ExcludeAndSeal, exec_result) + } + TxExecutionResult::RejectedByVm { rejection_reason } => match rejection_reason { + TxRevertReason::NotEnoughGasProvided => { + metrics::increment_counter!( + "server.tx_aggregation.reason", + "criterion" => "not_enough_gas_provided_to_start_tx", + "seal_resolution" => "exclude_and_seal", + ); + (SealResolution::ExcludeAndSeal, exec_result) + } + _ => ( + SealResolution::Unexecutable(rejection_reason.to_string()), + exec_result, + ), + }, + TxExecutionResult::Success { + tx_result, + tx_metrics, + bootloader_dry_run_metrics, + bootloader_dry_run_result, + .. + } => { + let tx_execution_status = tx_result.status; let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = tx_metrics.unwrap(); + } = tx_metrics; - vlog::debug!( + vlog::trace!( "finished tx {:?} by {:?} (is_l1: {}) (#{} in l1 batch {}) (#{} in miniblock {}) \ status: {:?}. L1 gas spent: {:?}, total in l1 batch: {:?}, \ tx execution metrics: {:?}, block execution metrics: {:?}", @@ -369,29 +399,16 @@ impl ZkSyncStateKeeper { updates_manager.pending_execution_metrics() + tx_execution_metrics, ); - let bootloader_dry_run_result = - if let Ok(bootloader_dry_run_result) = bootloader_dry_run_result.unwrap() { - bootloader_dry_run_result - } else { - // Exclude and seal. - metrics::increment_counter!( - "server.tx_aggregation.reason", - "criterion" => "bootloader_block_tip_failed", - "seal_resolution" => "exclude_and_seal", - ); - return (SealResolution::ExcludeAndSeal, exec_result); - }; - let ExecutionMetricsForCriteria { l1_gas: finish_block_l1_gas, execution_metrics: finish_block_execution_metrics, .. 
- } = bootloader_dry_run_metrics.unwrap(); + } = bootloader_dry_run_metrics; let tx_data: TransactionData = tx.clone().into(); let encoding_len = tx_data.into_tokens().len(); - let logs_to_apply_iter = tx_execution_result + let logs_to_apply_iter = tx_result .result .logs .storage_logs diff --git a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs index 27c6e35b0793..30d7a1597b2d 100644 --- a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs @@ -1,30 +1,49 @@ use super::types::MempoolGuard; -use crate::GasAdjuster; +use crate::l1_gas_price::L1GasPriceProvider; use std::sync::Arc; use std::time::Duration; use std::time::Instant; use tokio::sync::watch; +use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; use zksync_config::ZkSyncConfig; use zksync_dal::ConnectionPool; -use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_mempool::L2TxFilter; + +/// Creates a mempool filter for L2 transactions based on the current L1 gas price. +/// The filter is used to filter out transactions from the mempool that do not cover expenses +/// to process them. +pub fn l2_tx_filter( + gas_price_provider: &G, + fair_l2_gas_price: u64, +) -> L2TxFilter { + let effective_gas_price = gas_price_provider.estimate_effective_gas_price(); + + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(effective_gas_price, fair_l2_gas_price); + L2TxFilter { + l1_gas_price: effective_gas_price, + fee_per_gas: base_fee, + gas_per_pubdata: gas_per_pubdata as u32, + } +} #[derive(Debug)] -pub struct MempoolFetcher { +pub struct MempoolFetcher { mempool: MempoolGuard, - gas_adjuster: Arc>, + l1_gas_price_provider: Arc, sync_interval: Duration, sync_batch_size: usize, } -impl MempoolFetcher { +impl MempoolFetcher { pub fn new( mempool: MempoolGuard, - gas_adjuster: Arc>, + l1_gas_price_provider: Arc, config: &ZkSyncConfig, ) -> Self { Self { mempool, - gas_adjuster, + l1_gas_price_provider, sync_interval: config.chain.mempool.sync_interval(), sync_batch_size: config.chain.mempool.sync_batch_size, } @@ -39,7 +58,7 @@ impl MempoolFetcher { stop_receiver: watch::Receiver, ) { { - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); if remove_stuck_txs { let removed_txs = storage .transactions_dal() @@ -55,9 +74,9 @@ impl MempoolFetcher { break; } let started_at = Instant::now(); - let mut storage = pool.access_storage().await; + let mut storage = pool.access_storage_blocking(); let mempool_info = self.mempool.get_mempool_info(); - let l2_tx_filter = self.gas_adjuster.l2_tx_filter(fair_l2_gas_price); + let l2_tx_filter = l2_tx_filter(self.l1_gas_price_provider.as_ref(), fair_l2_gas_price); let (transactions, nonces) = storage.transactions_dal().sync_mempool( mempool_info.stashed_accounts, diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs index 11d8f51799b5..bec3aed24e48 100644 --- a/core/bin/zksync_core/src/state_keeper/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/mod.rs @@ -6,17 +6,16 @@ use zksync_config::constants::MAX_TXS_IN_BLOCK; use zksync_config::ZkSyncConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; -use zksync_eth_client::EthInterface; use self::batch_executor::MainBatchExecutorBuilder; use self::io::MempoolIO; -use crate::gas_adjuster::GasAdjuster; +use crate::l1_gas_price::L1GasPriceProvider; use 
crate::state_keeper::seal_criteria::SealManager; pub use self::{keeper::ZkSyncStateKeeper, types::MempoolGuard}; -pub(crate) mod batch_executor; -mod extractors; +pub mod batch_executor; +pub(crate) mod extractors; pub(crate) mod io; mod keeper; pub(crate) mod mempool_actor; @@ -26,15 +25,15 @@ mod tests; pub(crate) mod types; pub(crate) mod updates; -pub(crate) fn start_state_keeper( +pub(crate) fn start_state_keeper( config: &ZkSyncConfig, pool: &ConnectionPool, mempool: MempoolGuard, - gas_adjuster: Arc>, + l1_gas_price_provider: Arc, stop_receiver: Receiver, ) -> ZkSyncStateKeeper where - E: EthInterface + 'static + std::fmt::Debug + Send + Sync, + G: L1GasPriceProvider + 'static + std::fmt::Debug + Send + Sync, { assert!( config.chain.state_keeper.transaction_slots <= MAX_TXS_IN_BLOCK, @@ -44,8 +43,8 @@ where let batch_executor_base = MainBatchExecutorBuilder::new( config.db.state_keeper_db_path.clone(), pool.clone(), - config.chain.state_keeper.reexecute_each_tx, config.chain.state_keeper.max_allowed_l2_tx_gas_limit.into(), + config.chain.state_keeper.save_call_traces, config.chain.state_keeper.validation_computational_gas_limit, ); let io = MempoolIO::new( @@ -54,7 +53,7 @@ where config.chain.state_keeper.fee_account_addr, config.chain.state_keeper.fair_l2_gas_price, config.chain.operations_manager.delay_interval(), - gas_adjuster, + l1_gas_price_provider, BaseSystemContractsHashes { bootloader: config.chain.state_keeper.bootloader_hash, default_aa: config.chain.state_keeper.default_aa_hash, diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs new file mode 100644 index 000000000000..77efc145d32d --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs @@ -0,0 +1,130 @@ +//! This module represents the conditional sealer, which can decide whether the batch +//! should be sealed after executing a particular transaction. +//! It is used on the main node to decide when the batch should be sealed (as opposed to the external node, +//! which unconditionally follows the instructions from the main node). + +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_types::{ + block::BlockGasCount, + tx::{tx_execution_info::DeduplicatedWritesMetrics, ExecutionMetrics}, +}; + +use super::{criteria, SealCriterion, SealResolution}; + +#[derive(Debug)] +pub struct ConditionalSealer { + config: StateKeeperConfig, + /// Primary sealers set that is used to check if batch should be sealed after executing a transaction. 
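+ /// All sealers are consulted after each transaction; the strictest of their resolutions wins.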
+ sealers: Vec>, +} + +impl ConditionalSealer { + pub(crate) fn new(config: StateKeeperConfig) -> Self { + let sealers: Vec> = Self::get_default_sealers(); + + Self { config, sealers } + } + + #[cfg(test)] + pub(crate) fn with_sealers( + config: StateKeeperConfig, + sealers: Vec>, + ) -> Self { + Self { config, sealers } + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn should_seal_l1_batch( + &self, + l1_batch_number: u32, + block_open_timestamp_ms: u128, + tx_count: usize, + block_execution_metrics: ExecutionMetrics, + tx_execution_metrics: ExecutionMetrics, + block_gas_count: BlockGasCount, + tx_gas_count: BlockGasCount, + block_included_txs_size: usize, + tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, + ) -> SealResolution { + let mut final_seal_resolution = SealResolution::NoSeal; + for sealer in &self.sealers { + let seal_resolution = sealer.should_seal( + &self.config, + block_open_timestamp_ms, + tx_count, + block_execution_metrics, + tx_execution_metrics, + block_gas_count, + tx_gas_count, + block_included_txs_size, + tx_size, + block_writes_metrics, + tx_writes_metrics, + ); + match seal_resolution { + SealResolution::IncludeAndSeal => { + vlog::debug!( + "Seal block with resolution: IncludeAndSeal {} {} block: {:?}", + l1_batch_number, + sealer.prom_criterion_name(), + block_execution_metrics + ); + metrics::counter!( + "server.tx_aggregation.reason", + 1, + "criterion" => sealer.prom_criterion_name(), + "seal_resolution" => "include_and_seal", + ); + } + SealResolution::ExcludeAndSeal => { + vlog::debug!( + "Seal block with resolution: ExcludeAndSeal {} {} block: {:?}", + l1_batch_number, + sealer.prom_criterion_name(), + block_execution_metrics + ); + metrics::counter!( + "server.tx_aggregation.reason", + 1, + "criterion" => sealer.prom_criterion_name(), + "seal_resolution" => "exclude_and_seal", + ); + } + SealResolution::Unexecutable(_) => { + vlog::debug!( + "Unexecutable {} {} block: {:?}", + l1_batch_number, + sealer.prom_criterion_name(), + block_execution_metrics + ); + metrics::counter!( + "server.tx_aggregation.reason", + 1, + "criterion" => sealer.prom_criterion_name(), + "seal_resolution" => "unexecutable", + ); + } + _ => {} + } + + final_seal_resolution = final_seal_resolution.stricter(seal_resolution); + } + final_seal_resolution + } + + pub(crate) fn get_default_sealers() -> Vec> { + let sealers: Vec> = vec![ + Box::new(criteria::slots::SlotsCriterion), + Box::new(criteria::gas::GasCriterion), + Box::new(criteria::pubdata_bytes::PubDataBytesCriterion), + Box::new(criteria::geometry_seal_criteria::InitialWritesCriterion), + Box::new(criteria::geometry_seal_criteria::RepeatedWritesCriterion), + Box::new(criteria::geometry_seal_criteria::MaxCyclesCriterion), + Box::new(criteria::geometry_seal_criteria::ComputationalGasCriterion), + Box::new(criteria::tx_encoding_size::TxEncodingSizeCriterion), + ]; + sealers + } +} diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs similarity index 96% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs index d5cf7205e6b1..a2d45583337b 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs @@ -2,7 +2,7 @@ pub(self) use 
zksync_config::configs::chain::StateKeeperConfig; use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use super::{SealCriterion, SealResolution}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; /// Represents a thread-safe function pointer. type CustomSealerFn = dyn Fn( diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs similarity index 98% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs index e8b9e0389b0b..46834a2988fe 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs @@ -1,5 +1,5 @@ -use super::{SealCriterion, SealResolution, StateKeeperConfig}; use crate::gas_tracker::new_block_gas_count; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs similarity index 90% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index 7b2e3fbba9c5..b211cc0acd7b 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,9 +1,10 @@ use std::fmt::Debug; -use vm::MAX_CYCLES_FOR_TX; +use vm::{zk_evm::zkevm_opcode_defs::system_params::ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::circuit::GEOMETRY_CONFIG; use zksync_types::{ block::BlockGasCount, + circuit::SCHEDULER_UPPER_BOUND, tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, }; // Local uses @@ -12,14 +13,14 @@ use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; // Collected vm execution metrics should fit into geometry limits. // Otherwise witness generation will fail and proof won't be generated. 
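// A minimal sketch of how these criteria are consumed (hedged: the exact thresholds live in the
// shared `SealCriterion` impl for `MetricExtractor` types, which is not part of this change, and
// `close_bound` below stands in for the configured fraction of the limit):
//
//     let used = T::extract(&block_execution_metrics, &block_writes_metrics);
//     if used >= T::limit_per_block() {
//         SealResolution::ExcludeAndSeal   // block already at the geometry limit
//     } else if used > close_bound {
//         SealResolution::IncludeAndSeal
//     } else {
//         SealResolution::NoSeal
//     };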
-#[derive(Debug, Default)] -pub struct BytecodeHashesCriterion; #[derive(Debug, Default)] pub struct RepeatedWritesCriterion; #[derive(Debug, Default)] pub struct InitialWritesCriterion; #[derive(Debug, Default)] pub struct MaxCyclesCriterion; +#[derive(Debug, Default)] +pub struct ComputationalGasCriterion; trait MetricExtractor { const PROM_METRIC_CRITERION_NAME: &'static str; @@ -69,18 +70,6 @@ where } } -impl MetricExtractor for BytecodeHashesCriterion { - const PROM_METRIC_CRITERION_NAME: &'static str = "used_contract_hashes"; - - fn limit_per_block() -> usize { - GEOMETRY_CONFIG.limit_for_code_decommitter_sorter as usize - } - - fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { - metrics.contracts_used - } -} - impl MetricExtractor for RepeatedWritesCriterion { const PROM_METRIC_CRITERION_NAME: &'static str = "repeated_storage_writes"; @@ -117,18 +106,36 @@ impl MetricExtractor for MaxCyclesCriterion { } } +impl MetricExtractor for ComputationalGasCriterion { + const PROM_METRIC_CRITERION_NAME: &'static str = "computational_gas"; + + fn limit_per_block() -> usize { + // We subtract constant to take into account that circuits may be not fully filled. + // This constant should be greater than number of circuits types + // but we keep it larger to be on the safe side. + const MARGIN_NUMBER_OF_CIRCUITS: usize = 100; + const MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS: usize = + SCHEDULER_UPPER_BOUND as usize - MARGIN_NUMBER_OF_CIRCUITS; + + MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS * ERGS_PER_CIRCUIT as usize + } + + fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { + metrics.computational_gas_used as usize + } +} + #[cfg(test)] mod tests { use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::tx::tx_execution_info::DeduplicatedWritesMetrics; use zksync_types::tx::ExecutionMetrics; - use crate::state_keeper::seal_criteria::geometry_seal_criteria::MaxCyclesCriterion; - use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; - use super::{ - BytecodeHashesCriterion, InitialWritesCriterion, MetricExtractor, RepeatedWritesCriterion, + ComputationalGasCriterion, InitialWritesCriterion, MaxCyclesCriterion, MetricExtractor, + RepeatedWritesCriterion, }; + use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; fn get_config() -> StateKeeperConfig { StateKeeperConfig { @@ -318,11 +325,6 @@ mod tests { }; } - #[test] - fn bytecode_hashes_criterion() { - test_scenario_execution_metrics!(BytecodeHashesCriterion, contracts_used, usize); - } - #[test] fn repeated_writes_seal_criterion() { test_scenario_writes_metrics!(RepeatedWritesCriterion, repeated_storage_writes, usize); @@ -334,7 +336,12 @@ mod tests { } #[test] - fn initial_max_cycles_seal_criterion() { + fn max_cycles_seal_criterion() { test_scenario_execution_metrics!(MaxCyclesCriterion, cycles_used, u32); } + + #[test] + fn computational_gas_seal_criterion() { + test_scenario_execution_metrics!(ComputationalGasCriterion, computational_gas_used, u32); + } } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs new file mode 100644 index 000000000000..3e0b7c5cb27d --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs @@ -0,0 +1,8 @@ +pub(crate) mod function; +pub(crate) mod gas; +pub(crate) mod slots; + +pub(super) mod geometry_seal_criteria; +pub(super) mod pubdata_bytes; +pub(super) mod timeout; 
+pub(super) mod tx_encoding_size; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs similarity index 95% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs index ec7e067cf48a..906631b85997 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs @@ -1,7 +1,7 @@ use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_types::{block::BlockGasCount, MAX_PUBDATA_PER_L1_BATCH}; -use super::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; #[derive(Debug)] pub struct PubDataBytesCriterion; @@ -51,8 +51,7 @@ impl SealCriterion for PubDataBytesCriterion { #[cfg(test)] mod tests { - use super::{PubDataBytesCriterion, SealCriterion, SealResolution}; - use crate::state_keeper::seal_criteria::pubdata_bytes::MAX_PUBDATA_PER_L1_BATCH; + use super::{PubDataBytesCriterion, SealCriterion, SealResolution, MAX_PUBDATA_PER_L1_BATCH}; use zksync_config::ZkSyncConfig; use zksync_types::tx::ExecutionMetrics; @@ -75,6 +74,7 @@ mod tests { storage_logs: 0, total_log_queries: 0, cycles_used: 0, + computational_gas_used: 0, }; let empty_block_resolution = criterion.should_seal( @@ -106,6 +106,7 @@ mod tests { storage_logs: 0, total_log_queries: 0, cycles_used: 0, + computational_gas_used: 0, }; let full_block_resolution = criterion.should_seal( @@ -134,6 +135,7 @@ mod tests { storage_logs: 0, total_log_queries: 0, cycles_used: 0, + computational_gas_used: 0, }; let full_block_resolution = criterion.should_seal( &config, diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs similarity index 96% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs index b0593260d17d..4c78aeae9ba2 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs @@ -1,4 +1,4 @@ -use super::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs similarity index 97% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs index db2fa33efd7e..625e1b102d3b 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs @@ -2,7 +2,7 @@ use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_utils::time::millis_since_epoch; -use super::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, 
StateKeeperConfig}; /// Checks whether we should seal the block because we've reached the block commit timeout. #[derive(Debug)] diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs similarity index 97% rename from core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs rename to core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs index d4e200a2206b..86c75f05bb60 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs @@ -2,7 +2,7 @@ use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use super::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; #[derive(Debug)] pub struct TxEncodingSizeCriterion; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs index f8e0c9b8c5ad..d30332f322b7 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -17,15 +17,11 @@ use zksync_types::block::BlockGasCount; use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_utils::time::{millis_since, millis_since_epoch}; +use self::conditional_sealer::ConditionalSealer; use super::updates::UpdatesManager; -pub(crate) mod function; -pub(crate) mod gas; -mod geometry_seal_criteria; -mod pubdata_bytes; -pub(crate) mod slots; -mod timeout; -mod tx_encoding_size; +pub(crate) mod conditional_sealer; +pub(crate) mod criteria; /// Reported decision regarding block sealing. #[derive(Debug, Clone, PartialEq)] @@ -102,68 +98,64 @@ pub trait SealCriterion: Debug + Send + 'static { pub type SealerFn = dyn Fn(&UpdatesManager) -> bool + Send; pub struct SealManager { - config: StateKeeperConfig, - /// Primary sealers set that is used to check if batch should be sealed after executing a transaction. - sealers: Vec>, + /// Conditional sealer, i.e. one that can decide whether the batch should be sealed after executing a tx. + /// Currently, it's expected to be `Some` on the main node and `None` on the external nodes, since external nodes + /// do not decide whether to seal the batch or not. + conditional_sealer: Option, /// Unconditional batch sealer, i.e. one that can be used if we should seal the batch *without* executing a tx. - unconditional_sealer: Box, + /// If any of the unconditional sealers returns `true`, the batch will be sealed. + /// + /// Note: only non-empty batch can be sealed. + unconditional_sealers: Vec>, /// Miniblock sealer function used to determine if we should seal the miniblock. - miniblock_sealer: Box, + /// If any of the miniblock sealers returns `true`, the miniblock will be sealed. + miniblock_sealers: Vec>, } impl Debug for SealManager { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SealManager") - .field("config", &self.config) - .field("sealers", &self.sealers) - .finish() + f.debug_struct("SealManager").finish() } } impl SealManager { - /// Creates a default pre-configured seal manager. + /// Creates a default pre-configured seal manager for the main node. 
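+ /// It combines the `ConditionalSealer` (per-transaction criteria) with the timeout and
+ /// code hash unconditional batch sealers and a timeout-based miniblock sealer.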
pub(crate) fn new(config: StateKeeperConfig) -> Self { - let sealers: Vec> = Self::get_default_sealers(); - let unconditional_sealer = Self::timeout_and_code_hash_batch_sealer( - config.block_commit_deadline_ms, - BaseSystemContractsHashes { - bootloader: config.bootloader_hash, - default_aa: config.default_aa_hash, - }, - ); - let miniblock_sealer = Self::timeout_miniblock_sealer(config.miniblock_commit_deadline_ms); + let timeout_batch_sealer = Self::timeout_batch_sealer(config.block_commit_deadline_ms); + let code_hash_batch_sealer = Self::code_hash_batch_sealer(BaseSystemContractsHashes { + bootloader: config.bootloader_hash, + default_aa: config.default_aa_hash, + }); + let timeout_miniblock_sealer = + Self::timeout_miniblock_sealer(config.miniblock_commit_deadline_ms); + let conditional_sealer = ConditionalSealer::new(config); - Self::custom(config, sealers, unconditional_sealer, miniblock_sealer) + Self::custom( + Some(conditional_sealer), + vec![timeout_batch_sealer, code_hash_batch_sealer], + vec![timeout_miniblock_sealer], + ) } /// Allows to create a seal manager object from externally-defined sealers. - /// Mostly useful for test configuration. pub fn custom( - config: StateKeeperConfig, - sealers: Vec>, - unconditional_sealer: Box, - miniblock_sealer: Box, + conditional_sealer: Option, + unconditional_sealer: Vec>, + miniblock_sealer: Vec>, ) -> Self { Self { - config, - sealers, - unconditional_sealer, - miniblock_sealer, + conditional_sealer, + unconditional_sealers: unconditional_sealer, + miniblock_sealers: miniblock_sealer, } } /// Creates a sealer function that would seal the batch because of the timeout. - pub(crate) fn timeout_and_code_hash_batch_sealer( - block_commit_deadline_ms: u64, - base_system_contracts_hashes: BaseSystemContractsHashes, - ) -> Box { + pub(crate) fn timeout_batch_sealer(block_commit_deadline_ms: u64) -> Box { Box::new(move |manager| { // Verify timestamp let should_seal_timeout = millis_since(manager.batch_timestamp()) > block_commit_deadline_ms; - // Verify code hashes - let should_seal_code_hashes = - base_system_contracts_hashes != manager.base_system_contract_hashes(); if should_seal_timeout { metrics::increment_counter!( @@ -178,6 +170,20 @@ impl SealManager { ); } + should_seal_timeout + }) + } + + /// Creates a sealer function that would seal the batch if the provided base system contract hashes are different + /// from ones in the updates manager. 
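+ /// This typically fires when the bootloader or default account code is upgraded, so the
+ /// current batch is sealed before the new code hashes take effect.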
+ pub(crate) fn code_hash_batch_sealer( + base_system_contracts_hashes: BaseSystemContractsHashes, + ) -> Box { + Box::new(move |manager| { + // Verify code hashes + let should_seal_code_hashes = + base_system_contracts_hashes != manager.base_system_contract_hashes(); + if should_seal_code_hashes { metrics::increment_counter!( "server.tx_aggregation.reason", @@ -192,7 +198,7 @@ impl SealManager { ); } - should_seal_timeout || should_seal_code_hashes + should_seal_code_hashes }) } @@ -220,10 +226,9 @@ impl SealManager { block_writes_metrics: DeduplicatedWritesMetrics, tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { - let mut final_seal_resolution = SealResolution::NoSeal; - for sealer in &self.sealers { - let seal_resolution = sealer.should_seal( - &self.config, + if let Some(sealer) = self.conditional_sealer.as_ref() { + sealer.should_seal_l1_batch( + l1_batch_number, block_open_timestamp_ms, tx_count, block_execution_metrics, @@ -234,64 +239,22 @@ impl SealManager { tx_size, block_writes_metrics, tx_writes_metrics, - ); - match seal_resolution { - SealResolution::IncludeAndSeal => { - vlog::debug!( - "Seal block with resolution: IncludeAndSeal {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics - ); - metrics::counter!( - "server.tx_aggregation.reason", - 1, - "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "include_and_seal", - ); - } - SealResolution::ExcludeAndSeal => { - vlog::debug!( - "Seal block with resolution: ExcludeAndSeal {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics - ); - metrics::counter!( - "server.tx_aggregation.reason", - 1, - "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "exclude_and_seal", - ); - } - SealResolution::Unexecutable(_) => { - vlog::debug!( - "Unexecutable {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics - ); - metrics::counter!( - "server.tx_aggregation.reason", - 1, - "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "unexecutable", - ); - } - _ => {} - } - - final_seal_resolution = final_seal_resolution.stricter(seal_resolution); + ) + } else { + SealResolution::NoSeal } - final_seal_resolution } pub(crate) fn should_seal_l1_batch_unconditionally( &self, updates_manager: &UpdatesManager, ) -> bool { + // Regardless of which sealers are provided, we never want to seal an empty batch. updates_manager.pending_executed_transactions_len() != 0 - && (self.unconditional_sealer)(updates_manager) + && self + .unconditional_sealers + .iter() + .any(|sealer| (sealer)(updates_manager)) } pub(crate) fn should_seal_miniblock(&self, updates_manager: &UpdatesManager) -> bool { @@ -300,21 +263,9 @@ impl SealManager { // where we have to replicate the state of the main node, including the last (empty) miniblock of the batch). // The check for the number of transactions is expected to be done, if relevant, in the `miniblock_sealer` // directly. 
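// For reference, a miniblock sealer is just a boxed `SealerFn` closure over the `UpdatesManager`;
// e.g. the tests install one that seals after every transaction (sketch mirroring that setup):
//
//     let seal_each_tx: Box<SealerFn> =
//         Box::new(|updates| updates.miniblock.executed_transactions.len() == 1);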
- (self.miniblock_sealer)(updates_manager) - } - - pub(crate) fn get_default_sealers() -> Vec> { - let sealers: Vec> = vec![ - Box::new(slots::SlotsCriterion), - Box::new(gas::GasCriterion), - Box::new(pubdata_bytes::PubDataBytesCriterion), - Box::new(geometry_seal_criteria::BytecodeHashesCriterion), - Box::new(geometry_seal_criteria::InitialWritesCriterion), - Box::new(geometry_seal_criteria::RepeatedWritesCriterion), - Box::new(geometry_seal_criteria::MaxCyclesCriterion), - Box::new(tx_encoding_size::TxEncodingSizeCriterion), - ]; - sealers + self.miniblock_sealers + .iter() + .any(|sealer| (sealer)(updates_manager)) } } @@ -371,7 +322,9 @@ mod tests { revert_reason: None, contracts_used: 0, cycles_used: 0, + computational_gas_used: 0, }, + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, }, @@ -384,7 +337,7 @@ mod tests { /// This test mostly exists to make sure that we can't seal empty miniblocks on the main node. #[test] fn timeout_miniblock_sealer() { - let timeout_miniblock_sealer = SealManager::timeout_miniblock_sealer(1000); + let timeout_miniblock_sealer = SealManager::timeout_miniblock_sealer(10_000); let mut manager = create_manager(); // Empty miniblock should not trigger. @@ -401,7 +354,9 @@ mod tests { "Non-empty miniblock with old timestamp should be sealed" ); - // Check the timestamp logic. + // Check the timestamp logic. This relies on the fact that the test shouldn't run + // for more than 10 seconds (while the test itself is trivial, it may be preempted + // by other tests). manager.miniblock.timestamp = seconds_since_epoch(); assert!( !timeout_miniblock_sealer(&manager), diff --git a/core/bin/zksync_core/src/state_keeper/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/tests/mod.rs index f0a46423e7fc..55315511126b 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/mod.rs @@ -16,19 +16,22 @@ use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_types::{ block::BlockGasCount, zk_evm::block_properties::BlockProperties, MiniblockNumber, }; -use zksync_utils::{h256_to_u256, time::millis_since_epoch}; +use zksync_utils::h256_to_u256; use crate::state_keeper::{ - seal_criteria::{gas::GasCriterion, slots::SlotsCriterion, SealManager}, + seal_criteria::{ + criteria::{gas::GasCriterion, slots::SlotsCriterion}, + SealManager, + }, types::ExecutionMetricsForCriteria, }; use self::tester::{ bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec, - TestScenario, + successful_exec_with_metrics, TestScenario, }; -use super::keeper::POLL_WAIT_DURATION; +use super::{keeper::POLL_WAIT_DURATION, seal_criteria::conditional_sealer::ConditionalSealer}; mod tester; @@ -48,11 +51,16 @@ fn sealed_by_number_of_txs() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let scenario = TestScenario::new(); @@ -74,20 +82,24 @@ fn sealed_by_gas() { close_block_at_gas_percentage: 0.5, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(GasCriterion)], 
- Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); - let mut execution_result = successful_exec(); let l1_gas_per_tx = BlockGasCount { commit: 1, // Both txs together with block_base_cost would bring it over the block 31_001 commit bound. prove: 0, execute: 0, }; - execution_result.add_tx_metrics(ExecutionMetricsForCriteria { + let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { l1_gas: l1_gas_per_tx, execution_metrics: Default::default(), }); @@ -126,15 +138,19 @@ fn sealed_by_gas_then_by_num_tx() { transaction_slots: 3, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(GasCriterion), Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); - let mut execution_result = successful_exec(); - execution_result.add_tx_metrics(ExecutionMetricsForCriteria { + let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { l1_gas: BlockGasCount { commit: 1, prove: 0, @@ -164,11 +180,16 @@ fn batch_sealed_before_miniblock_does() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 3), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 3 + })], ); let scenario = TestScenario::new(); @@ -194,11 +215,16 @@ fn basic_flow() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); TestScenario::new() @@ -216,11 +242,16 @@ fn rejected_tx() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let rejected_tx = random_tx(1); @@ -241,11 +272,16 @@ fn bootloader_tip_out_of_gas_flow() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + 
vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let first_tx = random_tx(1); @@ -278,21 +314,15 @@ fn bootloader_tip_out_of_gas_flow() { #[test] fn bootloader_config_has_been_updated() { - let config = StateKeeperConfig { - transaction_slots: 300, - ..Default::default() - }; let sealer = SealManager::custom( - config, - vec![], - SealManager::timeout_and_code_hash_batch_sealer( - u64::MAX, + None, + vec![SealManager::code_hash_batch_sealer( BaseSystemContractsHashes { bootloader: Default::default(), default_aa: Default::default(), }, - ), - Box::new(|_| false), + )], + vec![Box::new(|_| false)], ); let pending_batch = @@ -325,11 +355,16 @@ fn pending_batch_is_applied() { transaction_slots: 3, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let pending_batch = pending_batch_data(vec![ @@ -372,11 +407,16 @@ fn unconditional_sealing() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(move |_| batch_seal_trigger_checker.load(Ordering::Relaxed)), - Box::new(move |upd_manager| { + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(move |_| { + batch_seal_trigger_checker.load(Ordering::Relaxed) + })], + vec![Box::new(move |upd_manager| { if upd_manager.pending_executed_transactions_len() != 0 && start.elapsed() >= seal_miniblock_after { @@ -385,7 +425,7 @@ fn unconditional_sealing() { } else { false } - }), + })], ); TestScenario::new() @@ -404,17 +444,20 @@ fn miniblock_timestamp_after_pending_batch() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let pending_batch = pending_batch_data(vec![(MiniblockNumber(1), vec![random_tx(1)])]); - let current_timestamp = (millis_since_epoch() / 1000) as u64; - TestScenario::new() .load_pending_batch(pending_batch) .next_tx( @@ -424,8 +467,8 @@ fn miniblock_timestamp_after_pending_batch() { ) .miniblock_sealed_with("Miniblock with a single tx", move |updates| { assert!( - updates.miniblock.timestamp >= current_timestamp, - "Timestamp cannot decrease" + updates.miniblock.timestamp == 1, + "Timestamp for the new block must be taken from the test IO" ); }) .batch_sealed("Batch is sealed with two transactions") @@ -446,11 +489,16 @@ fn time_is_monotonic() { transaction_slots: 2, ..Default::default() }; - let sealer = SealManager::custom( + let conditional_sealer = Some(ConditionalSealer::with_sealers( config, vec![Box::new(SlotsCriterion)], - Box::new(|_| false), - Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + )); + let sealer = SealManager::custom( + conditional_sealer, + 
vec![Box::new(|_| false)], + vec![Box::new(|updates| { + updates.miniblock.executed_transactions.len() == 1 + })], ); let scenario = TestScenario::new(); diff --git a/core/bin/zksync_core/src/state_keeper/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/tests/tester.rs index b9c4584e6898..0278fe2f2598 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/tester.rs @@ -13,6 +13,7 @@ use vm::{ vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, VmBlockResult, VmExecutionResult, }; +use zksync_types::vm_trace::{VmExecutionTrace, VmTrace}; use zksync_types::{ l2::L2Tx, tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, Nonce, Transaction, H256, U256, @@ -210,55 +211,66 @@ fn partial_execution_result() -> VmPartialExecutionResult { revert_reason: Default::default(), contracts_used: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), } } /// Creates a `TxExecutionResult` object denoting a successful tx execution. pub(crate) fn successful_exec() -> TxExecutionResult { - let mut result = TxExecutionResult::new(Ok(( - VmTxExecutionResult { + TxExecutionResult::Success { + tx_result: Box::new(VmTxExecutionResult { status: TxExecutionStatus::Success, result: partial_execution_result(), + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, + }), + tx_metrics: ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), }, - vec![], - ))); - result.add_tx_metrics(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }); - result.add_bootloader_result(Ok(partial_execution_result())); - result.add_bootloader_metrics(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }); - result + bootloader_dry_run_metrics: ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), + }, + bootloader_dry_run_result: Box::new(partial_execution_result()), + compressed_bytecodes: vec![], + } +} + +/// Creates a `TxExecutionResult` object denoting a successful tx execution with the given execution metrics. +pub(crate) fn successful_exec_with_metrics( + tx_metrics: ExecutionMetricsForCriteria, +) -> TxExecutionResult { + TxExecutionResult::Success { + tx_result: Box::new(VmTxExecutionResult { + status: TxExecutionStatus::Success, + result: partial_execution_result(), + call_traces: vec![], + gas_refunded: 0, + operator_suggested_refund: 0, + }), + tx_metrics, + bootloader_dry_run_metrics: ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), + }, + bootloader_dry_run_result: Box::new(partial_execution_result()), + compressed_bytecodes: vec![], + } } /// Creates a `TxExecutionResult` object denoting a tx that was rejected. pub(crate) fn rejected_exec() -> TxExecutionResult { - TxExecutionResult::new(Err(vm::TxRevertReason::InnerTxError)) + TxExecutionResult::RejectedByVm { + rejection_reason: vm::TxRevertReason::InnerTxError, + } } /// Creates a `TxExecutionResult` object denoting a transaction that was executed, but caused a bootloader tip out of /// gas error. 
pub(crate) fn bootloader_tip_out_of_gas() -> TxExecutionResult { - let mut result = TxExecutionResult::new(Ok(( - VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - gas_refunded: 0, - operator_suggested_refund: 0, - }, - vec![], - ))); - result.add_tx_metrics(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }); - result.add_bootloader_result(Err(vm::TxRevertReason::BootloaderOutOfGas)); - result + TxExecutionResult::BootloaderOutOfGasForBlockTip } /// Creates a mock `PendingBatchData` object containing the provided sequence of miniblocks. @@ -290,7 +302,7 @@ pub(crate) fn pending_batch_data( #[allow(clippy::type_complexity, clippy::large_enum_variant)] // It's OK for tests. enum ScenarioItem { - /// Configures scenraio to repeatedly return `None` to tx requests until the next action from the scenario happens. + /// Configures scenario to repeatedly return `None` to tx requests until the next action from the scenario happens. NoTxsUntilNextAction(&'static str), Tx(&'static str, Transaction, TxExecutionResult), Rollback(&'static str, Transaction), @@ -483,15 +495,17 @@ impl TestBatchExecutor { gas_used: Default::default(), contracts_used: Default::default(), revert_reason: Default::default(), - trace: Default::default(), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), total_log_queries: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), }, block_tip_result: VmPartialExecutionResult { logs: Default::default(), revert_reason: Default::default(), contracts_used: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), }, }; diff --git a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs index 50ba9c8efcfb..bc01b97bffd4 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs @@ -77,7 +77,9 @@ mod tests { revert_reason: None, contracts_used: 0, cycles_used: 0, + computational_gas_used: 0, }, + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, }, diff --git a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs index 042af683c221..9b4382deab14 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs @@ -91,6 +91,11 @@ impl MiniblockUpdates { refunded_gas: tx_execution_result.gas_refunded, operator_suggested_refund: tx_execution_result.operator_suggested_refund, compressed_bytecodes, + call_traces: tx_execution_result.call_traces, + revert_reason: tx_execution_result + .result + .revert_reason + .map(|reason| reason.to_string()), }); self.events.extend(tx_execution_result.result.logs.events); @@ -140,7 +145,9 @@ mod tests { revert_reason: None, contracts_used: 0, cycles_used: 0, + computational_gas_used: 0, }, + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, }, diff --git a/core/bin/zksync_core/src/state_keeper/updates/mod.rs b/core/bin/zksync_core/src/state_keeper/updates/mod.rs index 75cb99a178f9..e5939f3ceb0d 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/mod.rs @@ -181,7 +181,9 @@ mod tests { revert_reason: None, contracts_used: 0, 
cycles_used: 0, + computational_gas_used: 0, }, + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, }, diff --git a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs index 316c19176e9a..e794d1746415 100644 --- a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs +++ b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs @@ -1,32 +1,101 @@ +use std::time::{Duration, Instant}; + +use tokio::sync::watch::Receiver; + +use zksync_dal::ConnectionPool; +use zksync_types::aggregated_operations::AggregatedActionType; + use super::ActionQueue; /// The task that keeps checking for the new batch status changes and persists them in the database. -pub fn run_batch_status_updater(actions: ActionQueue) { +pub fn run_batch_status_updater( + pool: ConnectionPool, + actions: ActionQueue, + stop_receiver: Receiver, +) { loop { - let changes = actions.take_status_changes(); - for change in changes.commit { + if *stop_receiver.borrow() { + vlog::info!("Stop signal receiver, exiting the batch status updater routine"); + return; + } + + let start = Instant::now(); + let mut storage = pool.access_storage_blocking(); + // Anything past this batch is not saved to the database. + let last_sealed_batch = storage.blocks_dal().get_newest_block_header(); + + let changes = actions.take_status_changes(last_sealed_batch.number); + if changes.is_empty() { + const DELAY_INTERVAL: Duration = Duration::from_secs(5); + std::thread::sleep(DELAY_INTERVAL); + continue; + } + + for change in changes.commit.into_iter() { + assert!( + change.number <= last_sealed_batch.number, + "Commit status change for the batch that is not sealed yet. Last sealed batch: {}, change: {:?}", + last_sealed_batch.number, + change + ); vlog::info!( "Commit status change: number {}, hash {}, happened at {}", change.number, change.l1_tx_hash, change.happened_at ); + storage.eth_sender_dal().insert_bogus_confirmed_eth_tx( + change.number, + AggregatedActionType::CommitBlocks, + change.l1_tx_hash, + change.happened_at, + ); } - for change in changes.prove { + for change in changes.prove.into_iter() { + assert!( + change.number <= last_sealed_batch.number, + "Prove status change for the batch that is not sealed yet. Last sealed batch: {}, change: {:?}", + last_sealed_batch.number, + change + ); vlog::info!( "Prove status change: number {}, hash {}, happened at {}", change.number, change.l1_tx_hash, change.happened_at ); + storage.eth_sender_dal().insert_bogus_confirmed_eth_tx( + change.number, + AggregatedActionType::PublishProofBlocksOnchain, + change.l1_tx_hash, + change.happened_at, + ); } - for change in changes.execute { + for change in changes.execute.into_iter() { + assert!( + change.number <= last_sealed_batch.number, + "Execute status change for the batch that is not sealed yet. 
Last sealed batch: {}, change: {:?}",
+                last_sealed_batch.number,
+                change
+            );
             vlog::info!(
                 "Execute status change: number {}, hash {}, happened at {}",
                 change.number,
                 change.l1_tx_hash,
                 change.happened_at
             );
+
+            storage.eth_sender_dal().insert_bogus_confirmed_eth_tx(
+                change.number,
+                AggregatedActionType::ExecuteBlocks,
+                change.l1_tx_hash,
+                change.happened_at,
+            );
         }
+
+        metrics::histogram!(
+            "external_node.batch_status_updater.loop_iteration",
+            start.elapsed()
+        );
     }
 }
diff --git a/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs b/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs
new file mode 100644
index 000000000000..462f7e2b40a1
--- /dev/null
+++ b/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs
@@ -0,0 +1,185 @@
+use std::{collections::HashMap, time::Instant};
+
+use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber, Transaction, U64};
+use zksync_web3_decl::{
+    jsonrpsee::{
+        core::RpcResult,
+        http_client::{HttpClient, HttpClientBuilder},
+    },
+    namespaces::{EthNamespaceClient, ZksNamespaceClient},
+};
+
+/// Maximum number of concurrent requests to the main node.
+const MAX_CONCURRENT_REQUESTS: usize = 100;
+/// Set of fields fetched together for a single miniblock.
+type MiniblockData = (BlockDetails, Option<(U64, U64)>, Vec<Transaction>);
+
+/// This is a temporary implementation of a cache layer for the main node HTTP requests.
+/// It was introduced to quickly develop a way to fetch data from the main node concurrently,
+/// while not changing the logic of the fetcher itself.
+/// It is intentionally designed in an "easy-to-inject, easy-to-remove" way, so that we can easily
+/// switch it to a more performant implementation later.
+///
+/// The main part of this structure's logic is the ability to concurrently populate the cache
+/// of responses and then consume them in a non-concurrent way.
+///
+/// Note: not every request is guaranteed to be cached, only the ones that are used to build the action queue.
+/// For example, if the batch status updater requests a miniblock header long after it was processed by the main
+/// fetcher routine, most likely it'll be a cache miss.
+#[derive(Debug)]
+pub(super) struct CachedMainNodeClient {
+    /// HTTP client.
+    client: HttpClient,
+    /// Earliest miniblock number that is not yet cached.
+    /// Used as a marker to refill the cache.
+    next_refill_at: MiniblockNumber,
+    miniblock_headers: HashMap<MiniblockNumber, BlockDetails>,
+    batch_ranges: HashMap<L1BatchNumber, (U64, U64)>,
+    txs: HashMap<MiniblockNumber, Vec<Transaction>>,
+}
+
+impl CachedMainNodeClient {
+    pub fn build_client(main_node_url: &str) -> Self {
+        let client = HttpClientBuilder::default()
+            .build(main_node_url)
+            .expect("Unable to create a main node client");
+        Self {
+            client,
+            next_refill_at: MiniblockNumber(0),
+            miniblock_headers: Default::default(),
+            batch_ranges: Default::default(),
+            txs: Default::default(),
+        }
+    }
+
+    /// Cached version of [`HttpClient::get_raw_block_transactions`].
+    pub async fn get_raw_block_transactions(
+        &self,
+        miniblock: MiniblockNumber,
+    ) -> RpcResult<Vec<Transaction>> {
+        let txs = { self.txs.get(&miniblock).cloned() };
+        metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_raw_block_transactions");
+        match txs {
+            Some(txs) => {
+                metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_raw_block_transactions");
+                Ok(txs)
+            }
+            None => self.client.get_raw_block_transactions(miniblock).await,
+        }
+    }
+
+    /// Cached version of [`HttpClient::get_block_details`].
+ pub async fn get_block_details( + &self, + miniblock: MiniblockNumber, + ) -> RpcResult> { + let block_details = self.miniblock_headers.get(&miniblock).cloned(); + metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_block_details"); + match block_details { + Some(block_details) => { + metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_block_details"); + Ok(Some(block_details)) + } + None => self.client.get_block_details(miniblock).await, + } + } + + /// Cached version of [`HttpClient::get_miniblock_range`]. + pub async fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult> { + let range = self.batch_ranges.get(&batch).cloned(); + metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_miniblock_range"); + match range { + Some(range) => { + metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_miniblock_range"); + Ok(Some(range)) + } + None => self.client.get_miniblock_range(batch).await, + } + } + + /// Re-export of [`HttpClient::get_block_number`]. + /// Added to not expose the internal client. + pub async fn get_block_number(&self) -> RpcResult { + self.client.get_block_number().await + } + + /// Removes a miniblock data from the cache. + pub fn forget_miniblock(&mut self, miniblock: MiniblockNumber) { + self.miniblock_headers.remove(&miniblock); + self.txs.remove(&miniblock); + } + + pub fn forget_l1_batch(&mut self, l1_batch: L1BatchNumber) { + self.batch_ranges.remove(&l1_batch); + } + + pub async fn populate_miniblocks_cache( + &mut self, + current_miniblock: MiniblockNumber, + last_miniblock: MiniblockNumber, + ) { + // This method may be invoked frequently, but in order to take advantage of the concurrent fetching, + // we only need to do it once in a while. If we'll do it too often, we'll end up adding 1 element to + // the cache at a time, which eliminates the cache's purpose. + if current_miniblock < self.next_refill_at { + return; + } + let start = Instant::now(); + let last_miniblock_to_fetch = + last_miniblock.min(current_miniblock + MAX_CONCURRENT_REQUESTS as u32); + let task_futures = (current_miniblock.0..last_miniblock_to_fetch.0) + .map(MiniblockNumber) + .filter(|&miniblock| { + // If the miniblock is already in the cache, we don't need to fetch it. + !self.has_miniblock(miniblock) + }) + .map(|miniblock| Self::fetch_one_miniblock(&self.client, miniblock)); + + let results = futures::future::join_all(task_futures).await; + for result in results { + if let Ok(Some((header, range, txs))) = result { + let miniblock = header.number; + let batch = header.l1_batch_number; + self.miniblock_headers.insert(miniblock, header); + if let Some(range) = range { + self.batch_ranges.insert(batch, range); + } + self.txs.insert(miniblock, txs); + self.next_refill_at = self.next_refill_at.max(miniblock + 1); + } else { + // At the cache level, it's fine to just silence errors. + // The entry won't be included into the cache, and whoever uses the cache, will have to process + // a cache miss as they will. 
+ metrics::increment_counter!("external_node.fetcher.cache.errors"); + } + } + metrics::histogram!("external_node.fetcher.cache.populate", start.elapsed()); + } + + fn has_miniblock(&self, miniblock: MiniblockNumber) -> bool { + self.miniblock_headers.contains_key(&miniblock) + } + + async fn fetch_one_miniblock( + client: &HttpClient, + miniblock: MiniblockNumber, + ) -> RpcResult> { + // Error propagation here would mean that these entries won't appear in the cache. + // This would cause a cache miss, but generally it shouldn't be a problem as long as the API errors are rare. + // If the API returns lots of errors, that's a problem regardless of caching. + let start = Instant::now(); + let header = client.get_block_details(miniblock).await; + metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_block_details"); + let Some(header) = header? else { return Ok(None) }; + + let start = Instant::now(); + let miniblock_range = client.get_miniblock_range(header.l1_batch_number).await?; + metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_miniblock_range"); + + let start = Instant::now(); + let miniblock_txs = client.get_raw_block_transactions(miniblock).await?; + metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_raw_block_transactions"); + + Ok(Some((header, miniblock_range, miniblock_txs))) + } +} diff --git a/core/bin/zksync_core/src/sync_layer/external_io.rs b/core/bin/zksync_core/src/sync_layer/external_io.rs index ed480c8a37a0..8f5650ead78d 100644 --- a/core/bin/zksync_core/src/sync_layer/external_io.rs +++ b/core/bin/zksync_core/src/sync_layer/external_io.rs @@ -1,18 +1,28 @@ +use std::convert::TryFrom; use std::time::Duration; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; +use super::genesis::fetch_system_contract_by_hash; +use actix_rt::time::Instant; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; +use zksync_dal::ConnectionPool; +use zksync_types::{l1::L1Tx, l2::L2Tx, L1BatchNumber, MiniblockNumber, Transaction, H256}; +use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; use crate::state_keeper::{ + extractors, io::{ - common::{l1_batch_params, poll_until}, + common::{l1_batch_params, load_pending_batch, poll_until, StateKeeperStats}, + seal_logic::{seal_l1_batch_impl, seal_miniblock_impl}, L1BatchParams, PendingBatchData, StateKeeperIO, }, seal_criteria::SealerFn, updates::UpdatesManager, }; -use super::sync_action::{ActionQueue, SyncAction}; +use super::{ + sync_action::{ActionQueue, SyncAction}, + SyncState, +}; /// The interval between the action queue polling attempts for the new actions. const POLL_INTERVAL: Duration = Duration::from_millis(100); @@ -33,13 +43,17 @@ impl ExternalNodeSealer { fn should_seal_miniblock(&self) -> bool { let res = matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock)); - vlog::info!("Asked if should seal the miniblock. The answer is {res}"); + if res { + vlog::info!("Sealing miniblock"); + } res } fn should_seal_batch(&self) -> bool { let res = matches!(self.actions.peek_action(), Some(SyncAction::SealBatch)); - vlog::info!("Asked if should seal the batch. The answer is {res}"); + if res { + vlog::info!("Sealing the batch"); + } res } @@ -60,20 +74,81 @@ impl ExternalNodeSealer { /// to the one in the mempool IO (which is used in the main node). 
#[derive(Debug)] pub struct ExternalIO { - fee_account: Address, + pool: ConnectionPool, + + // Grafana metrics + statistics: StateKeeperStats, current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, actions: ActionQueue, + sync_state: SyncState, + main_node_url: String, } impl ExternalIO { - pub fn new(fee_account: Address, actions: ActionQueue) -> Self { + pub fn new( + pool: ConnectionPool, + actions: ActionQueue, + sync_state: SyncState, + main_node_url: String, + ) -> Self { + let mut storage = pool.access_storage_blocking(); + let last_sealed_block_header = storage.blocks_dal().get_newest_block_header(); + let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number(); + let num_contracts = storage.storage_load_dal().load_number_of_contracts(); + drop(storage); + + vlog::info!( + "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}", + last_sealed_block_header.number + 1, + last_miniblock_number + 1, + ); + + sync_state.set_local_block(last_miniblock_number); + Self { - fee_account, - current_l1_batch_number: L1BatchNumber(1), - current_miniblock_number: MiniblockNumber(1), + pool, + statistics: StateKeeperStats { num_contracts }, + current_l1_batch_number: last_sealed_block_header.number + 1, + current_miniblock_number: last_miniblock_number + 1, actions, + sync_state, + main_node_url, + } + } + + fn get_base_system_contract(&self, hash: H256) -> SystemContractCode { + let bytecode = self + .pool + .access_storage_blocking() + .storage_dal() + .get_factory_dep(hash); + + match bytecode { + Some(bytecode) => SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }, + None => { + let main_node_url = self.main_node_url.clone(); + let contract = crate::block_on(async move { + vlog::info!("Fetching base system contract bytecode from the main node"); + fetch_system_contract_by_hash(&main_node_url, hash) + .await + .expect("Failed to fetch base system contract bytecode from the main node") + }); + self.pool + .access_storage_blocking() + .storage_dal() + .insert_factory_deps( + self.current_miniblock_number, + vec![(contract.hash, be_words_to_bytes(&contract.code))] + .into_iter() + .collect(), + ); + contract + } } } } @@ -88,11 +163,23 @@ impl StateKeeperIO for ExternalIO { } fn load_pending_batch(&mut self) -> Option { - None + let mut storage = self.pool.access_storage_blocking(); + + let fee_account = storage + .blocks_dal() + .get_block_header(self.current_l1_batch_number - 1) + .unwrap_or_else(|| { + panic!( + "No block header for batch {}", + self.current_l1_batch_number - 1 + ) + }) + .fee_account_address; + load_pending_batch(&mut storage, self.current_l1_batch_number, fee_account) } fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option { - vlog::info!("Waiting for the new batch params"); + vlog::debug!("Waiting for the new batch params"); poll_until(POLL_INTERVAL, max_wait, || { match self.actions.pop_action()? 
{ SyncAction::OpenBatch { @@ -100,20 +187,49 @@ impl StateKeeperIO for ExternalIO { timestamp, l1_gas_price, l2_fair_gas_price, - base_system_contracts_hashes, + base_system_contracts_hashes: + BaseSystemContractsHashes { + bootloader, + default_aa, + }, + operator_address, } => { assert_eq!( number, self.current_l1_batch_number, "Batch number mismatch" ); + vlog::info!("Getting previous block hash"); + let previous_l1_batch_hash = { + let mut storage = self.pool.access_storage_blocking(); + + let stage_started_at: Instant = Instant::now(); + let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( + &mut storage, + self.current_l1_batch_number, + ); + metrics::histogram!( + "server.state_keeper.wait_for_prev_hash_time", + stage_started_at.elapsed() + ); + hash + }; + vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); + + vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); + + let base_system_contracts = BaseSystemContracts { + bootloader: self.get_base_system_contract(bootloader), + default_aa: self.get_base_system_contract(default_aa), + }; + Some(l1_batch_params( number, - self.fee_account, + operator_address, timestamp, - Default::default(), + previous_l1_batch_hash, l1_gas_price, l2_fair_gas_price, - load_base_contracts(base_system_contracts_hashes), + base_system_contracts, )) } other => { @@ -153,7 +269,7 @@ impl StateKeeperIO for ExternalIO { } fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { - vlog::info!( + vlog::debug!( "Waiting for the new tx, next action is {:?}", self.actions.peek_action() ); @@ -184,7 +300,7 @@ impl StateKeeperIO for ExternalIO { ); } - fn seal_miniblock(&mut self, _updates_manager: &UpdatesManager) { + fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { match self.actions.pop_action() { Some(SyncAction::SealMiniblock) => {} other => panic!( @@ -192,15 +308,57 @@ impl StateKeeperIO for ExternalIO { other ), }; + + let mut storage = self.pool.access_storage_blocking(); + let mut transaction = storage.start_transaction_blocking(); + + let start = Instant::now(); + // We don't store the transactions in the database until they're executed to not overcomplicate the state + // recovery on restart. So we have to store them here. + for tx in updates_manager.miniblock.executed_transactions.iter() { + if let Ok(l1_tx) = L1Tx::try_from(tx.transaction.clone()) { + // Using `Default` for `l1_block_number` is OK here, since it's only used to track the last processed + // L1 number in the `eth_watch` module. + transaction + .transactions_dal() + .insert_transaction_l1(l1_tx, Default::default()) + } else if let Ok(l2_tx) = L2Tx::try_from(tx.transaction.clone()) { + // Using `Default` for execution metrics should be OK here, since this data is not used on the EN. + transaction + .transactions_dal() + .insert_transaction_l2(l2_tx, Default::default()); + } else { + unreachable!("Transaction {:?} is neither L1 nor L2", tx.transaction); + } + } + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_time_stage", + start.elapsed(), + "stage" => "external_node_store_transactions" + ); + + // Now transactions are stored, and we may mark them as executed. 
+ seal_miniblock_impl( + self.current_miniblock_number, + self.current_l1_batch_number, + &mut self.statistics, + &mut transaction, + updates_manager, + false, + ); + transaction.commit_blocking(); + + self.sync_state + .set_local_block(self.current_miniblock_number); self.current_miniblock_number += 1; vlog::info!("Miniblock {} is sealed", self.current_miniblock_number); } fn seal_l1_batch( &mut self, - _block_result: vm::VmBlockResult, - _updates_manager: UpdatesManager, - _block_context: vm::vm_with_bootloader::DerivedBlockContext, + block_result: vm::VmBlockResult, + updates_manager: UpdatesManager, + block_context: vm::vm_with_bootloader::DerivedBlockContext, ) { match self.actions.pop_action() { Some(SyncAction::SealBatch) => {} @@ -209,22 +367,28 @@ impl StateKeeperIO for ExternalIO { other ), }; - self.current_l1_batch_number += 1; - vlog::info!("Batch {} is sealed", self.current_l1_batch_number); - } -} + let mut storage = self.pool.access_storage_blocking(); + seal_l1_batch_impl( + self.current_miniblock_number, + self.current_l1_batch_number, + &mut self.statistics, + &mut storage, + block_result, + updates_manager, + block_context, + ); -/// Currently it always returns the contracts that are present on the disk. -/// Later on, support for different base contracts versions will be added. -fn load_base_contracts(expected_hashes: BaseSystemContractsHashes) -> BaseSystemContracts { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let local_hashes = base_system_contracts.hashes(); + vlog::info!("Batch {} is sealed", self.current_l1_batch_number); - assert_eq!( - local_hashes, expected_hashes, - "Local base system contract hashes do not match ones required to process the L1 batch" - ); + // Mimic the metric emitted by the main node to reuse existing grafana charts. + metrics::gauge!( + "server.block_number", + self.current_l1_batch_number.0 as f64, + "stage" => "sealed" + ); - base_system_contracts + self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. + self.current_l1_batch_number += 1; + } } diff --git a/core/bin/zksync_core/src/sync_layer/fetcher.rs b/core/bin/zksync_core/src/sync_layer/fetcher.rs index c3908c378420..dc2cb3ab0e54 100644 --- a/core/bin/zksync_core/src/sync_layer/fetcher.rs +++ b/core/bin/zksync_core/src/sync_layer/fetcher.rs @@ -1,26 +1,21 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; -use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber}; -use zksync_web3_decl::{ - jsonrpsee::{ - core::{Error as RpcError, RpcResult}, - http_client::{HttpClient, HttpClientBuilder}, - }, - namespaces::{EthNamespaceClient, ZksNamespaceClient}, -}; +use tokio::sync::watch::Receiver; -use crate::sync_layer::sync_action::{BatchStatusChange, SyncAction}; +use crate::sync_layer::sync_action::{ActionQueue, BatchStatusChange, SyncAction}; +use zksync_dal::ConnectionPool; +use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber}; +use zksync_web3_decl::jsonrpsee::core::{Error as RpcError, RpcResult}; -use super::sync_action::ActionQueue; +use super::{cached_main_node_client::CachedMainNodeClient, SyncState}; const DELAY_INTERVAL: Duration = Duration::from_millis(500); -const RECONNECT_INTERVAL: Duration = Duration::from_secs(5); +const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); /// Structure responsible for fetching batches and miniblock data from the main node. 
#[derive(Debug)] pub struct MainNodeFetcher { - main_node_url: String, - client: HttpClient, + client: CachedMainNodeClient, current_l1_batch: L1BatchNumber, current_miniblock: MiniblockNumber, @@ -29,22 +24,53 @@ pub struct MainNodeFetcher { last_committed_l1_batch: L1BatchNumber, actions: ActionQueue, + sync_state: SyncState, + stop_receiver: Receiver, } impl MainNodeFetcher { pub fn new( + pool: ConnectionPool, main_node_url: &str, - current_l1_batch: L1BatchNumber, - current_miniblock: MiniblockNumber, - last_executed_l1_batch: L1BatchNumber, - last_proven_l1_batch: L1BatchNumber, - last_committed_l1_batch: L1BatchNumber, actions: ActionQueue, + sync_state: SyncState, + stop_receiver: Receiver, ) -> Self { - let client = Self::build_client(main_node_url); + let mut storage = pool.access_storage_blocking(); + let last_sealed_block_header = storage.blocks_dal().get_newest_block_header(); + let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number(); + + // It's important to know whether we have opened a new batch already or just sealed the previous one. + // Depending on it, we must either insert `OpenBatch` item into the queue, or not. + let was_new_batch_open = storage.blocks_dal().pending_batch_exists(); + + // Miniblocks are always fully processed. + let current_miniblock = last_miniblock_number + 1; + // Decide whether the next batch should be explicitly opened or not. + let current_l1_batch = if was_new_batch_open { + // No `OpenBatch` action needed. + last_sealed_block_header.number + 1 + } else { + // We need to open the next batch. + last_sealed_block_header.number + }; + + let last_executed_l1_batch = storage + .blocks_dal() + .get_number_of_last_block_executed_on_eth() + .unwrap_or_default(); + let last_proven_l1_batch = storage + .blocks_dal() + .get_number_of_last_block_proven_on_eth() + .unwrap_or_default(); + let last_committed_l1_batch = storage + .blocks_dal() + .get_number_of_last_block_committed_on_eth() + .unwrap_or_default(); + + let client = CachedMainNodeClient::build_client(main_node_url); Self { - main_node_url: main_node_url.into(), client, current_l1_batch, current_miniblock, @@ -54,15 +80,11 @@ impl MainNodeFetcher { last_committed_l1_batch, actions, + sync_state, + stop_receiver, } } - fn build_client(main_node_url: &str) -> HttpClient { - HttpClientBuilder::default() - .build(main_node_url) - .expect("Unable to create a main node client") - } - pub async fn run(mut self) { vlog::info!( "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", @@ -72,11 +94,14 @@ impl MainNodeFetcher { // Run the main routine and reconnect upon the network errors. loop { match self.run_inner().await { - Ok(()) => unreachable!("Fetcher actor never exits"), - Err(RpcError::Transport(err)) => { + Ok(()) => { + vlog::info!("Stop signal received, exiting the fetcher routine"); + return; + } + Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { vlog::warn!("Following transport error occurred: {}", err); - vlog::info!("Trying to reconnect"); - self.reconnect().await; + vlog::info!("Trying again after a delay"); + tokio::time::sleep(RETRY_DELAY_INTERVAL).await; } Err(err) => { panic!("Unexpected error in the fetcher: {}", err); @@ -85,24 +110,25 @@ impl MainNodeFetcher { } } - async fn reconnect(&mut self) { - loop { - self.client = Self::build_client(&self.main_node_url); - if self.client.chain_id().await.is_ok() { - vlog::info!("Reconnected"); - break; - } - vlog::warn!( - "Reconnect attempt unsuccessful. 
Next attempt would happen after a timeout" - ); - std::thread::sleep(RECONNECT_INTERVAL); - } + fn check_if_cancelled(&self) -> bool { + *self.stop_receiver.borrow() } async fn run_inner(&mut self) -> RpcResult<()> { loop { + if self.check_if_cancelled() { + return Ok(()); + } + let mut progressed = false; + let last_main_node_block = + MiniblockNumber(self.client.get_block_number().await?.as_u32()); + self.sync_state.set_main_node_block(last_main_node_block); + + self.client + .populate_miniblocks_cache(self.current_miniblock, last_main_node_block) + .await; if self.actions.has_action_capacity() { progressed |= self.fetch_next_miniblock().await?; } @@ -121,6 +147,9 @@ impl MainNodeFetcher { /// Tries to fetch the next miniblock and insert it to the sync queue. /// Returns `true` if a miniblock was processed and `false` otherwise. async fn fetch_next_miniblock(&mut self) -> RpcResult { + let start = Instant::now(); + + let request_start = Instant::now(); let Some(miniblock_header) = self .client .get_block_details(self.current_miniblock) @@ -128,6 +157,12 @@ impl MainNodeFetcher { else { return Ok(false); }; + metrics::histogram!( + "external_node.fetcher.requests", + request_start.elapsed(), + "stage" => "get_block_details", + "actor" => "miniblock_fetcher" + ); let mut new_actions = Vec::new(); if miniblock_header.l1_batch_number != self.current_l1_batch { @@ -149,8 +184,11 @@ impl MainNodeFetcher { l1_gas_price: miniblock_header.l1_gas_price, l2_fair_gas_price: miniblock_header.l2_fair_gas_price, base_system_contracts_hashes: miniblock_header.base_system_contracts_hashes, + operator_address: miniblock_header.operator_address, }); + metrics::gauge!("external_node.fetcher.l1_batch", miniblock_header.l1_batch_number.0 as f64, "status" => "open"); + self.client.forget_l1_batch(self.current_l1_batch); self.current_l1_batch += 1; } else { // New batch implicitly means a new miniblock, so we only need to push the miniblock action @@ -159,39 +197,74 @@ impl MainNodeFetcher { number: miniblock_header.number, timestamp: miniblock_header.timestamp, }); + metrics::gauge!( + "external_node.fetcher.miniblock", + miniblock_header.number.0 as f64 + ); } + let request_start = Instant::now(); let miniblock_txs = self .client .get_raw_block_transactions(self.current_miniblock) .await? .into_iter() .map(|tx| SyncAction::Tx(Box::new(tx))); + metrics::histogram!( + "external_node.fetcher.requests", + request_start.elapsed(), + "stage" => "get_raw_block_transactions", + "actor" => "miniblock_fetcher" + ); + + metrics::counter!( + "server.processed_txs", + miniblock_txs.len() as u64, + "stage" => "mempool_added" + ); new_actions.extend(miniblock_txs); - new_actions.push(SyncAction::SealMiniblock); // Check if this was the last miniblock in the batch. // If we will receive `None` here, it would mean that it's the currently open batch and it was not sealed // after the current miniblock. + let request_start = Instant::now(); let is_last_miniblock_of_batch = self .client .get_miniblock_range(self.current_l1_batch) .await? .map(|(_, last)| last.as_u32() == miniblock_header.number.0) .unwrap_or(false); + metrics::histogram!( + "external_node.fetcher.requests", + request_start.elapsed(), + "stage" => "get_miniblock_range", + "actor" => "miniblock_fetcher" + ); + + // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. + // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. 
if is_last_miniblock_of_batch { new_actions.push(SyncAction::SealBatch); + } else { + new_actions.push(SyncAction::SealMiniblock); } vlog::info!("New miniblock: {}", miniblock_header.number); + self.client.forget_miniblock(self.current_miniblock); self.current_miniblock += 1; self.actions.push_actions(new_actions); + + metrics::histogram!( + "external_node.fetcher.fetch_next_miniblock", + start.elapsed() + ); Ok(true) } /// Goes through the already fetched batches trying to update their statuses. /// Returns `true` if at least one batch was updated, and `false` otherwise. async fn update_batch_statuses(&mut self) -> RpcResult { + let start = Instant::now(); assert!( self.last_executed_l1_batch <= self.last_proven_l1_batch, "Incorrect local state: executed batch must be proven" @@ -206,15 +279,26 @@ impl MainNodeFetcher { ); let mut applied_updates = false; - for batch in - (self.last_executed_l1_batch.next().0..=self.current_l1_batch.0).map(L1BatchNumber) - { + let mut batch = self.last_executed_l1_batch.next(); + // In this loop we try to progress on the batch statuses, utilizing the same request to the node to potentially + // update all three statuses (e.g. if the node is still syncing), but also skipping the gaps in the statuses + // (e.g. if the last executed batch is 10, but the last proven is 20, we don't need to check the batches 11-19). + while batch <= self.current_l1_batch { // While we may receive `None` for the `self.current_l1_batch`, it's OK: open batch is guaranteed to not // be sent to L1. + let request_start = Instant::now(); let Some((start_miniblock, _)) = self.client.get_miniblock_range(batch).await? else { return Ok(applied_updates); }; + metrics::histogram!( + "external_node.fetcher.requests", + request_start.elapsed(), + "stage" => "get_miniblock_range", + "actor" => "batch_status_fetcher" + ); + // We could've used any miniblock from the range, all of them share the same info. + let request_start = Instant::now(); let Some(batch_info) = self .client .get_block_details(MiniblockNumber(start_miniblock.as_u32())) @@ -226,17 +310,33 @@ impl MainNodeFetcher { but API has no information about this miniblock", start_miniblock, batch ); }; + metrics::histogram!( + "external_node.fetcher.requests", + request_start.elapsed(), + "stage" => "get_block_details", + "actor" => "batch_status_fetcher" + ); applied_updates |= self.update_committed_batch(&batch_info); applied_updates |= self.update_proven_batch(&batch_info); applied_updates |= self.update_executed_batch(&batch_info); + // Check whether we can skip a part of the range. if batch_info.commit_tx_hash.is_none() { // No committed batches after this one. break; + } else if batch_info.prove_tx_hash.is_none() && batch < self.last_committed_l1_batch { + // The interval between this batch and the last committed one is not proven. + batch = self.last_committed_l1_batch.next(); + } else if batch_info.executed_at.is_none() && batch < self.last_proven_l1_batch { + // The interval between this batch and the last proven one is not executed. 
+ batch = self.last_proven_l1_batch.next(); + } else { + batch += 1; } } + metrics::histogram!("external_node.update_batch_statuses", start.elapsed()); Ok(applied_updates) } @@ -255,6 +355,7 @@ impl MainNodeFetcher { happened_at: batch_info.committed_at.unwrap(), }); vlog::info!("Batch {}: committed", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "committed"); self.last_committed_l1_batch += 1; true } else { @@ -277,6 +378,7 @@ impl MainNodeFetcher { happened_at: batch_info.proven_at.unwrap(), }); vlog::info!("Batch {}: proven", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "proven"); self.last_proven_l1_batch += 1; true } else { @@ -299,6 +401,7 @@ impl MainNodeFetcher { happened_at: batch_info.executed_at.unwrap(), }); vlog::info!("Batch {}: executed", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "executed"); self.last_executed_l1_batch += 1; true } else { diff --git a/core/bin/zksync_core/src/sync_layer/genesis.rs b/core/bin/zksync_core/src/sync_layer/genesis.rs index 06e3101622d1..2bb4dade8aa5 100644 --- a/core/bin/zksync_core/src/sync_layer/genesis.rs +++ b/core/bin/zksync_core/src/sync_layer/genesis.rs @@ -1,23 +1,33 @@ -use crate::genesis::ensure_genesis_state; +use crate::genesis::{ensure_genesis_state, GenesisParams}; -use zksync_config::ZkSyncConfig; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::StorageProcessor; -use zksync_types::{L1BatchNumber, H256}; -use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient}; - -pub async fn perform_genesis_if_needed(storage: &mut StorageProcessor<'_>, config: &ZkSyncConfig) { - let mut transaction = storage.start_transaction().await; - let main_node_url = config - .api - .web3_json_rpc - .main_node_url - .as_ref() - .expect("main node url is not set"); - - let genesis_block_hash = ensure_genesis_state(&mut transaction, config).await; - - validate_genesis_state(main_node_url, genesis_block_hash).await; - transaction.commit().await; +use zksync_types::{L1BatchNumber, L2ChainId, H256}; +use zksync_web3_decl::{ + jsonrpsee::{core::error::Error, http_client::HttpClientBuilder}, + namespaces::ZksNamespaceClient, +}; + +pub async fn perform_genesis_if_needed( + storage: &mut StorageProcessor<'_>, + zksync_chain_id: L2ChainId, + base_system_contracts_hashes: BaseSystemContractsHashes, + main_node_url: String, +) { + let mut transaction = storage.start_transaction_blocking(); + + let genesis_block_hash = ensure_genesis_state( + &mut transaction, + zksync_chain_id, + GenesisParams::ExternalNode { + base_system_contracts_hashes, + main_node_url: main_node_url.clone(), + }, + ) + .await; + + validate_genesis_state(&main_node_url, genesis_block_hash).await; + transaction.commit_blocking(); } // When running an external node, we want to make sure we have the same @@ -39,3 +49,33 @@ async fn validate_genesis_state(main_node_url: &str, root_hash: H256) { ); } } + +pub async fn fetch_system_contract_by_hash( + main_node_url: &str, + hash: H256, +) -> Result { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + let bytecode = client + .get_bytecode_by_hash(hash) + .await? 
+ .expect("Failed to get base system contract bytecode"); + assert_eq!( + hash, + zksync_utils::bytecode::hash_bytecode(&bytecode), + "Got invalid base system contract bytecode from main node" + ); + Ok(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytecode), + hash, + }) +} + +pub async fn fetch_base_system_contracts( + main_node_url: &str, + hashes: BaseSystemContractsHashes, +) -> Result { + Ok(BaseSystemContracts { + bootloader: fetch_system_contract_by_hash(main_node_url, hashes.bootloader).await?, + default_aa: fetch_system_contract_by_hash(main_node_url, hashes.default_aa).await?, + }) +} diff --git a/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs b/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs index 080f3126d8aa..ba649576a2df 100644 --- a/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs +++ b/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs @@ -14,6 +14,7 @@ use vm::{ VmBlockResult, VmExecutionResult, }; use zksync_types::tx::tx_execution_info::TxExecutionStatus; +use zksync_types::vm_trace::{VmExecutionTrace, VmTrace}; use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, @@ -48,15 +49,17 @@ impl L1BatchExecutorBuilder for MockBatchExecutorBuilder { gas_used: Default::default(), contracts_used: Default::default(), revert_reason: Default::default(), - trace: Default::default(), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), total_log_queries: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), }, block_tip_result: VmPartialExecutionResult { logs: Default::default(), revert_reason: Default::default(), contracts_used: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), }, }; @@ -76,28 +79,29 @@ fn partial_execution_result() -> VmPartialExecutionResult { revert_reason: Default::default(), contracts_used: Default::default(), cycles_used: Default::default(), + computational_gas_used: Default::default(), } } /// Creates a `TxExecutionResult` object denoting a successful tx execution. 
pub(crate) fn successful_exec() -> TxExecutionResult { - let mut result = TxExecutionResult::new(Ok(( - VmTxExecutionResult { + TxExecutionResult::Success { + tx_result: Box::new(VmTxExecutionResult { status: TxExecutionStatus::Success, result: partial_execution_result(), + call_traces: vec![], gas_refunded: 0, operator_suggested_refund: 0, + }), + tx_metrics: ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), }, - vec![], - ))); - result.add_tx_metrics(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }); - result.add_bootloader_result(Ok(partial_execution_result())); - result.add_bootloader_metrics(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }); - result + bootloader_dry_run_metrics: ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), + }, + bootloader_dry_run_result: Box::new(partial_execution_result()), + compressed_bytecodes: vec![], + } } diff --git a/core/bin/zksync_core/src/sync_layer/mod.rs b/core/bin/zksync_core/src/sync_layer/mod.rs index 6ae8b2eb8d39..85faced646aa 100644 --- a/core/bin/zksync_core/src/sync_layer/mod.rs +++ b/core/bin/zksync_core/src/sync_layer/mod.rs @@ -1,11 +1,14 @@ pub mod batch_status_updater; +mod cached_main_node_client; pub mod external_io; pub mod fetcher; pub mod genesis; pub mod mock_batch_executor; pub(crate) mod sync_action; +mod sync_state; pub use self::{ external_io::{ExternalIO, ExternalNodeSealer}, sync_action::ActionQueue, + sync_state::SyncState, }; diff --git a/core/bin/zksync_core/src/sync_layer/sync_action.rs b/core/bin/zksync_core/src/sync_layer/sync_action.rs index bf60a7831878..311db9dd0802 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_action.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_action.rs @@ -1,11 +1,12 @@ use std::{ collections::VecDeque, sync::{Arc, RwLock}, + time::Instant, }; use chrono::{DateTime, Utc}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction, H256}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H256}; /// Action queue is used to communicate between the fetcher and the rest of the external node /// by collecting the fetched data in memory until it gets processed by the different entities. @@ -21,14 +22,15 @@ impl ActionQueue { /// Removes the first action from the queue. pub(crate) fn pop_action(&self) -> Option { - let mut write_lock = self.inner.write().unwrap(); - write_lock.actions.pop_front() + self.write_lock().actions.pop_front().map(|action| { + metrics::decrement_gauge!("external_node.action_queue.action_queue_size", 1_f64); + action + }) } /// Returns the first action from the queue without removing it. pub(crate) fn peek_action(&self) -> Option { - let read_lock = self.inner.read().unwrap(); - read_lock.actions.front().cloned() + self.read_lock().actions.front().cloned() } /// Returns true if the queue has capacity for a new action. @@ -41,8 +43,7 @@ impl ActionQueue { // decompose received data into a sequence of actions. // This is not a problem, since the size of decomposed action is much smaller // than the configured capacity. - let read_lock = self.inner.read().unwrap(); - read_lock.actions.len() < ACTION_CAPACITY + self.read_lock().actions.len() < ACTION_CAPACITY } /// Returns true if the queue has capacity for a new status change. 
@@ -52,7 +53,7 @@ impl ActionQueue { // We don't really care about any particular queue size, as the only intention // of this check is to prevent memory exhaustion. - let read_lock = self.inner.read().unwrap(); + let read_lock = self.read_lock(); read_lock.commit_status_changes.len() < STATUS_CHANGE_CAPACITY && read_lock.prove_status_changes.len() < STATUS_CHANGE_CAPACITY && read_lock.execute_status_changes.len() < STATUS_CHANGE_CAPACITY @@ -66,37 +67,70 @@ impl ActionQueue { pub(crate) fn push_actions(&self, actions: Vec) { // We need to enforce the ordering of actions to make sure that they can be processed. Self::check_action_sequence(&actions).expect("Invalid sequence of actions."); + metrics::increment_gauge!( + "external_node.action_queue.action_queue_size", + actions.len() as f64 + ); - let mut write_lock = self.inner.write().unwrap(); - write_lock.actions.extend(actions); + self.write_lock().actions.extend(actions); } /// Pushes a notification about certain batch being committed. pub(crate) fn push_commit_status_change(&self, change: BatchStatusChange) { - let mut write_lock = self.inner.write().unwrap(); - write_lock.commit_status_changes.push_back(change); + metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "commit"); + self.write_lock().commit_status_changes.push_back(change); } /// Pushes a notification about certain batch being proven. pub(crate) fn push_prove_status_change(&self, change: BatchStatusChange) { - let mut write_lock = self.inner.write().unwrap(); - write_lock.prove_status_changes.push_back(change); + metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "prove"); + self.write_lock().prove_status_changes.push_back(change); } /// Pushes a notification about certain batch being executed. pub(crate) fn push_execute_status_change(&self, change: BatchStatusChange) { - let mut write_lock = self.inner.write().unwrap(); - write_lock.execute_status_changes.push_back(change); + metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "execute"); + self.write_lock().execute_status_changes.push_back(change); } /// Collects all status changes and returns them. 
- pub(crate) fn take_status_changes(&self) -> StatusChanges { - let mut write_lock = self.inner.write().unwrap(); - StatusChanges { - commit: write_lock.commit_status_changes.drain(..).collect(), - prove: write_lock.prove_status_changes.drain(..).collect(), - execute: write_lock.execute_status_changes.drain(..).collect(), + pub(crate) fn take_status_changes(&self, last_sealed_batch: L1BatchNumber) -> StatusChanges { + fn drain( + queue: &mut VecDeque, + last_sealed_batch: L1BatchNumber, + ) -> Vec { + let range_end = queue + .iter() + .position(|change| change.number > last_sealed_batch) + .unwrap_or(queue.len()); + queue.drain(..range_end).collect() } + + let mut write_lock = self.write_lock(); + + let result = StatusChanges { + commit: drain(&mut write_lock.commit_status_changes, last_sealed_batch), + prove: drain(&mut write_lock.prove_status_changes, last_sealed_batch), + execute: drain(&mut write_lock.execute_status_changes, last_sealed_batch), + }; + + metrics::gauge!( + "external_node.action_queue.status_change_queue_size", + write_lock.commit_status_changes.len() as f64, + "item" => "commit" + ); + metrics::gauge!( + "external_node.action_queue.status_change_queue_size", + write_lock.prove_status_changes.len() as f64, + "item" => "prove" + ); + metrics::gauge!( + "external_node.action_queue.status_change_queue_size", + write_lock.execute_status_changes.len() as f64, + "item" => "execute" + ); + + result } /// Checks whether the action sequence is valid. @@ -106,12 +140,10 @@ impl ActionQueue { // Rules for the sequence: // 1. Must start with either `OpenBatch` or `Miniblock`, both of which may be met only once. // 2. Followed by a sequence of `Tx` actions which consists of 0 or more elements. - // 3. Must have `SealMiniblock` come after transactions. - // 4. May or may not have `SealBatch` come after `SealMiniblock`. + // 3. Must have either `SealMiniblock` or `SealBatch` at the end. let mut opened = false; let mut miniblock_sealed = false; - let mut batch_sealed = false; for action in actions { match action { @@ -126,18 +158,12 @@ impl ActionQueue { return Err(format!("Unexpected Tx: {:?}", actions)); } } - SyncAction::SealMiniblock => { + SyncAction::SealMiniblock | SyncAction::SealBatch => { if !opened || miniblock_sealed { - return Err(format!("Unexpected SealMiniblock: {:?}", actions)); + return Err(format!("Unexpected SealMiniblock/SealBatch: {:?}", actions)); } miniblock_sealed = true; } - SyncAction::SealBatch => { - if !miniblock_sealed || batch_sealed { - return Err(format!("Unexpected SealBatch: {:?}", actions)); - } - batch_sealed = true; - } } } if !miniblock_sealed { @@ -145,6 +171,20 @@ impl ActionQueue { } Ok(()) } + + fn read_lock(&self) -> std::sync::RwLockReadGuard<'_, ActionQueueInner> { + let start = Instant::now(); + let lock = self.inner.read().unwrap(); + metrics::histogram!("external_node.action_queue.lock", start.elapsed(), "action" => "acquire_read"); + lock + } + + fn write_lock(&self) -> std::sync::RwLockWriteGuard<'_, ActionQueueInner> { + let start = Instant::now(); + let lock = self.inner.write().unwrap(); + metrics::histogram!("external_node.action_queue.lock", start.elapsed(), "action" => "acquire_write"); + lock + } } #[derive(Debug)] @@ -154,6 +194,13 @@ pub(crate) struct StatusChanges { pub(crate) execute: Vec, } +impl StatusChanges { + /// Returns true if there are no status changes. 
+ pub(crate) fn is_empty(&self) -> bool { + self.commit.is_empty() && self.prove.is_empty() && self.execute.is_empty() + } +} + #[derive(Debug, Default)] struct ActionQueueInner { actions: VecDeque, @@ -171,6 +218,7 @@ pub(crate) enum SyncAction { l1_gas_price: u64, l2_fair_gas_price: u64, base_system_contracts_hashes: BaseSystemContractsHashes, + operator_address: Address, }, Miniblock { number: MiniblockNumber, @@ -208,6 +256,7 @@ mod tests { l1_gas_price: 1, l2_fair_gas_price: 1, base_system_contracts_hashes: BaseSystemContractsHashes::default(), + operator_address: Default::default(), } } @@ -246,13 +295,14 @@ mod tests { fn correct_sequence() { let test_vector = vec![ vec![open_batch(), seal_miniblock()], + vec![open_batch(), seal_batch()], vec![open_batch(), tx(), seal_miniblock()], - vec![open_batch(), seal_miniblock(), seal_batch()], - vec![open_batch(), tx(), seal_miniblock(), seal_batch()], + vec![open_batch(), tx(), tx(), tx(), seal_miniblock()], + vec![open_batch(), tx(), seal_batch()], vec![miniblock(), seal_miniblock()], + vec![miniblock(), seal_batch()], vec![miniblock(), tx(), seal_miniblock()], - vec![miniblock(), seal_miniblock(), seal_batch()], - vec![miniblock(), tx(), seal_miniblock(), seal_batch()], + vec![miniblock(), tx(), seal_batch()], ]; for (idx, sequence) in test_vector.into_iter().enumerate() { ActionQueue::check_action_sequence(&sequence) @@ -292,30 +342,20 @@ mod tests { vec![miniblock(), seal_miniblock(), seal_miniblock()], "Unexpected SealMiniblock", ), - // Unexpected SealBatch. - ( - vec![open_batch(), tx(), seal_batch()], - "Unexpected SealBatch", - ), - (vec![open_batch(), seal_batch()], "Unexpected SealBatch"), ( vec![open_batch(), seal_miniblock(), seal_batch(), seal_batch()], - "Unexpected SealBatch", + "Unexpected SealMiniblock/SealBatch", ), - (vec![miniblock(), seal_batch()], "Unexpected SealBatch"), ( vec![miniblock(), seal_miniblock(), seal_batch(), seal_batch()], - "Unexpected SealBatch", - ), - (vec![seal_batch()], "Unexpected SealBatch"), - ( - vec![miniblock(), tx(), seal_batch()], - "Unexpected SealBatch", + "Unexpected SealMiniblock/SealBatch", ), + (vec![seal_batch()], "Unexpected SealMiniblock/SealBatch"), ]; for (idx, (sequence, expected_err)) in test_vector.into_iter().enumerate() { - let err = - ActionQueue::check_action_sequence(&sequence).expect_err("Invalid sequence passed"); + let Err(err) = ActionQueue::check_action_sequence(&sequence) else { + panic!("Invalid sequence passed the test. Sequence #{}, expected error: {}", idx, expected_err); + }; assert!( err.starts_with(expected_err), "Sequence #{} failed. Expected error: {}, got: {}", @@ -325,4 +365,50 @@ mod tests { ); } } + + fn batch_status_change(batch: u32) -> BatchStatusChange { + BatchStatusChange { + number: L1BatchNumber(batch), + l1_tx_hash: H256::default(), + happened_at: Utc::now(), + } + } + + /// Checks that `ActionQueue::take_status_changes` correctly takes the status changes from the queue. 
+ #[test] + fn take_status_changes() { + let queue = ActionQueue::new(); + let taken = queue.take_status_changes(L1BatchNumber(1000)); + assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty()); + + queue.push_commit_status_change(batch_status_change(1)); + queue.push_prove_status_change(batch_status_change(1)); + + let taken = queue.take_status_changes(L1BatchNumber(0)); + assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty()); + + let taken = queue.take_status_changes(L1BatchNumber(1)); + assert!(taken.commit.len() == 1 && taken.prove.len() == 1 && taken.execute.is_empty()); + // Changes are already taken. + let taken = queue.take_status_changes(L1BatchNumber(1)); + assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty()); + + // Test partial draining. + queue.push_commit_status_change(batch_status_change(2)); + queue.push_commit_status_change(batch_status_change(3)); + queue.push_commit_status_change(batch_status_change(4)); + queue.push_prove_status_change(batch_status_change(2)); + queue.push_prove_status_change(batch_status_change(3)); + queue.push_execute_status_change(batch_status_change(1)); + queue.push_execute_status_change(batch_status_change(2)); + let taken = queue.take_status_changes(L1BatchNumber(3)); + assert_eq!(taken.commit.len(), 2); + assert_eq!(taken.prove.len(), 2); + assert_eq!(taken.execute.len(), 2); + + let taken = queue.take_status_changes(L1BatchNumber(4)); + assert_eq!(taken.commit.len(), 1); + assert_eq!(taken.prove.len(), 0); + assert_eq!(taken.execute.len(), 0); + } } diff --git a/core/bin/zksync_core/src/sync_layer/sync_state.rs b/core/bin/zksync_core/src/sync_layer/sync_state.rs new file mode 100644 index 000000000000..7fdb1f28cc40 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/sync_state.rs @@ -0,0 +1,149 @@ +use std::sync::{Arc, RwLock}; + +use zksync_types::MiniblockNumber; + +/// `SyncState` is a structure that holds the state of the syncing process. +/// The intended use case is to signalize to Web3 API whether the node is fully synced. +/// Data inside is expected to be updated by both `MainNodeFetcher` (on last block available on the main node) +/// and `ExternalIO` (on latest sealed miniblock). +/// +/// This structure operates on miniblocks rather than L1 batches, since this is the default unit used in the web3 API. +#[derive(Debug, Default, Clone)] +pub struct SyncState { + inner: Arc>, +} + +/// A threshold constant intended to keep the sync status less flaky. +/// This gives the external node some room to fetch new miniblocks without losing the sync status. 
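The constant and `is_synced` check introduced below treat the node as synced while it trails the main node by at most `SYNC_MINIBLOCK_DELTA` miniblocks. A self-contained sketch of that comparison (illustrative only; the real code also exports gauges and handles the case where either block number is not yet known):

// `checked_sub` guards against the local node being ahead of the main node.
fn is_synced(main_node_block: u32, local_block: u32, delta: u32) -> bool {
    match main_node_block.checked_sub(local_block) {
        Some(lag) => lag <= delta,
        // Local node ahead of the main node: treated as not synced and left
        // for the reorg detector, mirroring the logic below.
        None => false,
    }
}

fn main() {
    assert!(is_synced(20, 12, 10)); // a lag of 8 is within the tolerance
    assert!(!is_synced(25, 12, 10)); // a lag of 13 exceeds it
    assert!(!is_synced(10, 12, 10)); // local node ahead of the main node
}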
+const SYNC_MINIBLOCK_DELTA: u32 = 10; + +impl SyncState { + pub fn new() -> Self { + Self::default() + } + + pub(crate) fn get_main_node_block(&self) -> MiniblockNumber { + self.inner + .read() + .unwrap() + .main_node_block + .unwrap_or_default() + } + + pub(crate) fn get_local_block(&self) -> MiniblockNumber { + self.inner.read().unwrap().local_block.unwrap_or_default() + } + + pub(super) fn set_main_node_block(&self, block: MiniblockNumber) { + let mut inner = self.inner.write().unwrap(); + if let Some(local_block) = inner.local_block { + if block.0 < local_block.0 { + panic!( + "main_node_block({}) is less than local_block({})", + block, local_block + ); + } + } + inner.main_node_block = Some(block); + self.update_sync_metric(&inner); + } + + pub(super) fn set_local_block(&self, block: MiniblockNumber) { + let mut inner = self.inner.write().unwrap(); + if let Some(main_node_block) = inner.main_node_block { + if block.0 > main_node_block.0 { + // Probably it's fine -- will be checked by the reorg detector. + vlog::info!( + "local_block({}) is greater than main_node_block({})", + block, + main_node_block + ); + } + } + inner.local_block = Some(block); + self.update_sync_metric(&inner); + } + + pub(crate) fn is_synced(&self) -> bool { + let inner = self.inner.read().unwrap(); + self.is_synced_inner(&inner).0 + } + + fn update_sync_metric(&self, inner: &SyncStateInner) { + let (is_synced, lag) = self.is_synced_inner(inner); + metrics::gauge!("external_node.synced", is_synced as u64 as f64); + if let Some(lag) = lag { + metrics::gauge!("external_node.sync_lag", lag as f64); + } + } + + fn is_synced_inner(&self, inner: &SyncStateInner) -> (bool, Option) { + if let (Some(main_node_block), Some(local_block)) = + (inner.main_node_block, inner.local_block) + { + let Some(block_diff) = main_node_block + .0 + .checked_sub(local_block.0) + else { + return (false, None); + }; + (block_diff <= SYNC_MINIBLOCK_DELTA, Some(block_diff)) + } else { + (false, None) + } + } +} + +#[derive(Debug, Default)] +struct SyncStateInner { + main_node_block: Option, + local_block: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sync_state() { + let sync_state = SyncState::new(); + + // The node is not synced if there is no data. + assert!(!sync_state.is_synced()); + + // The gap is too big, still not synced. + sync_state.set_local_block(MiniblockNumber(0)); + sync_state.set_main_node_block(MiniblockNumber(SYNC_MINIBLOCK_DELTA + 1)); + assert!(!sync_state.is_synced()); + + // Within the threshold, the node is synced. + sync_state.set_local_block(MiniblockNumber(1)); + assert!(sync_state.is_synced()); + + // Can reach the main node last block. + sync_state.set_local_block(MiniblockNumber(SYNC_MINIBLOCK_DELTA + 1)); + assert!(sync_state.is_synced()); + + // Main node can again move forward. + sync_state.set_main_node_block(MiniblockNumber(2 * SYNC_MINIBLOCK_DELTA + 2)); + assert!(!sync_state.is_synced()); + } + + #[test] + fn test_sync_state_doesnt_panic_on_local_block() { + let sync_state = SyncState::new(); + + sync_state.set_main_node_block(MiniblockNumber(1)); + sync_state.set_local_block(MiniblockNumber(2)); + // ^ should not panic, as we defer the situation to the reorg detector. 
+ } + + #[test] + #[should_panic(expected = "main_node_block(1) is less than local_block(2)")] + fn test_sync_state_panic_on_main_node_block() { + let sync_state = SyncState::new(); + + sync_state.set_local_block(MiniblockNumber(2)); + sync_state.set_main_node_block(MiniblockNumber(1)); + } +} diff --git a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs index 1ff506ff1539..64e66f87f9d6 100644 --- a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs +++ b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs @@ -1,55 +1,208 @@ -use serde::{Deserialize, Serialize}; +use std::cell::RefCell; use std::collections::hash_map::DefaultHasher; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::rc::Rc; +use std::sync::Arc; use std::time::Instant; -use vm::zk_evm::bitflags::_core::cell::RefCell; + +use async_trait::async_trait; +use rand::Rng; +use serde::{Deserialize, Serialize}; + use vm::zk_evm::ethereum_types::H256; +use vm::HistoryDisabled; use vm::{memory::SimpleMemory, StorageOracle, MAX_CYCLES_FOR_TX}; use zksync_config::configs::WitnessGeneratorConfig; use zksync_config::constants::BOOTLOADER_ADDRESS; use zksync_dal::ConnectionPool; -use zksync_object_store::gcs_utils::{ - basic_circuits_blob_url, basic_circuits_inputs_blob_url, merkle_tree_paths_blob_url, - scheduler_witness_blob_url, -}; -use zksync_object_store::object_store::create_object_store_from_env; -use zksync_object_store::object_store::{ - DynamicObjectStore, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, WITNESS_INPUT_BUCKET_PATH, -}; +use zksync_db_storage_provider::DbStorageProvider; +use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; +use zksync_queued_job_processor::JobProcessor; use zksync_state::storage_view::StorageView; use zksync_types::zkevm_test_harness::toolset::GeometryConfig; use zksync_types::{ circuit::GEOMETRY_CONFIG, - proofs::{ - AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob, - StorageLogMetadata, WitnessGeneratorJob, WitnessGeneratorJobInput, - }, + proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, + witness::oracle::VmWitnessOracle, SchedulerCircuitInstanceWitness, }, Address, L1BatchNumber, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; -use crate::db_storage_provider::DbStorageProvider; -use crate::witness_generator; use crate::witness_generator::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; use crate::witness_generator::track_witness_generation_stage; use crate::witness_generator::utils::{expand_bootloader_contents, save_prover_input_artifacts}; pub struct BasicCircuitArtifacts { - pub basic_circuits: BlockBasicCircuits, - pub basic_circuits_inputs: BlockBasicCircuitsPublicInputs, - pub scheduler_witness: SchedulerCircuitInstanceWitness, - pub serialized_circuits: Vec<(String, Vec)>, + basic_circuits: BlockBasicCircuits, + basic_circuits_inputs: BlockBasicCircuitsPublicInputs, + scheduler_witness: SchedulerCircuitInstanceWitness, + circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + basic_circuits_url: String, + basic_circuits_inputs_url: String, + scheduler_witness_url: String, + circuit_types_and_urls: 
Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct BasicWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareBasicCircuitsJob, +} + +#[derive(Debug)] +pub struct BasicWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Arc, +} + +impl BasicWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store().into(), + } + } + + fn process_job_sync( + object_store: &dyn ObjectStore, + connection_pool: ConnectionPool, + basic_job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> Option { + let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); + let BasicWitnessGeneratorJob { block_number, job } = basic_job; + + if let Some(blocks_proving_percentage) = config.blocks_proving_percentage { + // Generate random number in (0; 100). + let threshold = rand::thread_rng().gen_range(1..100); + // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. + // In this case job should be skipped. + if threshold > blocks_proving_percentage { + metrics::counter!("server.witness_generator.skipped_blocks", 1); + vlog::info!( + "Skipping witness generation for block {}, blocks_proving_percentage: {}", + block_number.0, + blocks_proving_percentage + ); + let mut storage = connection_pool.access_storage_blocking(); + storage + .witness_generator_dal() + .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits); + return None; + } + } + + metrics::counter!("server.witness_generator.sampled_blocks", 1); + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::BasicCircuits, + block_number.0 + ); + + Some(process_basic_circuits_job( + object_store, + config, + connection_pool, + started_at, + block_number, + job, + )) + } +} + +#[async_trait] +impl JobProcessor for BasicWitnessGenerator { + type Job = BasicWitnessGeneratorJob; + type JobId = L1BatchNumber; + // The artifact is optional to support skipping blocks when sampling is enabled. 
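`process_job_sync` above skips witness generation for a block with probability of roughly `1 - blocks_proving_percentage / 100`. A minimal sketch of that sampling rule, assuming the `rand` crate imported as in this file; the `u32` percentage type is illustrative:

use rand::Rng;

// Returns true when this block should be skipped rather than proven,
// given the configured share of blocks (0..=100) that should be proven.
fn should_skip(blocks_proving_percentage: u32) -> bool {
    // A draw from [1, 100) exceeds the percentage with probability ~(1 - p/100),
    // so on average about `blocks_proving_percentage`% of blocks are proven.
    let threshold = rand::thread_rng().gen_range(1..100_u32);
    threshold > blocks_proving_percentage
}

fn main() {
    let skipped = (0..1_000).filter(|_| should_skip(30)).count();
    println!("skipped {skipped} of 1000 blocks (expected roughly 700)");
}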
+ type JobArtifacts = Option; + + const SERVICE_NAME: &'static str = "basic_circuit_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_basic_circuit_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = get_artifacts(metadata.block_number, &*self.object_store); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) -> () { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::BasicCircuits, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + connection_pool: ConnectionPool, + job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + let object_store = Arc::clone(&self.object_store); + tokio::task::spawn_blocking(move || { + Self::process_job_sync(&*object_store, connection_pool.clone(), job, started_at) + }) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + optional_artifacts: Option, + ) { + match optional_artifacts { + None => (), + Some(artifacts) => { + let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store); + update_database(connection_pool, started_at, job_id, blob_urls); + } + } + } } pub fn process_basic_circuits_job( + object_store: &dyn ObjectStore, config: WitnessGeneratorConfig, connection_pool: ConnectionPool, started_at: Instant, @@ -58,50 +211,44 @@ pub fn process_basic_circuits_job( ) -> BasicCircuitArtifacts { let witness_gen_input = build_basic_circuits_witness_generator_input(connection_pool.clone(), job, block_number); - let (basic_circuits, basic_circuits_inputs, scheduler_witness) = - generate_witness(config, connection_pool, witness_gen_input); - - let individual_circuits = basic_circuits.clone().into_flattened_set(); - - let serialized_circuits: Vec<(String, Vec)> = - witness_generator::serialize_circuits(&individual_circuits); + generate_witness(object_store, config, connection_pool, witness_gen_input); + let circuits = basic_circuits.clone().into_flattened_set(); - let total_size_bytes: usize = serialized_circuits - .iter() - .map(|(_, bytes)| bytes.len()) - .sum(); vlog::info!( - "Witness generation for block {} is complete in {:?}. Number of circuits: {}, total size: {}KB", + "Witness generation for block {} is complete in {:?}. 
Number of circuits: {}", block_number.0, started_at.elapsed(), - serialized_circuits.len(), - total_size_bytes >> 10 + circuits.len() ); BasicCircuitArtifacts { basic_circuits, basic_circuits_inputs, scheduler_witness, - serialized_circuits, + circuits, } } -pub fn update_database( +fn update_database( connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, - circuits: Vec, + blob_urls: BlobUrls, ) { let mut connection = connection_pool.access_storage_blocking(); let mut transaction = connection.start_transaction_blocking(); - transaction - .witness_generator_dal() - .create_aggregation_jobs(block_number, circuits.len()); + transaction.witness_generator_dal().create_aggregation_jobs( + block_number, + &blob_urls.basic_circuits_url, + &blob_urls.basic_circuits_inputs_url, + blob_urls.circuit_types_and_urls.len(), + &blob_urls.scheduler_witness_url, + ); transaction.prover_dal().insert_prover_jobs( block_number, - circuits, + blob_urls.circuit_types_and_urls, AggregationRound::BasicCircuits, ); transaction @@ -116,72 +263,40 @@ pub fn update_database( track_witness_generation_stage(started_at, AggregationRound::BasicCircuits); } -pub async fn get_artifacts( +pub fn get_artifacts( block_number: L1BatchNumber, - object_store: &DynamicObjectStore, -) -> WitnessGeneratorJob { - let merkle_tree_paths = object_store - .get( - WITNESS_INPUT_BUCKET_PATH, - merkle_tree_paths_blob_url(block_number), - ) - .unwrap(); - - let (merkle_paths, next_enumeration_index) = - bincode::deserialize::<(Vec, u64)>(&merkle_tree_paths) - .expect("witness deserialization failed"); - - WitnessGeneratorJob { - block_number, - job: WitnessGeneratorJobInput::BasicCircuits(Box::new(PrepareBasicCircuitsJob { - merkle_paths, - next_enumeration_index, - })), - } + object_store: &dyn ObjectStore, +) -> BasicWitnessGeneratorJob { + let job = object_store.get(block_number).unwrap(); + BasicWitnessGeneratorJob { block_number, job } } -pub async fn save_artifacts( +fn save_artifacts( block_number: L1BatchNumber, artifacts: BasicCircuitArtifacts, - object_store: &mut DynamicObjectStore, -) { - let basic_circuits_serialized = - bincode::serialize(&artifacts.basic_circuits).expect("cannot serialize basic_circuits"); - object_store - .put( - LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - basic_circuits_blob_url(block_number), - basic_circuits_serialized, - ) + object_store: &dyn ObjectStore, +) -> BlobUrls { + let basic_circuits_url = object_store + .put(block_number, &artifacts.basic_circuits) .unwrap(); - - let basic_circuits_inputs_serialized = bincode::serialize(&artifacts.basic_circuits_inputs) - .expect("cannot serialize basic_circuits_inputs"); - object_store - .put( - LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - basic_circuits_inputs_blob_url(block_number), - basic_circuits_inputs_serialized, - ) + let basic_circuits_inputs_url = object_store + .put(block_number, &artifacts.basic_circuits_inputs) .unwrap(); - - let scheduler_witness_serialized = bincode::serialize(&artifacts.scheduler_witness) - .expect("cannot serialize scheduler_witness"); - - object_store - .put( - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, - scheduler_witness_blob_url(block_number), - scheduler_witness_serialized, - ) + let scheduler_witness_url = object_store + .put(block_number, &artifacts.scheduler_witness) .unwrap(); - save_prover_input_artifacts( + let circuit_types_and_urls = save_prover_input_artifacts( block_number, - artifacts.serialized_circuits, + &artifacts.circuits, object_store, AggregationRound::BasicCircuits, - ) - .await; 
+ ); + BlobUrls { + basic_circuits_url, + basic_circuits_inputs_url, + scheduler_witness_url, + circuit_types_and_urls, + } } // If making changes to this method, consider moving this logic to the DAL layer and make @@ -216,6 +331,7 @@ pub fn build_basic_circuits_witness_generator_input( } pub fn generate_witness( + object_store: &dyn ObjectStore, config: WitnessGeneratorConfig, connection_pool: ConnectionPool, input: BasicCircuitWitnessGeneratorInput, @@ -254,18 +370,13 @@ pub fn generate_witness( if input.used_bytecodes_hashes.contains(&account_code_hash) { used_bytecodes.insert(account_code_hash, account_bytecode); } - let factory_dep_bytecode_hashes: HashSet = used_bytecodes - .clone() - .keys() - .map(|&hash| u256_to_h256(hash)) - .collect(); - let missing_deps: HashSet<_> = hashes - .difference(&factory_dep_bytecode_hashes) - .cloned() - .collect(); - if !missing_deps.is_empty() { - vlog::error!("{:?} factory deps are not found in DB", missing_deps); - } + + assert_eq!( + hashes.len(), + used_bytecodes.len(), + "{} factory deps are not found in DB", + hashes.len() - used_bytecodes.len() + ); // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. // Probably, we should make it work with L1 batch numbers too. @@ -276,12 +387,13 @@ pub fn generate_witness( let db_storage_provider = DbStorageProvider::new(connection, last_miniblock_number, true); let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths_input, - input.previous_block_hash.0.to_vec(), + input.previous_block_hash.0, ); let storage_ptr: &mut dyn vm::storage::Storage = &mut StorageView::new(db_storage_provider); - let storage_oracle = StorageOracle::new(Rc::new(RefCell::new(storage_ptr))); - let memory = SimpleMemory::default(); + let storage_oracle: StorageOracle = + StorageOracle::new(Rc::new(RefCell::new(storage_ptr))); + let memory: SimpleMemory = SimpleMemory::default(); let mut hasher = DefaultHasher::new(); GEOMETRY_CONFIG.hash(&mut hasher); vlog::info!( @@ -294,6 +406,7 @@ pub fn generate_witness( .contains(&input.block_number.0) { save_run_with_fixed_params_args_to_gcs( + object_store, input.block_number.0, last_miniblock_number.0, Address::zero(), @@ -329,6 +442,7 @@ pub fn generate_witness( #[allow(clippy::too_many_arguments)] fn save_run_with_fixed_params_args_to_gcs( + object_store: &dyn ObjectStore, l1_batch_number: u32, last_miniblock_number: u32, caller: Address, @@ -358,16 +472,8 @@ fn save_run_with_fixed_params_args_to_gcs( geometry, tree, }; - let run_with_fixed_params_input_serialized = bincode::serialize(&run_with_fixed_params_input) - .expect("cannot serialize run_with_fixed_params_input"); - let blob_url = format!("run_with_fixed_params_input_{}.bin", l1_batch_number); - let mut object_store = create_object_store_from_env(); object_store - .put( - WITNESS_INPUT_BUCKET_PATH, - blob_url, - run_with_fixed_params_input_serialized, - ) + .put(L1BatchNumber(l1_batch_number), &run_with_fixed_params_input) .unwrap(); } @@ -387,3 +493,14 @@ pub struct RunWithFixedParamsInput { pub geometry: GeometryConfig, pub tree: PrecalculatedMerklePathsProvider, } + +impl StoredObject for RunWithFixedParamsInput { + const BUCKET: Bucket = Bucket::WitnessInput; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("run_with_fixed_params_input_{}.bin", key) + } + + zksync_object_store::serialize_using_bincode!(); +} diff --git a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs 
b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs index 589c6d784639..8298d8847f27 100644 --- a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs @@ -1,32 +1,20 @@ use std::collections::HashMap; use std::time::Instant; +use async_trait::async_trait; + +use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::gcs_utils::{ - aggregation_outputs_blob_url, basic_circuits_blob_url, basic_circuits_inputs_blob_url, - leaf_layer_subqueues_blob_url, -}; -use zksync_object_store::object_store::{ - DynamicObjectStore, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, -}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; use zksync_types::{ circuit::LEAF_SPLITTING_FACTOR, - proofs::{ - AggregationRound, PrepareLeafAggregationCircuitsJob, WitnessGeneratorJob, - WitnessGeneratorJobInput, WitnessGeneratorJobMetadata, - }, + proofs::{AggregationRound, PrepareLeafAggregationCircuitsJob, WitnessGeneratorJobMetadata}, zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::bn256::Bn256, + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, bellman::plonk::better_better_cs::setup::VerificationKey, - encodings::recursion_request::RecursionRequest, - encodings::QueueSimulator, - sync_vm, witness, - witness::{ - full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, - oracle::VmWitnessOracle, - }, + encodings::recursion_request::RecursionRequest, encodings::QueueSimulator, witness, + witness::oracle::VmWitnessOracle, LeafAggregationOutputDataWitness, }, L1BatchNumber, }; @@ -34,16 +22,133 @@ use zksync_verification_key_server::{ get_ordered_vks_for_basic_circuits, get_vks_for_basic_circuits, get_vks_for_commitment, }; -use crate::witness_generator; use crate::witness_generator::track_witness_generation_stage; use crate::witness_generator::utils::save_prover_input_artifacts; pub struct LeafAggregationArtifacts { - pub leaf_layer_subqueues: Vec, 2, 2>>, - pub aggregation_outputs: - Vec>, - pub serialized_circuits: Vec<(String, Vec)>, - pub leaf_circuits: Vec>>, + leaf_layer_subqueues: Vec, 2, 2>>, + aggregation_outputs: Vec>, + leaf_circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + leaf_layer_subqueues_url: String, + aggregation_outputs_url: String, + circuit_types_and_urls: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct LeafAggregationWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareLeafAggregationCircuitsJob, +} + +#[derive(Debug)] +pub struct LeafAggregationWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl LeafAggregationWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + leaf_job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> LeafAggregationArtifacts { + let LeafAggregationWitnessGeneratorJob { block_number, job } = leaf_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::LeafAggregation, + block_number.0 + ); + process_leaf_aggregation_job(started_at, block_number, job) + } +} + +#[async_trait] +impl JobProcessor for LeafAggregationWitnessGenerator { + type Job = 
LeafAggregationWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = LeafAggregationArtifacts; + + const SERVICE_NAME: &'static str = "leaf_aggregation_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_leaf_aggregation_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = get_artifacts(metadata, &*self.object_store); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) -> () { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::LeafAggregation, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: LeafAggregationArtifacts, + ) { + let leaf_circuits_len = artifacts.leaf_circuits.len(); + let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store); + update_database( + connection_pool, + started_at, + job_id, + leaf_circuits_len, + blob_urls, + ); + } } pub fn process_leaf_aggregation_job( @@ -75,18 +180,10 @@ pub fn process_leaf_aggregation_job( vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); - // fs::write("basic_circuits.bincode", bincode::serialize(&job.basic_circuits).unwrap()).unwrap(); - // fs::write("basic_circuits_inputs.bincode", bincode::serialize(&job.basic_circuits_inputs).unwrap()).unwrap(); - // fs::write("basic_circuits_proofs.bincode", bincode::serialize(&job.basic_circuits_proofs).unwrap()).unwrap(); - // fs::write("vks_for_aggregation.bincode", bincode::serialize(&vks_for_aggregation).unwrap()).unwrap(); - // fs::write("all_vk_committments.bincode", bincode::serialize(&all_vk_committments).unwrap()).unwrap(); - // fs::write("set_committment.bincode", bincode::serialize(&set_committment).unwrap()).unwrap(); - // fs::write("g2_points.bincode", bincode::serialize(&g2_points).unwrap()).unwrap(); - let stage_started_at = Instant::now(); let (leaf_layer_subqueues, aggregation_outputs, leaf_circuits) = - zksync_types::zkevm_test_harness::witness::recursive_aggregation::prepare_leaf_aggregations( + witness::recursive_aggregation::prepare_leaf_aggregations( job.basic_circuits, job.basic_circuits_inputs, job.basic_circuits_proofs, @@ -97,9 +194,6 @@ pub fn process_leaf_aggregation_job( g2_points, ); - let serialized_circuits: Vec<(String, Vec)> = - witness_generator::serialize_circuits(&leaf_circuits); - vlog::info!( "prepare_leaf_aggregations took {:?}", stage_started_at.elapsed() @@ -114,17 +208,16 @@ pub fn process_leaf_aggregation_job( LeafAggregationArtifacts { leaf_layer_subqueues, aggregation_outputs, - serialized_circuits, leaf_circuits, } } -pub fn update_database( +fn update_database( 
connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, leaf_circuits_len: usize, - circuits: Vec, + blob_urls: BlobUrls, ) { let mut connection = connection_pool.access_storage_blocking(); let mut transaction = connection.start_transaction_blocking(); @@ -133,10 +226,15 @@ pub fn update_database( // and advances it to waiting_for_proofs status transaction .witness_generator_dal() - .save_leaf_aggregation_artifacts(block_number, leaf_circuits_len); + .save_leaf_aggregation_artifacts( + block_number, + leaf_circuits_len, + &blob_urls.leaf_layer_subqueues_url, + &blob_urls.aggregation_outputs_url, + ); transaction.prover_dal().insert_prover_jobs( block_number, - circuits, + blob_urls.circuit_types_and_urls, AggregationRound::LeafAggregation, ); transaction @@ -151,72 +249,43 @@ pub fn update_database( track_witness_generation_stage(started_at, AggregationRound::LeafAggregation); } -pub async fn get_artifacts( +pub fn get_artifacts( metadata: WitnessGeneratorJobMetadata, - object_store: &DynamicObjectStore, -) -> WitnessGeneratorJob { - let basic_circuits_serialized = object_store - .get( - LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - basic_circuits_blob_url(metadata.block_number), - ) - .unwrap(); - let basic_circuits = - bincode::deserialize::>(&basic_circuits_serialized) - .expect("basic_circuits deserialization failed"); - - let basic_circuits_inputs_serialized = object_store - .get( - LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - basic_circuits_inputs_blob_url(metadata.block_number), - ) - .unwrap(); - let basic_circuits_inputs = bincode::deserialize::>( - &basic_circuits_inputs_serialized, - ) - .expect("basic_circuits_inputs deserialization failed"); + object_store: &dyn ObjectStore, +) -> LeafAggregationWitnessGeneratorJob { + let basic_circuits = object_store.get(metadata.block_number).unwrap(); + let basic_circuits_inputs = object_store.get(metadata.block_number).unwrap(); - WitnessGeneratorJob { + LeafAggregationWitnessGeneratorJob { block_number: metadata.block_number, - job: WitnessGeneratorJobInput::LeafAggregation(Box::new( - PrepareLeafAggregationCircuitsJob { - basic_circuits_inputs, - basic_circuits_proofs: metadata.proofs, - basic_circuits, - }, - )), + job: PrepareLeafAggregationCircuitsJob { + basic_circuits_inputs, + basic_circuits_proofs: metadata.proofs, + basic_circuits, + }, } } -pub async fn save_artifacts( +fn save_artifacts( block_number: L1BatchNumber, artifacts: LeafAggregationArtifacts, - object_store: &mut DynamicObjectStore, -) { - let leaf_layer_subqueues_serialized = bincode::serialize(&artifacts.leaf_layer_subqueues) - .expect("cannot serialize leaf_layer_subqueues"); - object_store - .put( - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - leaf_layer_subqueues_blob_url(block_number), - leaf_layer_subqueues_serialized, - ) + object_store: &dyn ObjectStore, +) -> BlobUrls { + let leaf_layer_subqueues_url = object_store + .put(block_number, &artifacts.leaf_layer_subqueues) .unwrap(); - - let aggregation_outputs_serialized = bincode::serialize(&artifacts.aggregation_outputs) - .expect("cannot serialize aggregation_outputs"); - object_store - .put( - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - aggregation_outputs_blob_url(block_number), - aggregation_outputs_serialized, - ) + let aggregation_outputs_url = object_store + .put(block_number, &artifacts.aggregation_outputs) .unwrap(); - save_prover_input_artifacts( + let circuit_types_and_urls = save_prover_input_artifacts( block_number, - artifacts.serialized_circuits, + 
&artifacts.leaf_circuits, object_store, AggregationRound::LeafAggregation, - ) - .await; + ); + BlobUrls { + leaf_layer_subqueues_url, + aggregation_outputs_url, + circuit_types_and_urls, + } } diff --git a/core/bin/zksync_core/src/witness_generator/mod.rs b/core/bin/zksync_core/src/witness_generator/mod.rs index ffc28b0ef8d9..c39a9d8eb605 100644 --- a/core/bin/zksync_core/src/witness_generator/mod.rs +++ b/core/bin/zksync_core/src/witness_generator/mod.rs @@ -1,35 +1,17 @@ -use std::fmt::Debug; use std::time::Instant; -use async_trait::async_trait; -use rand::Rng; - -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_dal::ConnectionPool; -use zksync_object_store::object_store::create_object_store_from_env; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - proofs::{AggregationRound, WitnessGeneratorJob, WitnessGeneratorJobInput}, - zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, - witness::oracle::VmWitnessOracle, - }, - L1BatchNumber, -}; +use zksync_types::proofs::AggregationRound; // use crate::witness_generator::basic_circuits; -use crate::witness_generator::basic_circuits::BasicCircuitArtifacts; -use crate::witness_generator::leaf_aggregation::LeafAggregationArtifacts; -use crate::witness_generator::node_aggregation::NodeAggregationArtifacts; -use crate::witness_generator::scheduler::SchedulerArtifacts; mod precalculated_merkle_paths_provider; mod utils; -mod basic_circuits; -mod leaf_aggregation; -mod node_aggregation; -mod scheduler; +pub mod basic_circuits; +pub mod leaf_aggregation; +pub mod node_aggregation; +pub mod scheduler; + #[cfg(test)] mod tests; @@ -73,311 +55,6 @@ mod tests; /// Note that the very first input table (`basic_circuit_witness_jobs`) /// is populated by the tree (as the input artifact for the `WitnessGeneratorJobType::BasicCircuits` is the merkle proofs) /// -#[derive(Debug)] -pub struct WitnessGenerator { - config: WitnessGeneratorConfig, -} - -pub enum WitnessGeneratorArtifacts { - BasicCircuits(Box), - LeafAggregation(Box), - NodeAggregation(Box), - Scheduler(Box), -} - -impl WitnessGenerator { - pub fn new(config: WitnessGeneratorConfig) -> Self { - Self { config } - } - - fn process_job_sync( - connection_pool: ConnectionPool, - job: WitnessGeneratorJob, - started_at: Instant, - ) -> Option { - let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); - let WitnessGeneratorJob { block_number, job } = job; - - if let (Some(blocks_proving_percentage), &WitnessGeneratorJobInput::BasicCircuits(_)) = - (config.blocks_proving_percentage, &job) - { - // Generate random number in (0; 100). - let rand_value = rand::thread_rng().gen_range(1..100); - // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. - // In this case job should be skipped. 
- if rand_value > blocks_proving_percentage { - metrics::counter!("server.witness_generator.skipped_blocks", 1); - vlog::info!( - "Skipping witness generation for block {}, blocks_proving_percentage: {}", - block_number.0, - blocks_proving_percentage - ); - let mut storage = connection_pool.access_storage_blocking(); - storage - .witness_generator_dal() - .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits); - return None; - } - } - - if matches!(&job, &WitnessGeneratorJobInput::BasicCircuits(_)) { - metrics::counter!("server.witness_generator.sampled_blocks", 1); - } - vlog::info!( - "Starting witness generation of type {:?} for block {}", - job.aggregation_round(), - block_number.0 - ); - - match job { - WitnessGeneratorJobInput::BasicCircuits(job) => { - Some(WitnessGeneratorArtifacts::BasicCircuits(Box::new( - basic_circuits::process_basic_circuits_job( - config, - connection_pool, - started_at, - block_number, - *job, - ), - ))) - } - WitnessGeneratorJobInput::LeafAggregation(job) => { - Some(WitnessGeneratorArtifacts::LeafAggregation(Box::new( - leaf_aggregation::process_leaf_aggregation_job(started_at, block_number, *job), - ))) - } - WitnessGeneratorJobInput::NodeAggregation(job) => { - Some(WitnessGeneratorArtifacts::NodeAggregation(Box::new( - node_aggregation::process_node_aggregation_job( - config, - started_at, - block_number, - *job, - ), - ))) - } - - WitnessGeneratorJobInput::Scheduler(job) => { - Some(WitnessGeneratorArtifacts::Scheduler(Box::new( - scheduler::process_scheduler_job(started_at, block_number, *job), - ))) - } - } - } -} - -#[async_trait] -impl JobProcessor for WitnessGenerator { - type Job = WitnessGeneratorJob; - type JobId = (L1BatchNumber, AggregationRound); - type JobArtifacts = Option; - - const SERVICE_NAME: &'static str = "witness_generator"; - - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage().await; - let object_store = create_object_store_from_env(); - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - let optional_metadata = connection - .witness_generator_dal() - .get_next_scheduler_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ); - - if let Some(metadata) = optional_metadata { - let prev_metadata = connection - .blocks_dal() - .get_block_metadata(metadata.block_number - 1); - let previous_aux_hash = prev_metadata - .as_ref() - .map_or([0u8; 32], |e| e.metadata.aux_data_hash.0); - let previous_meta_hash = - prev_metadata.map_or([0u8; 32], |e| e.metadata.meta_parameters_hash.0); - let job = scheduler::get_artifacts( - metadata, - previous_aux_hash, - previous_meta_hash, - &object_store, - ) - .await; - return Some(((job.block_number, job.job.aggregation_round()), job)); - } - - let optional_metadata = connection - .witness_generator_dal() - .get_next_node_aggregation_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ); - - if let Some(metadata) = optional_metadata { - let job = node_aggregation::get_artifacts(metadata, &object_store).await; - return Some(((job.block_number, job.job.aggregation_round()), job)); - } - - let optional_metadata = connection - .witness_generator_dal() - .get_next_leaf_aggregation_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ); - - if let Some(metadata) = 
optional_metadata { - let job = leaf_aggregation::get_artifacts(metadata, &object_store).await; - return Some(((job.block_number, job.job.aggregation_round()), job)); - } - - let optional_metadata = connection - .witness_generator_dal() - .get_next_basic_circuit_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ); - - if let Some(metadata) = optional_metadata { - let job = basic_circuits::get_artifacts(metadata.block_number, &object_store).await; - return Some(((job.block_number, job.job.aggregation_round()), job)); - } - - None - } - - async fn save_failure( - connection_pool: ConnectionPool, - job_id: (L1BatchNumber, AggregationRound), - started_at: Instant, - error: String, - ) -> () { - let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); - connection_pool - .access_storage_blocking() - .witness_generator_dal() - .mark_witness_job_as_failed( - job_id.0, - job_id.1, - started_at.elapsed(), - error, - config.max_attempts, - ); - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - connection_pool: ConnectionPool, - job: WitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - Self::process_job_sync(connection_pool.clone(), job, started_at) - }) - } - - async fn save_result( - connection_pool: ConnectionPool, - job_id: (L1BatchNumber, AggregationRound), - started_at: Instant, - optional_artifacts: Option, - ) { - match optional_artifacts { - None => (), - Some(artifacts) => { - let mut object_store = create_object_store_from_env(); - let block_number = job_id.0; - match artifacts { - WitnessGeneratorArtifacts::BasicCircuits(boxed_basic_circuit_artifacts) => { - let basic_circuit_artifacts = *boxed_basic_circuit_artifacts; - let circuits = - get_circuit_types(&basic_circuit_artifacts.serialized_circuits); - basic_circuits::save_artifacts( - block_number, - basic_circuit_artifacts, - &mut object_store, - ) - .await; - basic_circuits::update_database( - connection_pool, - started_at, - block_number, - circuits, - ); - } - WitnessGeneratorArtifacts::LeafAggregation( - boxed_leaf_aggregation_artifacts, - ) => { - let leaf_aggregation_artifacts = *boxed_leaf_aggregation_artifacts; - let leaf_circuits_len = leaf_aggregation_artifacts.leaf_circuits.len(); - let circuits = - get_circuit_types(&leaf_aggregation_artifacts.serialized_circuits); - leaf_aggregation::save_artifacts( - block_number, - leaf_aggregation_artifacts, - &mut object_store, - ) - .await; - leaf_aggregation::update_database( - connection_pool, - started_at, - block_number, - leaf_circuits_len, - circuits, - ); - } - WitnessGeneratorArtifacts::NodeAggregation( - boxed_node_aggregation_artifacts, - ) => { - let node_aggregation_artifacts = *boxed_node_aggregation_artifacts; - let circuits = - get_circuit_types(&node_aggregation_artifacts.serialized_circuits); - node_aggregation::save_artifacts( - block_number, - node_aggregation_artifacts, - &mut object_store, - ) - .await; - node_aggregation::update_database( - connection_pool, - started_at, - block_number, - circuits, - ); - } - WitnessGeneratorArtifacts::Scheduler(boxed_scheduler_artifacts) => { - let scheduler_artifacts = *boxed_scheduler_artifacts; - let circuits = get_circuit_types(&scheduler_artifacts.serialized_circuits); - scheduler::save_artifacts( - block_number, - scheduler_artifacts.serialized_circuits, - &mut object_store, - ) - .await; - scheduler::update_database( - connection_pool, - started_at, - 
block_number, - scheduler_artifacts.final_aggregation_result, - circuits, - ); - } - }; - } - } - } -} - -fn get_circuit_types(serialized_circuits: &[(String, Vec)]) -> Vec { - serialized_circuits - .iter() - .map(|(circuit, _)| circuit.clone()) - .collect() -} fn track_witness_generation_stage(started_at: Instant, round: AggregationRound) { let stage = match round { @@ -392,19 +69,3 @@ fn track_witness_generation_stage(started_at: Instant, round: AggregationRound) "stage" => format!("wit_gen_{}", stage) ); } - -fn serialize_circuits( - individual_circuits: &[ZkSyncCircuit>], -) -> Vec<(String, Vec)> { - individual_circuits - .iter() - .map(|circuit| { - ( - circuit.short_description().to_owned(), - bincode::serialize(&circuit).expect("failed to serialize circuit"), - ) - }) - .collect() -} - -const _SCHEDULER_CIRCUIT_INDEX: u8 = 0; diff --git a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs index 73f18f62fccd..19bdf4d7d126 100644 --- a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs @@ -2,23 +2,17 @@ use std::collections::HashMap; use std::env; use std::time::Instant; +use async_trait::async_trait; + use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::gcs_utils::{ - aggregation_outputs_blob_url, final_node_aggregations_blob_url, leaf_layer_subqueues_blob_url, -}; -use zksync_object_store::object_store::{ - DynamicObjectStore, NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, -}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; use zksync_types::{ circuit::{ LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, }, - proofs::{ - AggregationRound, PrepareNodeAggregationCircuitJob, WitnessGeneratorJob, - WitnessGeneratorJobInput, WitnessGeneratorJobMetadata, - }, + proofs::{AggregationRound, PrepareNodeAggregationCircuitJob, WitnessGeneratorJobMetadata}, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, @@ -29,7 +23,7 @@ use zksync_types::{ oracle::VmWitnessOracle, recursive_aggregation::{erase_vk_type, padding_aggregations}, }, - LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, + NodeAggregationOutputDataWitness, }, L1BatchNumber, }; @@ -37,13 +31,125 @@ use zksync_verification_key_server::{ get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, }; -use crate::witness_generator; use crate::witness_generator::track_witness_generation_stage; use crate::witness_generator::utils::save_prover_input_artifacts; pub struct NodeAggregationArtifacts { - pub final_node_aggregation: NodeAggregationOutputDataWitness, - pub serialized_circuits: Vec<(String, Vec)>, + final_node_aggregation: NodeAggregationOutputDataWitness, + node_circuits: Vec>>, +} + +#[derive(Debug)] +struct BlobUrls { + node_aggregations_url: String, + circuit_types_and_urls: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +pub struct NodeAggregationWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareNodeAggregationCircuitJob, +} + +#[derive(Debug)] +pub struct NodeAggregationWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl NodeAggregationWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + 
Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + node_job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> NodeAggregationArtifacts { + let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); + let NodeAggregationWitnessGeneratorJob { block_number, job } = node_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::NodeAggregation, + block_number.0 + ); + process_node_aggregation_job(config, started_at, block_number, job) + } +} + +#[async_trait] +impl JobProcessor for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = NodeAggregationArtifacts; + + const SERVICE_NAME: &'static str = "node_aggregation_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_node_aggregation_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let job = get_artifacts(metadata, &*self.object_store); + return Some((job.block_number, job)); + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) -> () { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::NodeAggregation, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: NodeAggregationArtifacts, + ) { + let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store); + update_database(connection_pool, started_at, job_id, blob_urls); + } } pub fn process_node_aggregation_job( @@ -140,9 +246,6 @@ pub fn process_node_aggregation_job( "prepare_node_aggregations returned more than one node aggregation" ); - let serialized_circuits: Vec<(String, Vec)> = - witness_generator::serialize_circuits(&node_circuits); - vlog::info!( "Node witness generation for block {} is complete in {:?}. 
Number of circuits: {}", block_number.0, @@ -152,15 +255,15 @@ pub fn process_node_aggregation_job( NodeAggregationArtifacts { final_node_aggregation: final_node_aggregations.into_iter().next().unwrap(), - serialized_circuits, + node_circuits, } } -pub fn update_database( +fn update_database( connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, - circuits: Vec, + blob_urls: BlobUrls, ) { let mut connection = connection_pool.access_storage_blocking(); let mut transaction = connection.start_transaction_blocking(); @@ -169,10 +272,10 @@ pub fn update_database( // and advances it to waiting_for_proofs status transaction .witness_generator_dal() - .save_node_aggregation_artifacts(block_number); + .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url); transaction.prover_dal().insert_prover_jobs( block_number, - circuits, + blob_urls.circuit_types_and_urls, AggregationRound::NodeAggregation, ); transaction @@ -187,77 +290,43 @@ pub fn update_database( track_witness_generation_stage(started_at, AggregationRound::NodeAggregation); } -pub async fn get_artifacts( +pub fn get_artifacts( metadata: WitnessGeneratorJobMetadata, - object_store: &DynamicObjectStore, -) -> WitnessGeneratorJob { - let leaf_layer_subqueues_serialized = object_store - .get( - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - leaf_layer_subqueues_blob_url(metadata.block_number), - ) - .expect( - "leaf_layer_subqueues is not found in a `queued` `node_aggregation_witness_jobs` job", - ); - let leaf_layer_subqueues = bincode::deserialize::< - Vec< - zksync_types::zkevm_test_harness::encodings::QueueSimulator< - Bn256, - zksync_types::zkevm_test_harness::encodings::recursion_request::RecursionRequest< - Bn256, - >, - 2, - 2, - >, - >, - >(&leaf_layer_subqueues_serialized) - .expect("leaf_layer_subqueues deserialization failed"); - - let aggregation_outputs_serialized = object_store - .get( - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - aggregation_outputs_blob_url(metadata.block_number), - ) - .expect( - "aggregation_outputs is not found in a `queued` `node_aggregation_witness_jobs` job", - ); - let aggregation_outputs = bincode::deserialize::>>( - &aggregation_outputs_serialized, - ) - .expect("aggregation_outputs deserialization failed"); + object_store: &dyn ObjectStore, +) -> NodeAggregationWitnessGeneratorJob { + let leaf_layer_subqueues = object_store + .get(metadata.block_number) + .expect("leaf_layer_subqueues not found in queued `node_aggregation_witness_jobs` job"); + let aggregation_outputs = object_store + .get(metadata.block_number) + .expect("aggregation_outputs not found in queued `node_aggregation_witness_jobs` job"); - WitnessGeneratorJob { + NodeAggregationWitnessGeneratorJob { block_number: metadata.block_number, - job: WitnessGeneratorJobInput::NodeAggregation(Box::new( - PrepareNodeAggregationCircuitJob { - previous_level_proofs: metadata.proofs, - previous_level_leafs_aggregations: aggregation_outputs, - previous_sequence: leaf_layer_subqueues, - }, - )), + job: PrepareNodeAggregationCircuitJob { + previous_level_proofs: metadata.proofs, + previous_level_leafs_aggregations: aggregation_outputs, + previous_sequence: leaf_layer_subqueues, + }, } } -pub async fn save_artifacts( +fn save_artifacts( block_number: L1BatchNumber, artifacts: NodeAggregationArtifacts, - object_store: &mut DynamicObjectStore, -) { - let final_node_aggregations_serialized = bincode::serialize(&artifacts.final_node_aggregation) - .expect("cannot serialize final_node_aggregations"); - - 
object_store - .put( - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, - final_node_aggregations_blob_url(block_number), - final_node_aggregations_serialized, - ) + object_store: &dyn ObjectStore, +) -> BlobUrls { + let node_aggregations_url = object_store + .put(block_number, &artifacts.final_node_aggregation) .unwrap(); - save_prover_input_artifacts( + let circuit_types_and_urls = save_prover_input_artifacts( block_number, - artifacts.serialized_circuits, + &artifacts.node_circuits, object_store, AggregationRound::NodeAggregation, - ) - .await; + ); + BlobUrls { + node_aggregations_url, + circuit_types_and_urls, + } } diff --git a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs index 4f585347b1bf..48c063857b14 100644 --- a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs +++ b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs @@ -1,16 +1,15 @@ use serde::{Deserialize, Serialize}; -use std::convert::TryInto; use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; +use zksync_types::zkevm_test_harness::blake2::Blake2s256; use zksync_types::zkevm_test_harness::witness::tree::BinaryHasher; use zksync_types::zkevm_test_harness::witness::tree::{ BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, }; -use zksync_types::zkevm_test_harness::Blake2s256; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct PrecalculatedMerklePathsProvider { // We keep the root hash of the last processed leaf, as it is needed by the the witness generator. - pub root_hash: Vec, + pub root_hash: [u8; 32], // The ordered list of expected leaves to be interacted with pub pending_leaves: Vec, // The index that would be assigned to the next new leaf @@ -21,12 +20,13 @@ pub struct PrecalculatedMerklePathsProvider { } impl PrecalculatedMerklePathsProvider { - pub fn new(input: PrepareBasicCircuitsJob, root_hash: Vec) -> Self { - vlog::debug!("Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, input.next_enumeration_index); + pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self { + let next_enumeration_index = input.next_enumeration_index(); + vlog::debug!("Initializing PrecalculatedMerklePathsProvider. 
Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index); Self { root_hash, - pending_leaves: input.merkle_paths, - next_enumeration_index: input.next_enumeration_index, + pending_leaves: input.into_merkle_paths().collect(), + next_enumeration_index, is_get_leaf_invoked: false, } } @@ -50,7 +50,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> } fn root(&self) -> [u8; 32] { - self.root_hash.clone().try_into().unwrap() + self.root_hash } fn get_leaf(&mut self, index: &[u8; 32]) -> LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf> { @@ -70,7 +70,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> index ) }); - self.root_hash = next.root_hash.clone(); + self.root_hash = next.root_hash; assert_eq!( &next.leaf_hashed_key_array(), @@ -85,7 +85,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> }, first_write: next.first_write, index: *index, - merkle_path: next.merkle_paths_array(), + merkle_path: next.clone().into_merkle_paths_array(), }; if next.is_write { @@ -119,7 +119,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> "`get_leaf()` is expected to be invoked before `insert_leaf()`" ); let next = self.pending_leaves.remove(0); - self.root_hash = next.root_hash.clone(); + self.root_hash = next.root_hash; assert!( next.is_write, @@ -153,7 +153,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> }, first_write: next.first_write, index: *index, - merkle_path: next.merkle_paths_array(), + merkle_path: next.into_merkle_paths_array(), } } diff --git a/core/bin/zksync_core/src/witness_generator/scheduler.rs b/core/bin/zksync_core/src/witness_generator/scheduler.rs index 6ef3bc58e751..97f973b5b41d 100644 --- a/core/bin/zksync_core/src/witness_generator/scheduler.rs +++ b/core/bin/zksync_core/src/witness_generator/scheduler.rs @@ -1,25 +1,23 @@ use std::collections::HashMap; +use std::slice; use std::time::Instant; +use async_trait::async_trait; + +use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::gcs_utils::{ - final_node_aggregations_blob_url, scheduler_witness_blob_url, -}; -use zksync_object_store::object_store::{DynamicObjectStore, SCHEDULER_WITNESS_JOBS_BUCKET_PATH}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; use zksync_types::{ circuit::{ LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, }, - proofs::{ - AggregationRound, PrepareSchedulerCircuitJob, WitnessGeneratorJob, - WitnessGeneratorJobInput, WitnessGeneratorJobMetadata, - }, + proofs::{AggregationRound, PrepareSchedulerCircuitJob, WitnessGeneratorJobMetadata}, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, sync_vm::scheduler::BlockApplicationWitness, witness::{self, oracle::VmWitnessOracle, recursive_aggregation::erase_vk_type}, - NodeAggregationOutputDataWitness, SchedulerCircuitInstanceWitness, }, L1BatchNumber, }; @@ -27,13 +25,138 @@ use zksync_verification_key_server::{ get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, }; -use crate::witness_generator; use crate::witness_generator::track_witness_generation_stage; use crate::witness_generator::utils::save_prover_input_artifacts; pub struct SchedulerArtifacts { - pub final_aggregation_result: 
BlockApplicationWitness, - pub serialized_circuits: Vec<(String, Vec)>, + final_aggregation_result: BlockApplicationWitness, + scheduler_circuit: ZkSyncCircuit>, +} + +#[derive(Clone)] +pub struct SchedulerWitnessGeneratorJob { + block_number: L1BatchNumber, + job: PrepareSchedulerCircuitJob, +} + +#[derive(Debug)] +pub struct SchedulerWitnessGenerator { + config: WitnessGeneratorConfig, + object_store: Box, +} + +impl SchedulerWitnessGenerator { + pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + Self { + config, + object_store: store_factory.create_store(), + } + } + + fn process_job_sync( + scheduler_job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> SchedulerArtifacts { + let SchedulerWitnessGeneratorJob { block_number, job } = scheduler_job; + + vlog::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::Scheduler, + block_number.0 + ); + process_scheduler_job(started_at, block_number, job) + } +} + +#[async_trait] +impl JobProcessor for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = SchedulerArtifacts; + + const SERVICE_NAME: &'static str = "scheduler_witness_generator"; + + async fn get_next_job( + &self, + connection_pool: ConnectionPool, + ) -> Option<(Self::JobId, Self::Job)> { + let mut connection = connection_pool.access_storage_blocking(); + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + + match connection + .witness_generator_dal() + .get_next_scheduler_witness_job( + self.config.witness_generation_timeout(), + self.config.max_attempts, + last_l1_batch_to_process, + ) { + Some(metadata) => { + let prev_metadata = connection + .blocks_dal() + .get_block_metadata(metadata.block_number - 1); + let previous_aux_hash = prev_metadata + .as_ref() + .map_or([0u8; 32], |e| e.metadata.aux_data_hash.0); + let previous_meta_hash = + prev_metadata.map_or([0u8; 32], |e| e.metadata.meta_parameters_hash.0); + let job = get_artifacts( + metadata, + previous_aux_hash, + previous_meta_hash, + &*self.object_store, + ); + Some((job.block_number, job)) + } + None => None, + } + } + + async fn save_failure( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + error: String, + ) -> () { + connection_pool + .access_storage_blocking() + .witness_generator_dal() + .mark_witness_job_as_failed( + job_id, + AggregationRound::Scheduler, + started_at.elapsed(), + error, + self.config.max_attempts, + ); + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _connection_pool: ConnectionPool, + job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle { + tokio::task::spawn_blocking(move || Self::process_job_sync(job, started_at)) + } + + async fn save_result( + &self, + connection_pool: ConnectionPool, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: SchedulerArtifacts, + ) { + let circuit_types_and_urls = + save_artifacts(job_id, &artifacts.scheduler_circuit, &*self.object_store); + update_database( + connection_pool, + started_at, + job_id, + artifacts.final_aggregation_result, + circuit_types_and_urls, + ); + } } pub fn process_scheduler_job( @@ -75,19 +198,8 @@ pub fn process_scheduler_job( vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); let stage_started_at = Instant::now(); - // fs::write("incomplete_scheduler_witness.bincode", 
bincode::serialize(&job.incomplete_scheduler_witness).unwrap()).unwrap(); - // fs::write("node_final_proof_level_proofs.bincode", bincode::serialize(&job.node_final_proof_level_proof).unwrap()).unwrap(); - // fs::write("node_aggregation_vk.bincode", bincode::serialize(&node_aggregation_vk).unwrap()).unwrap(); - // fs::write("final_node_aggregations.bincode", bincode::serialize(&job.final_node_aggregations).unwrap()).unwrap(); - // fs::write("leaf_vks_committment.bincode", bincode::serialize(&set_committment).unwrap()).unwrap(); - // fs::write("node_aggregation_vk_committment.bincode", bincode::serialize(&node_aggregation_vk_committment).unwrap()).unwrap(); - // fs::write("leaf_aggregation_vk_committment.bincode", bincode::serialize(&leaf_aggregation_vk_committment).unwrap()).unwrap(); - // fs::write("previous_aux_hash.bincode", bincode::serialize(&job.previous_aux_hash).unwrap()).unwrap(); - // fs::write("previous_meta_hash.bincode", bincode::serialize(&job.previous_meta_hash).unwrap()).unwrap(); - // fs::write("g2_points.bincode", bincode::serialize(&g2_points).unwrap()).unwrap(); - let (scheduler_circuit, final_aggregation_result) = - zksync_types::zkevm_test_harness::witness::recursive_aggregation::prepare_scheduler_circuit( + witness::recursive_aggregation::prepare_scheduler_circuit( job.incomplete_scheduler_witness, job.node_final_proof_level_proof, node_aggregation_vk, @@ -106,9 +218,6 @@ pub fn process_scheduler_job( stage_started_at.elapsed() ); - let serialized_circuits: Vec<(String, Vec)> = - witness_generator::serialize_circuits(&vec![scheduler_circuit]); - vlog::info!( "Scheduler generation for block {} is complete in {:?}", block_number.0, @@ -117,7 +226,7 @@ pub fn process_scheduler_job( SchedulerArtifacts { final_aggregation_result, - serialized_circuits, + scheduler_circuit, } } @@ -126,7 +235,7 @@ pub fn update_database( started_at: Instant, block_number: L1BatchNumber, final_aggregation_result: BlockApplicationWitness, - circuits: Vec, + circuit_types_and_urls: Vec<(&'static str, String)>, ) { let mut connection = connection_pool.access_storage_blocking(); let mut transaction = connection.start_transaction_blocking(); @@ -157,7 +266,7 @@ pub fn update_database( transaction.prover_dal().insert_prover_jobs( block_number, - circuits, + circuit_types_and_urls, AggregationRound::Scheduler, ); @@ -180,56 +289,36 @@ pub fn update_database( track_witness_generation_stage(started_at, AggregationRound::Scheduler); } -pub async fn save_artifacts( +pub fn save_artifacts( block_number: L1BatchNumber, - serialized_circuits: Vec<(String, Vec)>, - object_store: &mut DynamicObjectStore, -) { + scheduler_circuit: &ZkSyncCircuit>, + object_store: &dyn ObjectStore, +) -> Vec<(&'static str, String)> { save_prover_input_artifacts( block_number, - serialized_circuits, + slice::from_ref(scheduler_circuit), object_store, AggregationRound::Scheduler, ) - .await; } -pub async fn get_artifacts( +pub fn get_artifacts( metadata: WitnessGeneratorJobMetadata, previous_aux_hash: [u8; 32], previous_meta_hash: [u8; 32], - object_store: &DynamicObjectStore, -) -> WitnessGeneratorJob { - let scheduler_witness_serialized = object_store - .get( - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, - scheduler_witness_blob_url(metadata.block_number), - ) - .unwrap(); - let scheduler_witness = bincode::deserialize::>( - &scheduler_witness_serialized, - ) - .expect("scheduler_witness deserialization failed"); - - let final_node_aggregations_serialized = object_store - .get( - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, - 
final_node_aggregations_blob_url(metadata.block_number), - ) - .expect("final_node_aggregations is not found in a `queued` `scheduler_witness_jobs` job"); - let final_node_aggregations = bincode::deserialize::>( - &final_node_aggregations_serialized, - ) - .expect("final_node_aggregations deserialization failed"); + object_store: &dyn ObjectStore, +) -> SchedulerWitnessGeneratorJob { + let scheduler_witness = object_store.get(metadata.block_number).unwrap(); + let final_node_aggregations = object_store.get(metadata.block_number).unwrap(); - WitnessGeneratorJob { + SchedulerWitnessGeneratorJob { block_number: metadata.block_number, - job: WitnessGeneratorJobInput::Scheduler(Box::new(PrepareSchedulerCircuitJob { + job: PrepareSchedulerCircuitJob { incomplete_scheduler_witness: scheduler_witness, final_node_aggregations, node_final_proof_level_proof: metadata.proofs.into_iter().next().unwrap(), previous_aux_hash, previous_meta_hash, - })), + }, } } diff --git a/core/bin/zksync_core/src/witness_generator/tests.rs b/core/bin/zksync_core/src/witness_generator/tests.rs index 73dd23343c23..6f8ff3996b22 100644 --- a/core/bin/zksync_core/src/witness_generator/tests.rs +++ b/core/bin/zksync_core/src/witness_generator/tests.rs @@ -7,29 +7,31 @@ use zksync_types::zkevm_test_harness::witness::tree::{BinarySparseStorageTree, Z fn test_filter_renumerate_all_first_writes() { let logs = vec![ generate_storage_log_metadata( - "DDC60818D8F7CFE42514F8EA3CC52806", - "12E9FF974B0FAEE514AD4AC50E2BDC6E", + "DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + "12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", false, false, 1, ), generate_storage_log_metadata( - "BDA1617CC883E2251D3BE0FD9B3F3064", - "D14917FCB067922F92322025D1BA50B4", + "BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + "D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", true, true, 2, ), generate_storage_log_metadata( - "77F035AD50811CFABD956F6F1B48E482", - "7CF33B959916CC9B56F21C427ED7CA18", + "77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + "7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", true, true, 3, ), ]; let precalculated_merkle_paths_provider = PrecalculatedMerklePathsProvider { - root_hash: hex::decode("4AF44B3D5D4F9C7B117A68351AAB65CF").unwrap(), + root_hash: string_to_array( + "4AF44B3D5D4F9C7B117A68351AAB65CF4AF44B3D5D4F9C7B117A68351AAB65CF", + ), pending_leaves: logs, next_enumeration_index: 4, is_get_leaf_invoked: false, @@ -46,29 +48,31 @@ fn test_filter_renumerate_all_first_writes() { fn test_filter_renumerate_all_repeated_writes() { let logs = vec![ generate_storage_log_metadata( - "DDC60818D8F7CFE42514F8EA3CC52806", - "12E9FF974B0FAEE514AD4AC50E2BDC6E", + "DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + "12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", false, false, 1, ), generate_storage_log_metadata( - "BDA1617CC883E2251D3BE0FD9B3F3064", - "D14917FCB067922F92322025D1BA50B4", + "BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + "D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", true, false, 2, ), generate_storage_log_metadata( - "77F035AD50811CFABD956F6F1B48E482", - "7CF33B959916CC9B56F21C427ED7CA18", + "77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + "7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", true, false, 3, ), ]; let precalculated_merkle_paths_provider = PrecalculatedMerklePathsProvider { - root_hash: 
hex::decode("4AF44B3D5D4F9C7B117A68351AAB65CF").unwrap(), + root_hash: string_to_array( + "4AF44B3D5D4F9C7B117A68351AAB65CF4AF44B3D5D4F9C7B117A68351AAB65CF", + ), pending_leaves: logs, next_enumeration_index: 4, is_get_leaf_invoked: false, @@ -85,29 +89,31 @@ fn test_filter_renumerate_all_repeated_writes() { fn test_filter_renumerate_repeated_writes_with_first_write() { let logs = vec![ generate_storage_log_metadata( - "DDC60818D8F7CFE42514F8EA3CC52806", - "12E9FF974B0FAEE514AD4AC50E2BDC6E", + "DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + "12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", false, false, 1, ), generate_storage_log_metadata( - "BDA1617CC883E2251D3BE0FD9B3F3064", - "D14917FCB067922F92322025D1BA50B4", + "BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + "D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", true, false, 2, ), generate_storage_log_metadata( - "77F035AD50811CFABD956F6F1B48E482", - "7CF33B959916CC9B56F21C427ED7CA18", + "77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + "7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", true, true, 3, ), ]; let precalculated_merkle_paths_provider = PrecalculatedMerklePathsProvider { - root_hash: hex::decode("4AF44B3D5D4F9C7B117A68351AAB65CF").unwrap(), + root_hash: string_to_array( + "4AF44B3D5D4F9C7B117A68351AAB65CF4AF44B3D5D4F9C7B117A68351AAB65CF", + ), pending_leaves: logs, next_enumeration_index: 4, is_get_leaf_invoked: false, @@ -127,29 +133,31 @@ fn test_filter_renumerate_repeated_writes_with_first_write() { fn test_filter_renumerate_panic_when_leafs_and_indices_are_of_different_length() { let logs = vec![ generate_storage_log_metadata( - "DDC60818D8F7CFE42514F8EA3CC52806", - "12E9FF974B0FAEE514AD4AC50E2BDC6E", + "DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + "12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", false, false, 1, ), generate_storage_log_metadata( - "BDA1617CC883E2251D3BE0FD9B3F3064", - "D14917FCB067922F92322025D1BA50B4", + "BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + "D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", true, false, 2, ), generate_storage_log_metadata( - "77F035AD50811CFABD956F6F1B48E482", - "7CF33B959916CC9B56F21C427ED7CA18", + "77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + "7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", true, true, 3, ), ]; let precalculated_merkle_paths_provider = PrecalculatedMerklePathsProvider { - root_hash: hex::decode("4AF44B3D5D4F9C7B117A68351AAB65CF").unwrap(), + root_hash: string_to_array( + "4AF44B3D5D4F9C7B117A68351AAB65CF4AF44B3D5D4F9C7B117A68351AAB65CF", + ), pending_leaves: logs, next_enumeration_index: 4, is_get_leaf_invoked: false, @@ -182,29 +190,31 @@ fn test_filter_renumerate_panic_when_leafs_and_indices_are_of_different_length() fn test_filter_renumerate_panic_when_indices_and_pending_leaves_are_of_different_length() { let logs = vec![ generate_storage_log_metadata( - "DDC60818D8F7CFE42514F8EA3CC52806", - "12E9FF974B0FAEE514AD4AC50E2BDC6E", + "DDC60818D8F7CFE42514F8EA3CC52806DDC60818D8F7CFE42514F8EA3CC52806", + "12E9FF974B0FAEE514AD4AC50E2BDC6E12E9FF974B0FAEE514AD4AC50E2BDC6E", false, false, 1, ), generate_storage_log_metadata( - "BDA1617CC883E2251D3BE0FD9B3F3064", - "D14917FCB067922F92322025D1BA50B4", + "BDA1617CC883E2251D3BE0FD9B3F3064BDA1617CC883E2251D3BE0FD9B3F3064", + 
"D14917FCB067922F92322025D1BA50B4D14917FCB067922F92322025D1BA50B4", true, false, 2, ), generate_storage_log_metadata( - "77F035AD50811CFABD956F6F1B48E482", - "7CF33B959916CC9B56F21C427ED7CA18", + "77F035AD50811CFABD956F6F1B48E48277F035AD50811CFABD956F6F1B48E482", + "7CF33B959916CC9B56F21C427ED7CA187CF33B959916CC9B56F21C427ED7CA18", true, true, 3, ), ]; let precalculated_merkle_paths_provider = PrecalculatedMerklePathsProvider { - root_hash: hex::decode("4AF44B3D5D4F9C7B117A68351AAB65CF").unwrap(), + root_hash: string_to_array( + "4AF44B3D5D4F9C7B117A68351AAB65CF4AF44B3D5D4F9C7B117A68351AAB65CF", + ), pending_leaves: logs, next_enumeration_index: 4, is_get_leaf_invoked: false, @@ -274,10 +284,10 @@ fn generate_storage_log_metadata( leaf_enumeration_index: u64, ) -> StorageLogMetadata { StorageLogMetadata { - root_hash: hex::decode(root_hash).expect("Hex decoding failed"), + root_hash: string_to_array(root_hash), is_write, first_write, - merkle_paths: vec![hex::decode(merkle_path).expect("Hex decoding failed")], + merkle_paths: vec![string_to_array(merkle_path)], leaf_hashed_key: Default::default(), leaf_enumeration_index, value_written: [0; 32], diff --git a/core/bin/zksync_core/src/witness_generator/utils.rs b/core/bin/zksync_core/src/witness_generator/utils.rs index e5726bf5c3c6..4c67c4f3644a 100644 --- a/core/bin/zksync_core/src/witness_generator/utils.rs +++ b/core/bin/zksync_core/src/witness_generator/utils.rs @@ -1,12 +1,14 @@ -use vm::zk_evm::abstractions::MAX_MEMORY_BYTES; use vm::zk_evm::ethereum_types::U256; -use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; -use zksync_object_store::object_store::{DynamicObjectStore, PROVER_JOBS_BUCKET_PATH}; +use zksync_object_store::{CircuitKey, ObjectStore}; +use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; +use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; +use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; +use zksync_types::USED_BOOTLOADER_MEMORY_BYTES; use zksync_types::{proofs::AggregationRound, L1BatchNumber}; pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec { let mut result: Vec = Vec::new(); - result.resize(MAX_MEMORY_BYTES, 0); + result.resize(USED_BOOTLOADER_MEMORY_BYTES, 0); for (offset, value) in packed { value.to_big_endian(&mut result[(offset * 32)..(offset + 1) * 32]); @@ -15,21 +17,25 @@ pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec { result.to_vec() } -pub async fn save_prover_input_artifacts( +pub fn save_prover_input_artifacts( block_number: L1BatchNumber, - serialized_circuits: Vec<(String, Vec)>, - object_store: &mut DynamicObjectStore, + circuits: &[ZkSyncCircuit>], + object_store: &dyn ObjectStore, aggregation_round: AggregationRound, -) { - for (sequence_number, (circuit, input)) in serialized_circuits.into_iter().enumerate() { - let circuit_input_blob_url = prover_circuit_input_blob_url( - block_number, - sequence_number, - circuit, - aggregation_round, - ); - object_store - .put(PROVER_JOBS_BUCKET_PATH, circuit_input_blob_url, input) - .unwrap(); - } +) -> Vec<(&'static str, String)> { + let types_and_urls = circuits + .iter() + .enumerate() + .map(|(sequence_number, circuit)| { + let circuit_type = circuit.short_description(); + let circuit_key = CircuitKey { + block_number, + sequence_number, + circuit_type, + aggregation_round, + }; + let blob_url = object_store.put(circuit_key, circuit).unwrap(); + (circuit_type, blob_url) + }); + types_and_urls.collect() } diff --git 
a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 616e47a71ab2..a8f7cacbae5f 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -109,7 +109,7 @@ basic_type!( basic_type!( /// ChainId in the Ethereum network. L1ChainId, - u8 + u64 ); basic_type!( diff --git a/core/lib/basic_types/src/network.rs b/core/lib/basic_types/src/network.rs index cda956d1b34b..5f4683aeb672 100644 --- a/core/lib/basic_types/src/network.rs +++ b/core/lib/basic_types/src/network.rs @@ -24,6 +24,8 @@ pub enum Network { Ropsten, /// Ethereum Görli testnet. Goerli, + /// Ethereum Sepolia testnet. + Sepolia, /// Self-hosted Ethereum network. Localhost, /// Unknown network type. @@ -42,6 +44,7 @@ impl FromStr for Network { "ropsten" => Self::Ropsten, "goerli" => Self::Goerli, "localhost" => Self::Localhost, + "sepolia" => Self::Sepolia, "test" => Self::Test, another => return Err(another.to_owned()), }) @@ -56,6 +59,7 @@ impl fmt::Display for Network { Self::Ropsten => write!(f, "ropsten"), Self::Goerli => write!(f, "goerli"), Self::Localhost => write!(f, "localhost"), + Self::Sepolia => write!(f, "sepolia"), Self::Unknown => write!(f, "unknown"), Self::Test => write!(f, "test"), } @@ -71,6 +75,7 @@ impl Network { 4 => Self::Rinkeby, 5 => Self::Goerli, 9 => Self::Localhost, + 11155111 => Self::Sepolia, _ => Self::Unknown, } } @@ -83,6 +88,7 @@ impl Network { Self::Rinkeby => L1ChainId(4), Self::Goerli => L1ChainId(5), Self::Localhost => L1ChainId(9), + Self::Sepolia => L1ChainId(11155111), Self::Unknown => panic!("Unknown chain ID"), Self::Test => panic!("Test chain ID"), } diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index b57e8281edd4..b2c69a4300e1 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -25,3 +25,5 @@ tokio = { version = "1", features = ["time"] } async-trait = "0.1" hex = "0.4" convert_case = "0.6.0" +backon = "0.4.0" +assert_matches = "1.5.0" diff --git a/core/lib/circuit_breaker/src/facet_selectors.rs b/core/lib/circuit_breaker/src/facet_selectors.rs index 9072d0f94a7c..03d1590e67f2 100644 --- a/core/lib/circuit_breaker/src/facet_selectors.rs +++ b/core/lib/circuit_breaker/src/facet_selectors.rs @@ -1,17 +1,13 @@ -use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; - -use std::collections::BTreeMap; -use std::env; -use std::fmt; -use std::str::FromStr; -use std::{fs, path::Path}; - +use backon::{ConstantBuilder, Retryable}; use convert_case::{Case, Casing}; +use std::{collections::BTreeMap, env, fmt, fs, path::Path, str::FromStr}; + +use zksync_config::configs::chain::CircuitBreakerConfig; +use zksync_eth_client::{types::Error as EthClientError, BoundEthInterface}; +use zksync_types::{ethabi::Token, Address}; -use zksync_config::ZkSyncConfig; -use zksync_eth_client::clients::http_client::EthereumClient; -use zksync_types::ethabi::Token; -use zksync_types::Address; +// local imports +use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; #[derive(Debug)] pub struct MismatchedFacetSelectorsError { @@ -30,14 +26,15 @@ impl fmt::Display for MismatchedFacetSelectorsError { } #[derive(Debug)] -pub struct FacetSelectorsChecker { - eth_client: EthereumClient, +pub struct FacetSelectorsChecker { + eth_client: E, // BTreeMap is used to have fixed order of elements when printing error. 
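A note on the L1ChainId widening just above: Sepolia's chain ID is 11155111, which cannot be represented in the old u8 backing type (whose maximum is 255), so adding Network::Sepolia requires growing the type, and u64 covers the chain IDs seen in practice. A one-line check of the arithmetic:

// 11_155_111 does not fit in u8, hence the switch from L1ChainId(u8) to L1ChainId(u64).
assert!(11_155_111_u64 > u8::MAX as u64);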
server_selectors: BTreeMap>, + config: CircuitBreakerConfig, } -impl FacetSelectorsChecker { - pub fn new(config: &ZkSyncConfig) -> Self { +impl FacetSelectorsChecker { + pub fn new(config: &CircuitBreakerConfig, eth_client: E) -> Self { let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let path_str = "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/facets"; let facets_path = Path::new(&zksync_home).join(path_str); @@ -74,42 +71,40 @@ impl FacetSelectorsChecker { .collect(); Self { - eth_client: EthereumClient::from_config(config), + eth_client, server_selectors, + config: config.clone(), } } } -impl FacetSelectorsChecker { +impl FacetSelectorsChecker { async fn get_contract_facet_selectors(&self) -> BTreeMap> { - let facets: Token = self - .eth_client - .call_main_contract_function("facets", (), None, Default::default(), None) - .await - .unwrap(); - let facets = facets.into_array().unwrap(); - facets - .into_iter() - .map(|facet| { - let tokens = unwrap_tuple(facet); - let address = tokens[0].clone().into_address().unwrap(); - let selectors = tokens[1] - .clone() - .into_array() - .unwrap() - .into_iter() - .map(|token| { - "0x".to_string() + hex::encode(token.into_fixed_bytes().unwrap()).as_str() - }) - .collect(); - (address, selectors) - }) - .collect() + let facets = self.get_facets_token_with_retry().await.unwrap(); + + parse_faucets_token(facets) + } + + pub(super) async fn get_facets_token_with_retry(&self) -> Result { + (|| async { + let result: Result = self + .eth_client + .call_main_contract_function("facets", (), None, Default::default(), None) + .await; + + result + }) + .retry( + &ConstantBuilder::default() + .with_max_times(self.config.http_req_max_retry_number) + .with_delay(self.config.http_req_retry_interval()), + ) + .await } } #[async_trait::async_trait] -impl CircuitBreaker for FacetSelectorsChecker { +impl CircuitBreaker for FacetSelectorsChecker { async fn check(&self) -> Result<(), CircuitBreakerError> { let contract_selectors = self.get_contract_facet_selectors().await; if self.server_selectors != contract_selectors { @@ -124,3 +119,24 @@ impl CircuitBreaker for FacetSelectorsChecker { Ok(()) } } + +fn parse_faucets_token(facets: Token) -> BTreeMap> { + let facets = facets.into_array().unwrap(); + facets + .into_iter() + .map(|facet| { + let tokens = unwrap_tuple(facet); + let address = tokens[0].clone().into_address().unwrap(); + let selectors = tokens[1] + .clone() + .into_array() + .unwrap() + .into_iter() + .map(|token| { + "0x".to_string() + hex::encode(token.into_fixed_bytes().unwrap()).as_str() + }) + .collect(); + (address, selectors) + }) + .collect() +} diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs index c3a7bd054919..a988227f11a5 100644 --- a/core/lib/circuit_breaker/src/l1_txs.rs +++ b/core/lib/circuit_breaker/src/l1_txs.rs @@ -11,8 +11,7 @@ impl CircuitBreaker for FailedL1TransactionChecker { async fn check(&self) -> Result<(), CircuitBreakerError> { if self .pool - .access_storage() - .await + .access_storage_blocking() .eth_sender_dal() .get_number_of_failed_transactions() > 0 diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index 611f922cf56e..7baafeec914e 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -14,6 +14,9 @@ pub mod l1_txs; pub mod utils; pub mod vks; +#[cfg(test)] +mod tests; + #[derive(Debug, Error)] pub enum CircuitBreakerError { #[error("System has failed L1 
transaction")] @@ -32,7 +35,7 @@ pub struct CircuitBreakerChecker { } #[async_trait::async_trait] -pub trait CircuitBreaker: std::fmt::Debug + Send + Sync + 'static { +pub trait CircuitBreaker: std::fmt::Debug + Send + Sync { async fn check(&self) -> Result<(), CircuitBreakerError>; } diff --git a/core/lib/circuit_breaker/src/tests/mod.rs b/core/lib/circuit_breaker/src/tests/mod.rs new file mode 100644 index 000000000000..0f793c52d816 --- /dev/null +++ b/core/lib/circuit_breaker/src/tests/mod.rs @@ -0,0 +1,293 @@ +use std::sync::Mutex; + +use assert_matches::assert_matches; +use async_trait::async_trait; + +use zksync_config::configs::chain::CircuitBreakerConfig; +use zksync_eth_client::{ + types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}, + BoundEthInterface, EthInterface, +}; +use zksync_types::{ + ethabi::Token, + web3::{ + self, + contract::{ + tokens::{Detokenize, Tokenize}, + Options, + }, + error::TransportError, + ethabi, + types::{ + Address, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, + H256, U256, + }, + }, + L1ChainId, U64, +}; + +#[derive(Debug)] +pub struct ETHDirectClientMock { + contract: ethabi::Contract, + // next 2 are needed for simulation of the few ZksInterface functions, + // to test retries + circuit_breaker_config: CircuitBreakerConfig, + counter: Mutex, +} + +impl ETHDirectClientMock { + pub fn new() -> Self { + Self { + contract: Default::default(), + circuit_breaker_config: get_test_circuit_breaker_config(), + counter: Mutex::new(0), + } + } + + fn inc_counter(&self) { + let mut current = self.counter.lock().unwrap(); + *current += 1; + } + + fn get_counter_cur_val(&self) -> u8 { + let current = self.counter.lock().unwrap(); + *current + } + + fn reset_counter(&self) { + let mut current = self.counter.lock().unwrap(); + *current = 0; + } + + // The idea of this function is to simulate the behavior when function call fails all the time, + // and when the current attempt is the last one it succeeds and returns Ok() + pub fn simulate_get_contract_behavior(&self) -> Result + where + R: Detokenize + Unpin, + { + self.inc_counter(); + + let cur_val = self.get_counter_cur_val(); + // If the condition returns `true`, it means that its the last attempt of the retry() wrapper function. + // Otherwise we pretend that there are some eth_client issues and return Err() + if cur_val as usize == self.circuit_breaker_config.http_req_max_retry_number { + self.reset_counter(); + Ok( + Detokenize::from_tokens(vec![Token::Array(vec![Token::Tuple(vec![ + Token::Address(H160::zero()), + Token::Array(vec![Token::FixedBytes(vec![0, 0, 0, 0, 0, 0])]), + ])])]) + .unwrap(), + ) + } else { + Err(Error::EthereumGateway(web3::error::Error::Transport( + TransportError::Code(503), + ))) + } + } +} + +fn get_test_circuit_breaker_config() -> CircuitBreakerConfig { + CircuitBreakerConfig { + sync_interval_ms: 1000, + http_req_max_retry_number: 5, + http_req_retry_interval_sec: 2, + } +} +#[async_trait] +impl EthInterface for ETHDirectClientMock { + /// Note: The only really implemented method! Other ones are just stubs. 
+ #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + _func: &str, + _params: P, + _from: A, + _options: Options, + _block: B, + _contract_address: Address, + _contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send, + { + self.simulate_get_contract_behavior() + } + + async fn get_tx_status( + &self, + _hash: H256, + _: &'static str, + ) -> Result, Error> { + Ok(None) + } + + async fn block_number(&self, _: &'static str) -> Result { + Ok(Default::default()) + } + + async fn send_raw_tx(&self, _tx: Vec) -> Result { + Ok(Default::default()) + } + + async fn nonce_at_for_account( + &self, + _account: Address, + _block: BlockNumber, + _: &'static str, + ) -> Result { + Ok(Default::default()) + } + + async fn get_gas_price(&self, _: &'static str) -> Result { + Ok(Default::default()) + } + + async fn base_fee_history( + &self, + _from_block: usize, + _block_count: usize, + _component: &'static str, + ) -> Result, Error> { + Ok(Default::default()) + } + + async fn get_pending_block_base_fee_per_gas( + &self, + _component: &'static str, + ) -> Result { + Ok(Default::default()) + } + + async fn failure_reason(&self, _tx_hash: H256) -> Result, Error> { + Ok(Default::default()) + } + + async fn get_tx( + &self, + _hash: H256, + _component: &'static str, + ) -> Result, Error> { + Ok(Default::default()) + } + + async fn tx_receipt( + &self, + _tx_hash: H256, + _component: &'static str, + ) -> Result, Error> { + Ok(Default::default()) + } + + async fn eth_balance( + &self, + _address: Address, + _component: &'static str, + ) -> Result { + Ok(Default::default()) + } + + async fn logs(&self, _filter: Filter, _component: &'static str) -> Result, Error> { + Ok(Default::default()) + } +} + +#[async_trait] +impl BoundEthInterface for ETHDirectClientMock { + fn contract(&self) -> ðabi::Contract { + &self.contract + } + + fn contract_addr(&self) -> H160 { + Default::default() + } + + fn chain_id(&self) -> L1ChainId { + L1ChainId(0) + } + + fn sender_account(&self) -> Address { + Default::default() + } + + async fn sign_prepared_tx_for_addr( + &self, + _data: Vec, + _contract_addr: H160, + _options: Options, + _component: &'static str, + ) -> Result { + Ok(SignedCallResult { + raw_tx: vec![], + max_priority_fee_per_gas: U256::zero(), + max_fee_per_gas: U256::zero(), + nonce: U256::zero(), + hash: H256::zero(), + }) + } + + async fn allowance_on_account( + &self, + _token_address: Address, + _contract_address: Address, + _erc20_abi: ethabi::Contract, + ) -> Result { + Ok(Default::default()) + } +} + +#[tokio::test] +async fn retries_for_contract_vk() { + let eth_client = ETHDirectClientMock::new(); + let result: Result = eth_client + .call_main_contract_function("facets", (), None, Default::default(), None) + .await; + + assert_matches!( + result, + Err(Error::EthereumGateway(web3::error::Error::Transport( + TransportError::Code(503), + ))) + ); + + let config = get_test_circuit_breaker_config(); + let vks_checker = crate::vks::VksChecker::new(&config, eth_client); + + assert_matches!(vks_checker.get_vk_token_with_retries().await, Ok(_)); +} + +#[tokio::test] +async fn retries_for_facet_selectors() { + let eth_client = ETHDirectClientMock::new(); + + let result: Result = eth_client + .call_contract_function( + "get_verification_key", + (), + None, + Default::default(), + None, + Address::default(), + eth_client.contract().clone(), + ) + .await; + + assert_matches!( + result, + 
Err(Error::EthereumGateway(web3::error::Error::Transport( + TransportError::Code(503), + ))) + ); + + let config = get_test_circuit_breaker_config(); + let facet_selectors_checker = + crate::facet_selectors::FacetSelectorsChecker::new(&config, eth_client); + + assert_matches!( + facet_selectors_checker.get_facets_token_with_retry().await, + Ok(_) + ); +} diff --git a/core/lib/circuit_breaker/src/vks.rs b/core/lib/circuit_breaker/src/vks.rs index e4cd88b1a6be..87fa1caeee3d 100644 --- a/core/lib/circuit_breaker/src/vks.rs +++ b/core/lib/circuit_breaker/src/vks.rs @@ -1,18 +1,27 @@ -use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; +use backon::{ConstantBuilder, Retryable}; use serde::{Deserialize, Serialize}; -use std::convert::TryInto; -use std::{env, str::FromStr}; +use std::{ + convert::TryInto, + fmt::Debug, + {env, str::FromStr}, +}; use thiserror::Error; -use zksync_config::ZkSyncConfig; -use zksync_eth_client::clients::http_client::EthereumClient; -use zksync_types::ethabi::Token; -use zksync_types::zkevm_test_harness::bellman::{ - bn256::{Fq, Fq2, Fr, G1Affine, G2Affine}, - CurveAffine, PrimeField, + +use zksync_config::configs::chain::CircuitBreakerConfig; +use zksync_eth_client::{types::Error as EthClientError, BoundEthInterface}; +use zksync_types::{ + ethabi::Token, + zkevm_test_harness::bellman::{ + bn256::{Fq, Fq2, Fr, G1Affine, G2Affine}, + CurveAffine, PrimeField, + }, + {Address, H256}, }; -use zksync_types::{Address, H256}; use zksync_verification_key_server::get_vk_for_circuit_type; +// local imports +use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; + #[derive(Debug, Error)] pub enum VerifierError { #[error("Verifier address from the env var is different from the one in Diamond Proxy contract, from env: {address_from_env:?}, from contract: {address_from_contract:?}")] @@ -57,25 +66,38 @@ pub struct VerificationKey { } #[derive(Debug)] -pub struct VksChecker { - pub eth_client: EthereumClient, +pub struct VksChecker { + pub eth_client: E, + pub config: CircuitBreakerConfig, } -impl VksChecker { - pub fn new(config: &ZkSyncConfig) -> Self { +impl VksChecker { + pub fn new(config: &CircuitBreakerConfig, eth_client: E) -> Self { Self { - eth_client: EthereumClient::from_config(config), + eth_client, + config: config.clone(), } } async fn check_verifier_address(&self) -> Result<(), CircuitBreakerError> { let address_from_env = Address::from_str(&env::var("CONTRACTS_VERIFIER_ADDR").unwrap()).unwrap(); - let address_from_contract: Address = self - .eth_client - .call_main_contract_function("getVerifier", (), None, Default::default(), None) - .await - .unwrap(); + + let address_from_contract: Address = (|| async { + let result: Result = self + .eth_client + .call_main_contract_function("getVerifier", (), None, Default::default(), None) + .await; + result + }) + .retry( + &ConstantBuilder::default() + .with_max_times(self.config.http_req_max_retry_number) + .with_delay(self.config.http_req_retry_interval()), + ) + .await + .unwrap(); + if address_from_env != address_from_contract { return Err(CircuitBreakerError::Verifier( VerifierError::VerifierAddressMismatch { @@ -88,11 +110,27 @@ impl VksChecker { } async fn check_commitments(&self) -> Result<(), CircuitBreakerError> { - let verifier_params_token: Token = self - .eth_client - .call_main_contract_function("getVerifierParams", (), None, Default::default(), None) - .await - .unwrap(); + let verifier_params_token: Token = (|| async { + let result: Result = self + .eth_client + 
.call_main_contract_function( + "getVerifierParams", + (), + None, + Default::default(), + None, + ) + .await; + result + }) + .retry( + &ConstantBuilder::default() + .with_max_times(self.config.http_req_max_retry_number) + .with_delay(self.config.http_req_retry_interval()), + ) + .await + .unwrap(); + let vks_vec: Vec = unwrap_tuple(verifier_params_token) .into_iter() .map(|token| H256::from_slice(&token.into_fixed_bytes().unwrap())) @@ -150,93 +188,42 @@ impl VksChecker { } async fn get_contract_vk(&self) -> VerificationKey { + let vk_token = self.get_vk_token_with_retries().await.unwrap(); + + parse_vk_token(vk_token) + } + + pub(super) async fn get_vk_token_with_retries(&self) -> Result { let verifier_contract_address = Address::from_str(&env::var("CONTRACTS_VERIFIER_ADDR").unwrap()).unwrap(); let verifier_contract_abi = zksync_contracts::verifier_contract(); - let vk_token: Token = self - .eth_client - .call_contract_function( - "get_verification_key", - (), - None, - Default::default(), - None, - verifier_contract_address, - verifier_contract_abi, - ) - .await - .unwrap(); - - let tokens = unwrap_tuple(vk_token); - let n = tokens[0].clone().into_uint().unwrap().as_usize() - 1; - let num_inputs = tokens[1].clone().into_uint().unwrap().as_usize(); - let gate_selectors_commitments = tokens[3] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let gate_setup_commitments = tokens[4] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let permutation_commitments = tokens[5] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let lookup_selector_commitment = g1_affine_from_token(tokens[6].clone()); - let lookup_tables_commitments = tokens[7] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g1_affine_from_token) - .collect(); - let lookup_table_type_commitment = g1_affine_from_token(tokens[8].clone()); - let non_residues = tokens[9] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(fr_from_token) - .collect(); - let g2_elements = tokens[10] - .clone() - .into_fixed_array() - .unwrap() - .into_iter() - .map(g2_affine_from_token) - .collect::>() - .try_into() - .unwrap(); - - VerificationKey { - n, - num_inputs, - - gate_setup_commitments, - gate_selectors_commitments, - permutation_commitments, - - lookup_selector_commitment: Some(lookup_selector_commitment), - lookup_tables_commitments, - lookup_table_type_commitment: Some(lookup_table_type_commitment), + (|| async { + let result: Result = self + .eth_client + .call_contract_function( + "get_verification_key", + (), + None, + Default::default(), + None, + verifier_contract_address, + verifier_contract_abi.clone(), + ) + .await; - non_residues, - g2_elements, - } + result + }) + .retry( + &ConstantBuilder::default() + .with_max_times(self.config.http_req_max_retry_number) + .with_delay(self.config.http_req_retry_interval()), + ) + .await } } #[async_trait::async_trait] -impl CircuitBreaker for VksChecker { +impl CircuitBreaker for VksChecker { async fn check(&self) -> Result<(), CircuitBreakerError> { self.check_verifier_address().await?; self.check_commitments().await?; @@ -273,3 +260,70 @@ fn g2_affine_from_token(token: Token) -> G2Affine { }, ) } + +fn parse_vk_token(vk_token: Token) -> VerificationKey { + let tokens = unwrap_tuple(vk_token); + let n = tokens[0].clone().into_uint().unwrap().as_usize() - 1; + let num_inputs = 
tokens[1].clone().into_uint().unwrap().as_usize(); + let gate_selectors_commitments = tokens[3] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + let gate_setup_commitments = tokens[4] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + let permutation_commitments = tokens[5] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + let lookup_selector_commitment = g1_affine_from_token(tokens[6].clone()); + let lookup_tables_commitments = tokens[7] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g1_affine_from_token) + .collect(); + let lookup_table_type_commitment = g1_affine_from_token(tokens[8].clone()); + let non_residues = tokens[9] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(fr_from_token) + .collect(); + + let g2_elements = tokens[10] + .clone() + .into_fixed_array() + .unwrap() + .into_iter() + .map(g2_affine_from_token) + .collect::>(); + + VerificationKey { + n, + num_inputs, + + gate_setup_commitments, + gate_selectors_commitments, + permutation_commitments, + + lookup_selector_commitment: Some(lookup_selector_commitment), + lookup_tables_commitments, + lookup_table_type_commitment: Some(lookup_table_type_commitment), + + non_residues, + g2_elements: g2_elements.try_into().unwrap(), + } +} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 9e11708176c9..ea4a71f009ed 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/era license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -12,6 +12,7 @@ categories = ["cryptography"] [dependencies] zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" } zksync_utils = { path = "../../lib/utils", version = "1.0" } +zksync_contracts = { path = "../../lib/contracts", version = "1.0" } url = "2.1" num = "0.3.1" diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index ac606d955d9b..e4f2962878d9 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -17,6 +17,8 @@ pub struct ApiConfig { pub explorer: Explorer, /// Configuration options for the Prometheus exporter. pub prometheus: Prometheus, + /// Configuration options for the Health check. + pub healthcheck: HealthCheck, } impl ApiConfig { @@ -25,6 +27,7 @@ impl ApiConfig { web3_json_rpc: envy_load!("web3_json_rpc", "API_WEB3_JSON_RPC_"), explorer: envy_load!("explorer", "API_EXPLORER_"), prometheus: envy_load!("prometheus", "API_PROMETHEUS_"), + healthcheck: envy_load!("healthcheck", "API_HEALTHCHECK_"), } } } @@ -68,6 +71,9 @@ pub struct Web3JsonRpc { pub max_tx_size: usize, /// Main node URL - used only by external node to proxy transactions to. pub main_node_url: Option, + /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the api server panics. + /// This is a temporary solution to mitigate API request resulting in thousands of DB queries. 
+ pub vm_execution_cache_misses_limit: Option, } impl Web3JsonRpc { @@ -104,6 +110,18 @@ impl Web3JsonRpc { } } +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct HealthCheck { + /// Port to which the REST server is listening. + pub port: u16, +} + +impl HealthCheck { + pub fn bind_addr(&self) -> SocketAddr { + SocketAddr::new("0.0.0.0".parse().unwrap(), self.port) + } +} + #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct Explorer { /// Port to which the REST server is listening. @@ -175,6 +193,7 @@ mod tests { estimate_gas_acceptable_overestimation: 1000, max_tx_size: 1000000, main_node_url: None, + vm_execution_cache_misses_limit: None, }, explorer: Explorer { port: 3070, @@ -189,6 +208,7 @@ mod tests { pushgateway_url: "http://127.0.0.1:9091".into(), push_interval_ms: Some(100), }, + healthcheck: HealthCheck { port: 8081 }, } } @@ -221,6 +241,7 @@ API_EXPLORER_THREADS_PER_SERVER=128 API_PROMETHEUS_LISTENER_PORT="3312" API_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" API_PROMETHEUS_PUSH_INTERVAL_MS=100 +API_HEALTHCHECK_PORT=8081 "#; set_env(config); diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index a6c7d7268915..c3e942c5d9df 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -5,6 +5,7 @@ use std::time::Duration; // Local uses use zksync_basic_types::network::Network; use zksync_basic_types::{Address, H256}; +use zksync_contracts::BaseSystemContractsHashes; use crate::envy_load; @@ -79,8 +80,6 @@ pub struct StateKeeperConfig { pub fee_account_addr: Address, - pub reexecute_each_tx: bool, - /// The price the operator spends on 1 gas of computation in wei. pub fair_l2_gas_price: u64, @@ -89,6 +88,22 @@ pub struct StateKeeperConfig { /// Max number of computational gas that validation step is allowed to take. pub validation_computational_gas_limit: u32, + pub save_call_traces: bool, + /// Max number of l1 gas price that is allowed to be used in state keeper. 
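Each of the config structs touched here is populated through envy_load! with a fixed prefix, so every new field added in this change maps to an environment variable named prefix plus the upper-cased field name -- which is what the updated test fixtures in this change spell out (API_HEALTHCHECK_PORT, CHAIN_STATE_KEEPER_SAVE_CALL_TRACES, CHAIN_STATE_KEEPER_MAX_L1_GAS_PRICE, and so on). A minimal sketch of that mapping, assuming envy_load! delegates to the envy crate's prefixed loader; the standalone main below is illustrative only:

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
struct HealthCheck {
    port: u16,
}

fn main() {
    // The prefix plus the upper-cased field name selects the variable,
    // so `port` here is read from API_HEALTHCHECK_PORT.
    std::env::set_var("API_HEALTHCHECK_PORT", "8081");
    let config: HealthCheck = envy::prefixed("API_HEALTHCHECK_").from_env().unwrap();
    assert_eq!(config, HealthCheck { port: 8081 });
}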
+ pub max_l1_gas_price: Option, +} + +impl StateKeeperConfig { + pub fn max_l1_gas_price(&self) -> u64 { + self.max_l1_gas_price.unwrap_or(u64::MAX) + } + + pub fn base_system_contracts_hashes(&self) -> BaseSystemContractsHashes { + BaseSystemContractsHashes { + bootloader: self.bootloader_hash, + default_aa: self.default_aa_hash, + } + } } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -106,12 +121,18 @@ impl OperationsManager { #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct CircuitBreakerConfig { pub sync_interval_ms: u64, + pub http_req_max_retry_number: usize, + pub http_req_retry_interval_sec: u8, } impl CircuitBreakerConfig { pub fn sync_interval(&self) -> Duration { Duration::from_millis(self.sync_interval_ms) } + + pub fn http_req_retry_interval(&self) -> Duration { + Duration::from_secs(self.http_req_retry_interval_sec as u64) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -158,11 +179,12 @@ mod tests { reject_tx_at_geometry_percentage: 0.3, fee_account_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"), reject_tx_at_gas_percentage: 0.5, - reexecute_each_tx: true, fair_l2_gas_price: 250000000, bootloader_hash: H256::from(&[254; 32]), default_aa_hash: H256::from(&[254; 32]), validation_computational_gas_limit: 10_000_000, + save_call_traces: false, + max_l1_gas_price: Some(100000000), }, operations_manager: OperationsManager { delay_interval: 100, @@ -176,6 +198,8 @@ mod tests { }, circuit_breaker: CircuitBreakerConfig { sync_interval_ms: 1000, + http_req_max_retry_number: 5, + http_req_retry_interval_sec: 2, }, } } @@ -196,13 +220,14 @@ CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE="0.2" CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE="0.3" CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE="0.8" CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE="0.5" -CHAIN_STATE_KEEPER_REEXECUTE_EACH_TX="true" CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS="2500" CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS="1000" CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE="250000000" CHAIN_STATE_KEEPER_BOOTLOADER_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" +CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" +CHAIN_STATE_KEEPER_MAX_L1_GAS_PRICE="100000000" CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" @@ -210,6 +235,8 @@ CHAIN_MEMPOOL_STUCK_TX_TIMEOUT="10" CHAIN_MEMPOOL_REMOVE_STUCK_TXS="true" CHAIN_MEMPOOL_CAPACITY="1000000" CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS="1000" +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER="5" +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC="2" "#; set_env(config); diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index d1c5a909b38a..1f6e0563294a 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -1,23 +1,37 @@ use serde::Deserialize; -use std::env; -use std::time::Duration; + +use std::{env, str::FromStr, time::Duration}; /// Database configuration. -#[derive(Debug, Deserialize, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DBConfig { /// Path to the database data directory. pub path: String, /// Path to the database data directory that serves state cache. 
pub state_keeper_db_path: String, - /// Path to merkle tree backup directory + /// Path to merkle tree backup directory. pub merkle_tree_backup_path: String, - /// Fast ssd path + /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*old* implementation) + /// if the lightweight syncing is enabled. pub merkle_tree_fast_ssd_path: String, - /// Number of backups to keep + /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*new* implementation). + // We cannot compute this path like + // + // ``` + // new_merkle_tree_ssd_path = merkle_tree_fast_ssd_path.join("new") + // ``` + // + // because (1) we need to maintain backward compatibility; (2) it looks dangerous + // to place a RocksDB instance in a subdirectory of another RocksDB instance. + pub new_merkle_tree_ssd_path: String, + /// Throttle interval for the new tree implementation in milliseconds. This interval will be + /// applied after each time the tree makes progress. + pub new_merkle_tree_throttle_ms: u64, + /// Number of backups to keep. pub backup_count: usize, - /// Time interval between performing backups + /// Time interval between performing backups. pub backup_interval_ms: u64, - /// Maximum number of blocks to be processed by the full tree at a time + /// Maximum number of blocks to be processed by the full tree at a time. pub max_block_batch: usize, } @@ -28,6 +42,8 @@ impl Default for DBConfig { state_keeper_db_path: "./db/state_keeper".to_owned(), merkle_tree_backup_path: "./db/backups".to_owned(), merkle_tree_fast_ssd_path: "./db/lightweight".to_owned(), + new_merkle_tree_ssd_path: "./db/lightweight-new".to_owned(), + new_merkle_tree_throttle_ms: 0, backup_count: 5, backup_interval_ms: 60_000, max_block_batch: 100, @@ -50,18 +66,29 @@ impl DBConfig { if let Ok(path) = env::var("DATABASE_MERKLE_TREE_FAST_SSD_PATH") { config.merkle_tree_fast_ssd_path = path; } - if let Ok(Ok(count)) = env::var("DATABASE_BACKUP_COUNT").map(|s| s.parse()) { + if let Ok(path) = env::var("DATABASE_NEW_MERKLE_TREE_SSD_PATH") { + config.new_merkle_tree_ssd_path = path; + } + if let Some(interval) = Self::parse_env_var("DATABASE_NEW_MERKLE_TREE_THROTTLE_MS") { + config.new_merkle_tree_throttle_ms = interval; + } + if let Some(count) = Self::parse_env_var("DATABASE_BACKUP_COUNT") { config.backup_count = count; } - if let Ok(Ok(interval)) = env::var("DATABASE_BACKUP_INTERVAL_MS").map(|s| s.parse()) { + if let Some(interval) = Self::parse_env_var("DATABASE_BACKUP_INTERVAL_MS") { config.backup_interval_ms = interval; } - if let Ok(Ok(size)) = env::var("DATABASE_MAX_BLOCK_BATCH").map(|s| s.parse()) { + if let Some(size) = Self::parse_env_var("DATABASE_MAX_BLOCK_BATCH") { config.max_block_batch = size; } config } + fn parse_env_var(key: &str) -> Option { + let env_var = env::var(key).ok()?; + env_var.parse().ok() + } + /// Path to the database data directory. pub fn path(&self) -> &str { &self.path @@ -81,6 +108,11 @@ impl DBConfig { &self.merkle_tree_fast_ssd_path } + /// Throttle interval for the new Merkle tree implementation. 
+ pub fn new_merkle_tree_throttle_interval(&self) -> Duration { + Duration::from_millis(self.new_merkle_tree_throttle_ms) + } + /// Number of backups to keep pub fn backup_count(&self) -> usize { self.backup_count @@ -100,18 +132,6 @@ mod tests { use super::*; use crate::configs::test_utils::set_env; - fn expected_config() -> DBConfig { - DBConfig { - path: "./db".to_owned(), - state_keeper_db_path: "./db/state_keeper".to_owned(), - merkle_tree_backup_path: "./db/backups".to_owned(), - merkle_tree_fast_ssd_path: "./db/lightweight".to_owned(), - backup_count: 5, - backup_interval_ms: 60_000, - max_block_batch: 100, - } - } - #[test] fn from_env() { let config = r#" @@ -119,6 +139,8 @@ DATABASE_PATH="./db" DATABASE_STATE_KEEPER_DB_PATH="./db/state_keeper" DATABASE_MERKLE_TREE_BACKUP_PATH="./db/backups" DATABASE_MERKLE_TREE_FAST_SSD_PATH="./db/lightweight" +DATABASE_NEW_MERKLE_TREE_SSD_PATH="./db/lightweight-new" +DATABASE_NEW_MERKLE_TREE_THROTTLE_MS=0 DATABASE_BACKUP_COUNT=5 DATABASE_BACKUP_INTERVAL_MS=60000 DATABASE_MAX_BLOCK_BATCH=100 @@ -126,13 +148,13 @@ DATABASE_MAX_BLOCK_BATCH=100 set_env(config); let actual = DBConfig::from_env(); - assert_eq!(actual, expected_config()); + assert_eq!(actual, DBConfig::default()); } /// Checks the correctness of the config helper methods. #[test] fn methods() { - let config = expected_config(); + let config = DBConfig::default(); assert_eq!(config.path(), &config.path); assert_eq!(config.state_keeper_db_path(), &config.state_keeper_db_path); diff --git a/core/lib/config/src/configs/eth_client.rs b/core/lib/config/src/configs/eth_client.rs index 96df517f425c..00fc08bf8de5 100644 --- a/core/lib/config/src/configs/eth_client.rs +++ b/core/lib/config/src/configs/eth_client.rs @@ -7,7 +7,7 @@ use crate::envy_load; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ETHClientConfig { /// Numeric identifier of the L1 network (e.g. `9` for localhost). - pub chain_id: u8, + pub chain_id: u64, /// Address of the Ethereum node API. pub web3_url: String, } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 41ab695e30bf..f10462dd2dbf 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -63,6 +63,8 @@ pub struct SenderConfig { /// Note that this number must be slightly higher than the one set on the contract, /// because the contract uses block.timestamp which lags behind the clock time. pub l1_batch_min_age_before_execute_seconds: Option, + // Max acceptable fee for sending tx it acts as a safeguard to prevent sending tx with very high fees. 
+ pub max_acceptable_priority_fee_in_gwei: u64, } impl SenderConfig { @@ -129,6 +131,7 @@ mod tests { operator_commit_eth_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"), proof_sending_mode: ProofSendingMode::SkipEveryProof, l1_batch_min_age_before_execute_seconds: Some(1000), + max_acceptable_priority_fee_in_gwei: 100_000_000_000, }, gas_adjuster: GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -169,6 +172,7 @@ ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" +ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" "#; set_env(config); diff --git a/core/lib/config/src/configs/fetcher.rs b/core/lib/config/src/configs/fetcher.rs index 5f51dc76edef..b83e5794651e 100644 --- a/core/lib/config/src/configs/fetcher.rs +++ b/core/lib/config/src/configs/fetcher.rs @@ -6,6 +6,7 @@ use serde::Deserialize; // Workspace uses // Local uses use crate::envy_load; + #[derive(Debug, Deserialize, Clone, Copy, PartialEq)] pub enum TokenListSource { OneInch, diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs new file mode 100644 index 000000000000..fb80311ddd7c --- /dev/null +++ b/core/lib/config/src/configs/house_keeper.rs @@ -0,0 +1,56 @@ +use serde::Deserialize; + +use crate::envy_load; + +/// Configuration for the house keeper. +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct HouseKeeperConfig { + pub l1_batch_metrics_reporting_interval_ms: u64, + pub blob_cleaning_interval_ms: u64, + pub gpu_prover_queue_reporting_interval_ms: u64, + pub prover_job_retrying_interval_ms: u64, + pub prover_stats_reporting_interval_ms: u64, + pub witness_job_moving_interval_ms: u64, + pub witness_generator_stats_reporting_interval_ms: u64, +} + +impl HouseKeeperConfig { + pub fn from_env() -> Self { + envy_load!("house_keeper", "HOUSE_KEEPER_") + } +} + +#[cfg(test)] +mod tests { + use crate::configs::test_utils::set_env; + + use super::*; + + fn expected_config() -> HouseKeeperConfig { + HouseKeeperConfig { + l1_batch_metrics_reporting_interval_ms: 10_000, + blob_cleaning_interval_ms: 60_000, + gpu_prover_queue_reporting_interval_ms: 10_000, + prover_job_retrying_interval_ms: 300_000, + prover_stats_reporting_interval_ms: 5_000, + witness_job_moving_interval_ms: 30_000, + witness_generator_stats_reporting_interval_ms: 10_000, + } + } + + #[test] + fn from_env() { + let config = r#" +HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS="10000" +HOUSE_KEEPER_BLOB_CLEANING_INTERVAL_MS="60000" +HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS="10000" +HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="300000" +HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" +HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" +HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" + "#; + set_env(config); + let actual = HouseKeeperConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 43e9b1efeb5e..52889cf5f33d 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -18,6 +18,7 @@ pub mod eth_client; pub mod eth_sender; pub mod eth_watch; pub mod fetcher; +pub mod house_keeper; pub mod nfs; pub mod object_store; pub mod prover; diff --git a/core/lib/config/src/configs/object_store.rs 
b/core/lib/config/src/configs/object_store.rs index e61197f7cec1..c1bf36f87eb3 100644 --- a/core/lib/config/src/configs/object_store.rs +++ b/core/lib/config/src/configs/object_store.rs @@ -1,12 +1,21 @@ use crate::envy_load; use serde::Deserialize; +#[derive(Debug, Deserialize, Eq, PartialEq, Clone, Copy)] +pub enum ObjectStoreMode { + GCS, + GCSWithCredentialFile, + FileBacked, +} + /// Configuration for the object store #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ObjectStoreConfig { pub bucket_base_url: String, - pub mode: String, + pub mode: ObjectStoreMode, pub file_backed_base_path: String, + pub gcs_credential_file_path: String, + pub max_retries: u16, } impl ObjectStoreConfig { @@ -23,8 +32,10 @@ mod tests { fn expected_config() -> ObjectStoreConfig { ObjectStoreConfig { bucket_base_url: "/base/url".to_string(), - mode: "FileBacked".to_string(), + mode: ObjectStoreMode::FileBacked, file_backed_base_path: "artifacts".to_string(), + gcs_credential_file_path: "/path/to/credentials.json".to_string(), + max_retries: 5, } } @@ -34,6 +45,8 @@ mod tests { OBJECT_STORE_BUCKET_BASE_URL="/base/url" OBJECT_STORE_MODE="FileBacked" OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" +OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" +OBJECT_STORE_MAX_RETRIES="5" "#; set_env(config); let actual = ObjectStoreConfig::from_env(); diff --git a/core/lib/config/src/configs/prover.rs b/core/lib/config/src/configs/prover.rs index 73f4abd92a24..7b49823541eb 100644 --- a/core/lib/config/src/configs/prover.rs +++ b/core/lib/config/src/configs/prover.rs @@ -14,7 +14,7 @@ pub struct ProverConfig { pub prometheus_port: u16, /// Currently only a single (largest) key is supported. We'll support different ones in the future pub initial_setup_key_path: String, - /// https://storage.googleapis.com/universal-setup/setup_2\^26.key + /// https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key pub key_download_url: String, /// Max time for proof to be generated pub generation_timeout_in_secs: u16, diff --git a/core/lib/config/src/configs/prover_group.rs b/core/lib/config/src/configs/prover_group.rs index 65ab19af21b6..8e10b83f9769 100644 --- a/core/lib/config/src/configs/prover_group.rs +++ b/core/lib/config/src/configs/prover_group.rs @@ -17,6 +17,12 @@ pub struct ProverGroupConfig { pub group_8_circuit_ids: Vec, pub group_9_circuit_ids: Vec, pub region_read_url: String, + // This is used while running the provers/synthesizer in non-gcp cloud env. + pub region_override: Option, + pub zone_read_url: String, + // This is used while running the provers/synthesizer in non-gcp cloud env. 
+ pub zone_override: Option, + pub synthesizer_per_gpu: u16, } impl ProverGroupConfig { @@ -84,6 +90,10 @@ mod tests { group_8_circuit_ids: vec![16, 17], group_9_circuit_ids: vec![3], region_read_url: "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location".to_string(), + region_override: Some("us-central-1".to_string()), + zone_read_url: "http://metadata.google.internal/computeMetadata/v1/instance/zone".to_string(), + zone_override: Some("us-central-1-b".to_string()), + synthesizer_per_gpu: 10, } } @@ -99,6 +109,10 @@ mod tests { PROVER_GROUP_GROUP_8_CIRCUIT_IDS="16,17" PROVER_GROUP_GROUP_9_CIRCUIT_IDS="3" PROVER_GROUP_REGION_READ_URL="http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location" + PROVER_GROUP_REGION_OVERRIDE="us-central-1" + PROVER_GROUP_ZONE_READ_URL="http://metadata.google.internal/computeMetadata/v1/instance/zone" + PROVER_GROUP_ZONE_OVERRIDE="us-central-1-b" + PROVER_GROUP_SYNTHESIZER_PER_GPU="10" "#; #[test] diff --git a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs index 68abe45d6c86..3e5a17ca886c 100644 --- a/core/lib/config/src/configs/witness_generator.rs +++ b/core/lib/config/src/configs/witness_generator.rs @@ -14,7 +14,7 @@ pub struct WitnessGeneratorConfig { pub generation_timeout_in_secs: u16, /// Currently only a single (largest) key is supported. pub initial_setup_key_path: String, - /// https://storage.googleapis.com/universal-setup/setup_2\^26.key + /// https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key pub key_download_url: String, /// Max attempts for generating witness pub max_attempts: u32, diff --git a/core/lib/config/src/constants/crypto.rs b/core/lib/config/src/constants/crypto.rs index 800a58c189a8..dcc9d34a9ebd 100644 --- a/core/lib/config/src/constants/crypto.rs +++ b/core/lib/config/src/constants/crypto.rs @@ -21,3 +21,9 @@ pub const MAX_TXS_IN_BLOCK: usize = 1024; pub const MAX_NEW_FACTORY_DEPS: usize = 32; pub const PAD_MSG_BEFORE_HASH_BITS_LEN: usize = 736; + +/// The size of the bootloader memory in bytes which is used by the protocol. +/// While the maximal possible size is a lot higher, we restrict ourselves to a certain limit to reduce +/// the requirements on RAM.
+pub const USED_BOOTLOADER_MEMORY_BYTES: usize = 1 << 24; +pub const USED_BOOTLOADER_MEMORY_WORDS: usize = USED_BOOTLOADER_MEMORY_BYTES / 32; diff --git a/core/lib/config/src/constants/fees/mod.rs b/core/lib/config/src/constants/fees/mod.rs index cbe9f36d0b73..be2040a50e83 100644 --- a/core/lib/config/src/constants/fees/mod.rs +++ b/core/lib/config/src/constants/fees/mod.rs @@ -25,7 +25,7 @@ pub struct IntrinsicSystemGasConstants { pub l1_tx_delta_factory_dep_gas: u32, // The number of pubdata an L1->L2 transaction requires with each new factory dependency pub l1_tx_delta_factory_dep_pubdata: u32, - // The nubmer of computational gas the bootloader requires + // The number of computational gas the bootloader requires pub bootloader_intrinsic_gas: u32, // The number of overhead pubdata the bootloader requires pub bootloader_intrinsic_pubdata: u32, diff --git a/core/lib/crypto/src/hasher/blake2.rs b/core/lib/crypto/src/hasher/blake2.rs index cb2d22be72ea..3bf9e3a794b4 100644 --- a/core/lib/crypto/src/hasher/blake2.rs +++ b/core/lib/crypto/src/hasher/blake2.rs @@ -1,36 +1,38 @@ -use crate::hasher::Hasher; use blake2::{Blake2s256, Digest}; +use crate::hasher::Hasher; +use zksync_basic_types::H256; + #[derive(Default, Clone, Debug)] pub struct Blake2Hasher; impl Hasher> for Blake2Hasher { /// Gets the hash of the byte sequence. fn hash_bytes>(&self, value: I) -> Vec { - let mut hasher = Blake2s256::new(); - let value: Vec = value.into_iter().collect(); - hasher.update(&value); - - hasher.finalize().to_vec() + >::hash_bytes(self, value).0.into() } - /// Get the hash of the hashes sequence. - fn hash_elements>>(&self, elements: I) -> Vec { - let elems: Vec = elements.into_iter().flatten().collect(); - + /// Merges two hashes into one. + fn compress(&self, lhs: &Vec, rhs: &Vec) -> Vec { let mut hasher = Blake2s256::new(); - hasher.update(&elems); + hasher.update(lhs); + hasher.update(rhs); hasher.finalize().to_vec() } +} - /// Merges two hashes into one. - fn compress(&self, lhs: &Vec, rhs: &Vec) -> Vec { - let mut elems = vec![]; - elems.extend(lhs); - elems.extend(rhs); +impl Hasher for Blake2Hasher { + fn hash_bytes>(&self, value: I) -> H256 { + let mut hasher = Blake2s256::new(); + let value: Vec = value.into_iter().collect(); + hasher.update(&value); + H256(hasher.finalize().into()) + } + fn compress(&self, lhs: &H256, rhs: &H256) -> H256 { let mut hasher = Blake2s256::new(); - hasher.update(&elems); - hasher.finalize().to_vec() + hasher.update(lhs.as_ref()); + hasher.update(rhs.as_ref()); + H256(hasher.finalize().into()) } } diff --git a/core/lib/crypto/src/hasher/mod.rs b/core/lib/crypto/src/hasher/mod.rs index c71e7cc267a1..67fbf569c5f1 100644 --- a/core/lib/crypto/src/hasher/mod.rs +++ b/core/lib/crypto/src/hasher/mod.rs @@ -16,8 +16,15 @@ pub mod sha256; pub trait Hasher { /// Gets the hash of the byte sequence. fn hash_bytes>(&self, value: I) -> Hash; + /// Get the hash of the hashes sequence. - fn hash_elements>(&self, elements: I) -> Hash; + fn hash_elements>(&self, elements: I) -> Hash + where + Hash: IntoIterator, + { + self.hash_bytes(elements.into_iter().flatten()) + } + /// Merges two hashes into one. 
fn compress(&self, lhs: &Hash, rhs: &Hash) -> Hash; } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 2060ea301677..4310de58e2b2 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -10,17 +10,18 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] +vlog = { path = "../../lib/vlog", version = "1.0" } +vm = { path = "../vm", version = "0.1.0" } zksync_utils = { path = "../utils", version = "1.0" } zksync_config = { path = "../config", version = "1.0" } zksync_contracts = { path = "../contracts", version = "1.0" } zksync_types = { path = "../types", version = "1.0" } -vm = { path = "../vm", version = "0.1.0" } zksync_state = { path = "../state", version = "1.0" } zksync_storage = { path = "../storage", version = "1.0" } zksync_web3_decl = { path = "../web3_decl", version = "1.0" } +zksync_health_check = { path = "../health_check", version = "0.1.0" } + itertools = "0.10.1" -vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_object_store = { path = "../object_store", version = "1.0" } thiserror = "1.0" anyhow = "1.0" metrics = "0.20" @@ -39,8 +40,7 @@ sqlx = { version = "0.5", default-features = false, features = [ serde_json = "1.0" bigdecimal = "0.2.0" bincode = "1" - -num = { version = "0.3.1" } +num = "0.3.1" hex = "0.4" once_cell = "1.7" diff --git a/core/lib/dal/README.md b/core/lib/dal/README.md new file mode 100644 index 000000000000..62dc5e9aa125 --- /dev/null +++ b/core/lib/dal/README.md @@ -0,0 +1,55 @@ +# DAL (Data Access Layer) + +This crate provides read and write access to the main database (Postgres), which acts as the primary source of +truth. + +The current schema is managed by `diesel`, which applies all the schema changes from the `migrations` directory. + +## Schema + +### Storage tables + +| Table name | Description | Usage | +| ---------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| contract_sources | Mapping from contract address to the contract source. | Currently set via the `zks_setContractDebugInfo` JSON-RPC call | +| storage | Main storage table: mapping from hashed StorageKey (account + key) to the value. | We also store additional columns there (like transaction hash or creation time). | +| storage_logs | Stores all the storage access logs for all the transactions. | Main source of truth - other tables (like `storage`) are created by compacting this one. Its primary index is (storage key, mini_block, operation_id). | + +### Prover queue tables + +The tables below are used by different parts of witness generation.
+ +| Table name | Description | +| ----------------------------- | ---------------------------------- | +| witness_inputs | TODO | +| leaf_aggregation_witness_jobs | Queue of jobs for leaf aggregation | +| node_aggregation_witness_jobs | Queue of jobs for node aggregation | +| scheduler_witness_jobs | TODO | + +### TODO + +| Table name | +| ------------------------------------- | +| \_sqlx_migrations | +| aggregated_proof | +| contract_verification_requests | +| contract_verification_solc_versions | +| contract_verification_zksolc_versions | +| contracts_verification_info | +| eth_txs | +| eth_txs_history | +| events | +| factory_deps | +| gpu_prover_queue | +| initial_writes | +| l1_batches | +| l2_to_l1_logs | +| miniblocks | +| proof | +| protective_reads | +| prover_jobs | +| static_artifact_storage | +| storage_logs_dedup | +| tokens | +| transaction_traces | +| transactions | diff --git a/core/lib/dal/migrations/20230320195412_calls.down.sql b/core/lib/dal/migrations/20230320195412_calls.down.sql new file mode 100644 index 000000000000..90807bb59a8a --- /dev/null +++ b/core/lib/dal/migrations/20230320195412_calls.down.sql @@ -0,0 +1 @@ +DROP TABLE call_traces; diff --git a/core/lib/dal/migrations/20230320195412_calls.up.sql b/core/lib/dal/migrations/20230320195412_calls.up.sql new file mode 100644 index 000000000000..5cd344848e1c --- /dev/null +++ b/core/lib/dal/migrations/20230320195412_calls.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE call_traces ( + tx_hash BYTEA PRIMARY KEY, + call_trace BYTEA NOT NULL, + FOREIGN KEY (tx_hash) REFERENCES transactions (hash) ON DELETE CASCADE +); diff --git a/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.down.sql b/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.down.sql new file mode 100644 index 000000000000..bd24bf4ceb04 --- /dev/null +++ b/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE gpu_prover_queue +DROP COLUMN IF EXISTS zone, + DROP COLUMN IF EXISTS num_gpu; diff --git a/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.up.sql b/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.up.sql new file mode 100644 index 000000000000..aba60ed82386 --- /dev/null +++ b/core/lib/dal/migrations/20230323110438_add_zone_num_gpu_column_in_gpu_prover_queue.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE gpu_prover_queue + ADD COLUMN IF NOT EXISTS zone TEXT NOT NULL DEFAULT('prior to enabling multi-zone'), + ADD COLUMN IF NOT EXISTS num_gpu SMALLINT NOT NULL DEFAULT 2; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.down.sql b/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.down.sql new file mode 100644 index 000000000000..262fa64b48bd --- /dev/null +++ b/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.down.sql @@ -0,0 +1 @@ +ALTER TABLE gpu_prover_queue DROP CONSTRAINT IF EXISTS gpu_prover_unique_idx; diff --git a/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.up.sql b/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.up.sql new file mode 100644 index 000000000000..2f2053cf7924 --- /dev/null +++ b/core/lib/dal/migrations/20230323111121_add_zone_in_pk_of_gpu_prover_queue.up.sql @@ -0,0 +1 @@ +ALTER TABLE gpu_prover_queue ADD CONSTRAINT gpu_prover_unique_idx UNIQUE(instance_host, instance_port, region, 
zone); diff --git a/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.down.sql b/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.down.sql new file mode 100644 index 000000000000..ab442161835f --- /dev/null +++ b/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS eth_txs_history_tx_hash_index; diff --git a/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.up.sql b/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.up.sql new file mode 100644 index 000000000000..109a33e0404a --- /dev/null +++ b/core/lib/dal/migrations/20230327173626_eth-txs-history-tx-hash-index.up.sql @@ -0,0 +1,5 @@ +DELETE FROM eth_txs_history +WHERE confirmed_at IS NOT NULL AND NOT EXISTS + (SELECT 1 FROM eth_txs WHERE eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id); + +CREATE UNIQUE INDEX IF NOT EXISTS eth_txs_history_tx_hash_index ON eth_txs_history (tx_hash); diff --git a/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.down.sql b/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.down.sql new file mode 100644 index 000000000000..adcae4cd3748 --- /dev/null +++ b/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.down.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS gpu_prover_queue; + +CREATE TABLE IF NOT EXISTS gpu_prover_queue +( + instance_host INET NOT NULL, + instance_port INT NOT NULL + CONSTRAINT valid_port CHECK (instance_port >= 0 AND instance_port <= 65535), + instance_status TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP, + queue_free_slots integer, + queue_capacity integer, + specialized_prover_group_id smallint, + region TEXT, + zone TEXT, + num_gpu smallint, + PRIMARY KEY (instance_host, instance_port, region), + unique (instance_host, instance_port, region, zone) +); diff --git a/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.up.sql b/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.up.sql new file mode 100644 index 000000000000..e5ab107cb848 --- /dev/null +++ b/core/lib/dal/migrations/20230328110644_drop_and_recreate_gpu_prover_queue.up.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS gpu_prover_queue; + +CREATE TABLE IF NOT EXISTS gpu_prover_queue +( + id BIGSERIAL NOT NULL PRIMARY KEY, + instance_host INET NOT NULL, + instance_port INT NOT NULL + CONSTRAINT valid_port CHECK (instance_port >= 0 AND instance_port <= 65535), + instance_status TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP, + queue_free_slots integer, + queue_capacity integer, + specialized_prover_group_id smallint, + region TEXT NOT NULL, + zone TEXT NOT NULL, + num_gpu smallint, + unique (instance_host, instance_port, region, zone) +); + +CREATE INDEX IF NOT EXISTS gpu_prover_queue_zone_region_idx ON gpu_prover_queue (region, zone); diff --git a/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.down.sql b/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.down.sql new file mode 100644 index 000000000000..2b6072795f2b --- /dev/null +++ b/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS events_address_block_event_index_in_block_index; diff --git 
a/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.up.sql b/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.up.sql new file mode 100644 index 000000000000..cc0ae89147dc --- /dev/null +++ b/core/lib/dal/migrations/20230420104112_add_events_address-block-index_index.up.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS events_address_block_event_index_in_block_index ON events (address, miniblock_number, event_index_in_block); diff --git a/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.down.sql b/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.down.sql new file mode 100644 index 000000000000..0671d2177a11 --- /dev/null +++ b/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.down.sql @@ -0,0 +1,21 @@ +create table storage_logs_dedup +( + hashed_key bytea not null, + address bytea not null, + key bytea not null, + value_read bytea not null, + value_written bytea not null, + is_write boolean not null, + operation_number integer not null, + l1_batch_number bigint not null + constraint storage_logs_dedup_block_number_fkey + references blocks + on delete cascade, + created_at timestamp not null, + constraint storage_logs_dedup_pkey + primary key (hashed_key, l1_batch_number, operation_number) +); + +create index storage_logs_dedup_block_number_idx + on storage_logs_dedup (l1_batch_number); + diff --git a/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.up.sql b/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.up.sql new file mode 100644 index 000000000000..2780ebcd2d63 --- /dev/null +++ b/core/lib/dal/migrations/20230424115322_drop_stoarge_logs_dedup_table.up.sql @@ -0,0 +1 @@ +DROP table storage_logs_dedup; diff --git a/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.down.sql b/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.down.sql new file mode 100644 index 000000000000..c425abeecd15 --- /dev/null +++ b/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS events_transfer_from; +DROP INDEX IF EXISTS events_transfer_to; diff --git a/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.up.sql b/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.up.sql new file mode 100644 index 000000000000..4f38c321af47 --- /dev/null +++ b/core/lib/dal/migrations/20230525091429_events_transfer_indices_for_explorer.up.sql @@ -0,0 +1,7 @@ +CREATE INDEX IF NOT EXISTS events_transfer_from + ON events (topic2, miniblock_number, tx_index_in_block) + WHERE topic1 = '\xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'; + +CREATE INDEX IF NOT EXISTS events_transfer_to + ON events (topic3, miniblock_number, tx_index_in_block) + WHERE topic1 = '\xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index d4a3342160a8..7c48f297ea1c 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -207,6 +207,19 @@ }, "query": "\n WITH events_select AS (\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE miniblock_number > $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n )\n SELECT miniblocks.hash as \"block_hash?\",\n address as \"address!\", topic1 as 
\"topic1!\", topic2 as \"topic2!\", topic3 as \"topic3!\", topic4 as \"topic4!\", value as \"value!\",\n miniblock_number as \"miniblock_number!\", miniblocks.l1_batch_number as \"l1_batch_number?\", tx_hash as \"tx_hash!\",\n tx_index_in_block as \"tx_index_in_block!\", event_index_in_block as \"event_index_in_block!\", event_index_in_tx as \"event_index_in_tx!\"\n FROM events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, + "073d304fe756940303f00b514ef1e24036a1d3d3c3c7fb204b484f681a3520d7": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + } + }, + "query": "UPDATE eth_txs\n SET confirmed_eth_tx_history_id = $1\n WHERE id = $2" + }, "077913dcb33f255fad3f6d81a46a5acad9074cf5c03216430ca1a959825a057a": { "describe": { "columns": [ @@ -804,6 +817,26 @@ }, "query": "\n SELECT COUNT(*) as \"count!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY status\n " }, + "13f9c910b12ede287fe5ee753c9a3bf87a06216a320a58170608b9c81dc14b14": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Text" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT tx_hash FROM eth_txs_history\n WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL" + }, "151aa7cab859c275f74f981ed146415e1e5242ebe259552d5b9fac333c0d9ce8": { "describe": { "columns": [], @@ -1127,6 +1160,26 @@ }, "query": "UPDATE transactions SET in_mempool = FALSE WHERE in_mempool = TRUE" }, + "1f3e41f4ac5b1f6e735f1c422c0098ed534d9e8fe84e98b3234e893e8a2c5085": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs\n ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id\n WHERE eth_txs_history.tx_hash = $1" + }, "1faf6552c221c75b7232b55210c0c37be76a57ec9dc94584b6ccb562e8b182f2": { "describe": { "columns": [ @@ -1243,6 +1296,120 @@ }, "query": "SELECT * from prover_jobs where id=$1" }, + "206eaafbd834d16f37c47a06c8bbb8da8b23ed1eab9c5c5958e31832ced6f9f0": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_type", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "prover_input", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "error", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 9, + "type_info": "Time" + }, + { + "name": "aggregation_round", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "result", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "sequence_number", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "attempts", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 14, + "type_info": "Text" + }, + { + "name": "proccesed_by", + "ordinal": 15, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 16, + "type_info": 
"Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true, + true, + false, + false, + false, + false, + true, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " + }, "227daa1e8d647c207869d7c306d9d13a38c6baf07281cf72cd93d20da2e3cf3c": { "describe": { "columns": [ @@ -1333,6 +1500,26 @@ }, "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " }, + "249d8c0334a8a1a4ff993f72f5245dc55c60773732bfe7596dc5f05f34c15131": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ('\\x00', 0, $1, '', 0, now(), now())\n RETURNING id" + }, "24abd3109457403cbb8dc59f8805e0426d6da3b766ddae1516d45ad0b1277bc7": { "describe": { "columns": [ @@ -1745,18 +1932,6 @@ }, "query": "\n SELECT address, key, value\n FROM storage_logs\n WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1)\n AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1)\n ORDER BY miniblock_number, operation_number\n " }, - "292e7d004a45cf3c65b1be4c1dfe5f3aeeb8097af85329c6c181077aac4752c6": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM storage_logs_dedup WHERE l1_batch_number > $1" - }, "2b07fff3b8f793c010c0bd6f706d7c43786305e3335fd6ae344664ec60f815a8": { "describe": { "columns": [], @@ -1906,6 +2081,32 @@ }, "query": "SELECT version FROM contract_verification_zksolc_versions ORDER by version" }, + "32236a83e1525748f736fa87d53df6005e49f21968e90af9d933359fdd3fb330": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "call_trace", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT * FROM call_traces WHERE tx_hash IN (\n SELECT hash FROM transactions WHERE miniblock_number = $1\n )\n " + }, "335826f54feadf6aa30a4e7668ad3f17a2afc6bd67d4f863e3ad61fefd1bd8d2": { "describe": { "columns": [ @@ -1956,6 +2157,28 @@ }, "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, + "38a3bdae346fdd362452af152c6886c93696dd2db561f6622f8eaf6fabb1e5be": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Timestamp" + ] + } + }, + "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, 
priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at)\n VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3)\n RETURNING id" + }, "393345441797999e9f11b8b5ddce0b64356e1e167056d7f76ef6dfffd3534607": { "describe": { "columns": [ @@ -1988,243 +2211,21 @@ }, "query": "\n SELECT\n COALESCE(token_list_name, name) as \"name!\",\n COALESCE(token_list_symbol, symbol) as \"symbol!\",\n COALESCE(token_list_decimals, decimals) as \"decimals!\"\n FROM tokens WHERE l2_address = $1\n " }, - "3ab6a849873a78c741f5266aceedbc0bce3486b0d28066b2edd53ddeff6ca43a": { + "394bbd64939d47fda4e1545e2752b208901e872b7234a5c3af456bdf429a6074": { "describe": { "columns": [ { - "name": "hash", + "name": "tx_hash", "ordinal": 0, "type_info": "Bytea" }, { - "name": "is_priority", + "name": "call_trace", "ordinal": 1, - "type_info": "Bool" - }, - { - "name": "full_fee", - "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, - "type_info": "Numeric" - }, - { - "name": "initiator_address", - "ordinal": 4, "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "block_hash?", - "ordinal": 35, - "type_info": "Bytea" - }, - { - "name": "eth_commit_tx_hash?", - "ordinal": 36, - "type_info": "Text" - }, - { - "name": "eth_prove_tx_hash?", - 
"ordinal": 37, - "type_info": "Text" - }, - { - "name": "eth_execute_tx_hash?", - "ordinal": 38, - "type_info": "Text" } ], "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - false, - true, - false, - false, - false, - true, - true, - true, - true, - true, - false, - true, - true, - false, - false, false, false ], @@ -2234,7 +2235,19 @@ ] } }, - "query": "\n SELECT transactions.*,\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + "query": "\n SELECT * FROM call_traces\n WHERE tx_hash = $1\n " + }, + "3ac1fe562e9664bbf8c02ba3090cf97a37663e228eff48fec326f74b2313daa9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM call_traces\n WHERE tx_hash = ANY($1)" }, "3b0bfc7445faaa87f6cabb68419ebff995120d65db3a4def70d998507e699811": { "describe": { @@ -2306,26 +2319,6 @@ }, "query": "DELETE FROM events WHERE miniblock_number > $1" }, - "3d7350a4252bfff0cb99d40330d09af2dcbda1a3f42a0d1f03ae88c4f5c3e5ef": { - "describe": { - "columns": [ - { - "name": "l1_batch_number?", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\"\n FROM prover_jobs\n WHERE status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR (status = 'failed' AND attempts < $1)\n " - }, "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { "describe": { "columns": [], @@ -2785,430 +2778,219 @@ }, "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM\n (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" }, - "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { + "474c72dc36171ee1983e0eb4272cdbc180e3773093280556e8e5229b68bc793d": { "describe": { "columns": [ { "name": "hash", "ordinal": 0, "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Interval" - ] - } - }, - "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" - }, - "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { - "describe": { - "columns": [ - { - "name": "hashed_key", - "ordinal": 0, - "type_info": "Bytea" }, { - "name": "value!", + "name": "is_priority", "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" - }, - 
"4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" - }, - "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" - }, - "4ca0356959e4cc50e09b6fe08e9d45cbd929601935506acbbade4a42c2eaea89": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Text" - ] - } - }, - "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), now())\n " - }, - "4d7b5a423b29ce07bd12f168d1ee707e6e413d9a4f0daafb4beed102d22d1745": { - "describe": { - "columns": [ - { - "name": "address", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT address, key FROM protective_reads\n WHERE l1_batch_number = $1\n " - }, - "4dc63a4431062cb1ae428db625251a6121c3aa2fc06e045ae07b3db6d2f66406": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE witness_inputs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int4Array", - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "Int4Array", - "Int4Array", - "VarcharArray", - "NumericArray", - "JsonbArray", - "ByteaArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "Int8" - ] - } - }, - "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS 
error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " - }, - "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" - }, - "50f406ffe7802e753411baa0e348294bdb05c96b96b2041ee876e2b34a1a6ea6": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM l1_batches\n WHERE number = $1\n AND hash = $2\n AND merkle_root_hash = $3\n AND parent_hash = $4\n AND l2_l1_merkle_root = $5\n " - }, - "516e309a97010cd1eb8398b2b7ff809786703c075e4c3dff1133c41cdcfdd3f3": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "is_finished", + "name": "full_fee", "ordinal": 2, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": "Varchar" }, { - "name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": 
"updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", "ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + "name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", "ordinal": 29, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "miniblock_number", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "zkporter_is_available", + "name": "refunded_gas", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "default_aa_code_hash", + "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "block_hash?", "ordinal": 35, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "miniblock_timestamp?", "ordinal": 36, "type_info": "Int8" }, { - "name": "aux_data_hash", + "name": "eth_commit_tx_hash?", "ordinal": 37, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "pass_through_data_hash", + "name": "eth_prove_tx_hash?", "ordinal": 38, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "meta_parameters_hash", + "name": "eth_execute_tx_hash?", "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "type_info": "Text" } ], "nullable": [ false, false, + true, + true, false, - false, - false, - false, + true, + true, + true, false, false, true, @@ -3221,11 +3003,10 @@ true, false, false, - true, - false, - false, false, + true, false, + true, false, false, false, @@ -3234,121 +3015,110 @@ true, true, true, - true, - true, + false, true, true, false, false, - true, - true, - true, false, false, false ], "parameters": { "Left": [ - "Int8" - ] - } - }, - "query": "SELECT * FROM l1_batches WHERE number = $1" - }, - "51faf352f402bf8137db9500d0438849a81334b35dc83b060ebfd956d1d3e791": { - "describe": { - "columns": [ - { - "name": "key", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - 
false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", "Bytea" ] } }, - "query": "\n SELECT key\n FROM storage_logs\n WHERE address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n " - }, - "523efb18ba96382c55ee9566b5402f8dd3082ae4a66205a2122eea5961f8b86b": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT COUNT(*) as \"count!\" FROM transactions\n WHERE miniblock_number > $1 AND miniblock_number IS NOT NULL" + "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n miniblocks.timestamp as \"miniblock_timestamp?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, - "52602518095b2a45fadab7b76218acb6964b416a103be2a3b37b3dac4a970c14": { + "4a8a5df72c08e9a3423e93be72dd63c38daefd644977685384327689892e68cd": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, "type_info": "Int8" }, { - "name": "timestamp", + "name": "contract_address", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "hash", + "name": "source_code", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "l1_tx_count", + "name": "contract_name", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "l2_tx_count", + "name": "compiler_zksolc_version", "ordinal": 4, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "base_fee_per_gas", + "name": "optimization_used", "ordinal": 5, - "type_info": "Numeric" + "type_info": "Bool" }, { - "name": "l1_gas_price", + "name": "constructor_arguments", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l2_fair_gas_price", + "name": "status", "ordinal": 7, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "bootloader_code_hash", + "name": "error", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "default_aa_code_hash", + "name": "created_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "compilation_errors", + "ordinal": 11, + "type_info": "Jsonb" + }, + { + "name": "processing_started_at", + "ordinal": 12, + "type_info": "Timestamp" + }, + { + "name": "compiler_solc_version", + "ordinal": 13, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "panic_message", + "ordinal": 15, + "type_info": "Text" + }, + { + "name": "is_system", + "ordinal": 16, + "type_info": "Bool" } ], "nullable": [ 
@@ -3361,442 +3131,242 @@ false, false, true, - true + false, + false, + true, + true, + false, + false, + true, + false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n WHERE number = $1\n " + "query": "SELECT * FROM contract_verification_requests\n WHERE status = 'successful'\n ORDER BY id" }, - "541d22a9ffe9c7b31833f203af0820cca4513d7a9e6feed7313757674c30e667": { + "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { "columns": [ { - "name": "address", + "name": "hash", "ordinal": 0, "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "value", - "ordinal": 2, - "type_info": "Bytea" } ], "nullable": [ - false, - false, false ], "parameters": { "Left": [ - "Int8", - "Int8" + "Interval" ] } }, - "query": "\n SELECT address, key, value FROM storage_logs\n WHERE miniblock_number >= $1 AND miniblock_number <= $2\n ORDER BY miniblock_number, operation_number ASC\n " + "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" }, - "5543380548ce40063d43c1d54e368c7d385800d7ade9e720306808cc4c376978": { + "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { "describe": { "columns": [ { - "name": "number", + "name": "hashed_key", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "value!", "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "fee_account_address", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "bloom", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "priority_ops_onchain_data", - "ordinal": 7, - "type_info": "ByteaArray" - }, - { - "name": "hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", 
- "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" + }, + "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { + "describe": { + "columns": [ { - "name": "gas_per_pubdata_limit", - "ordinal": 36, + "name": "count!", + "ordinal": 0, "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" + }, + "4bab972cbbd8b53237a840ba9307079705bd4b5270428d2b41f05ee3d2aa42af": { + "describe": { + "columns": [ { - "name": "l1_gas_price", - "ordinal": 41, + "name": "l1_batch_number!", + "ordinal": 0, "type_info": "Int8" }, { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "name": "circuit_type", + "ordinal": 1, + "type_info": "Text" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, - false, + null, false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "SELECT * FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_type\n FROM prover_jobs\n WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR status = 'failed')\n GROUP BY circuit_type\n " }, - "55ae3cf154fe027f9036c60d21b5fd32972fbb2b17a74562d7721ec69dd19971": { + "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "delete from storage where hashed_key = $1" + "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" }, - "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { + "4ca0356959e4cc50e09b6fe08e9d45cbd929601935506acbbade4a42c2eaea89": { "describe": { "columns": [], 
"nullable": [], "parameters": { "Left": [ - "Int4", "Int8", - "Int8" + "Bytea", + "Text" ] } }, - "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), now())\n " }, - "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { + "4d7b5a423b29ce07bd12f168d1ee707e6e413d9a4f0daafb4beed102d22d1745": { "describe": { "columns": [ { - "name": "count!", + "name": "address", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "circuit_type!", + "name": "key", "ordinal": 1, - "type_info": "Text" - }, - { - "name": "status!", - "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ - null, false, false ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " + "query": "\n SELECT address, key FROM protective_reads\n WHERE l1_batch_number = $1\n " }, - "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { + "4dc63a4431062cb1ae428db625251a6121c3aa2fc06e045ae07b3db6d2f66406": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4", - "Int4" + "Int8Array" ] } }, - "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" + "query": "\n UPDATE witness_inputs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " }, - "59b10abd699d19cbdf285334162ee40f294c5fad8f99fc00a4cdb3b233a494d6": { + "4e2b733fea9ca7cef542602fcd80acf1a9d2e0f1e22566f1076c4837e3ac7e61": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2!", + "name": "instance_host", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Inet" }, { - "name": "topic3!", + "name": "instance_port", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "value!", + "name": "instance_status", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "l1_address!", + "name": "created_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_address!", + "name": "updated_at", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "symbol!", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Timestamp" }, { - "name": "name!", + "name": "queue_free_slots", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Int4" + }, + { + "name": "queue_capacity", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "specialized_prover_group_id", + "ordinal": 9, + "type_info": "Int2" + }, + { + "name": "region", + "ordinal": 10, + "type_info": "Text" }, { - "name": "decimals!", - "ordinal": 8, - "type_info": "Int4" + "name": "zone", + "ordinal": 11, + "type_info": "Text" }, { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" + "name": "num_gpu", + "ordinal": 12, + "type_info": "Int2" } ], "nullable": [ @@ -3806,206 +3376,356 @@ false, false, false, - false, + true, + true, + true, + true, false, false, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Interval", + "Int2", + "Text", + "Text" ] } }, - "query": "\n SELECT tx_hash, topic2 as 
\"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " }, - "5a47a5a007ca26497d7015f21d2bf31785ec8e061dbd4c3e847c3b53b40269c4": { + "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Jsonb", - "Int8", - "Numeric", - "Numeric", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" + "ByteaArray", + "Int4Array", + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "NumericArray", + "Int4Array", + "Int4Array", + "VarcharArray", + "NumericArray", + "JsonbArray", + "ByteaArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "Int8" ] } }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n " + "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n 
UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " }, - "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { + "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "attempts", - "ordinal": 1, - "type_info": "Int4" - } - ], - "nullable": [ - false, - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Text", - "Int8" + "Int4" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " + "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" }, - "5aaec6df0337db49524e7dafd145334bfe66ca886f922d559d6f5484137003fd": { + "50f406ffe7802e753411baa0e348294bdb05c96b96b2041ee876e2b34a1a6ea6": { "describe": { "columns": [ { - "name": "id", + "name": "count!", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { "Left": [ - "Int4", - "Int8", "Int8", - "Text", + "Bytea", + "Bytea", + "Bytea", "Bytea" ] } }, - "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING id" + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM l1_batches\n WHERE number = $1\n AND hash = $2\n AND merkle_root_hash = $3\n AND parent_hash = $4\n AND l2_l1_merkle_root = $5\n " }, - "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { + "516e309a97010cd1eb8398b2b7ff809786703c075e4c3dff1133c41cdcfdd3f3": { "describe": { "columns": [ { - "name": "value", + "name": "number", "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", + "ordinal": 2, + "type_info": "Bool" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "fee_account_address", + "ordinal": 5, "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT value FROM storage WHERE hashed_key = $1" - }, - "5b45825b92d6971d8b2fbad6eb68d24e1c666a54cbf1ceb1332e2039f9614d18": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "TextArray" - ] - } - }, - "query": "\n INSERT INTO contract_verification_zksolc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " - }, - "5b85d8bdf297f55e65978edda4a0966ded1dc0d24f4701e7b6048124f38b4cea": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "Int8" - ] - } - }, - "query": "INSERT INTO factory_deps\n (bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT u.bytecode_hash, u.bytecode, $3, now(), now()\n 
FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n " - }, - "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { - "describe": { - "columns": [ + }, + { + "name": "bloom", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "parent_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, + "type_info": "Int8" + }, + { + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" + }, { - "name": "l1_batch_number!", - "ordinal": 0, + "name": "predicted_execute_gas_cost", + "ordinal": 23, "type_info": "Int8" }, { - "name": "aggregation_round", - "ordinal": 1, + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 27, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, "type_info": "Int4" - } - ], - "nullable": [ - null, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " - }, - "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { - "describe": { - "columns": [ + }, { - "name": "number", - "ordinal": 0, + "name": "rollup_last_leaf_index", + "ordinal": 31, "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" - }, - "5d1c3357b97f5e40a7e9d6fdcb7c3ebd8309e93f26e1c42d6371190f4aeaf8c6": { - "describe": { - "columns": [ + }, { - "name": "min?", - "ordinal": 0, + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 36, "type_info": "Int8" }, { - "name": 
"max?", - "ordinal": 1, + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, "type_info": "Int8" } ], "nullable": [ - null, - null + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + false, + false, + false ], "parameters": { "Left": [ @@ -4013,119 +3733,116 @@ ] } }, - "query": "\n SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\"\n FROM miniblocks\n WHERE l1_batch_number = $1\n " + "query": "SELECT * FROM l1_batches WHERE number = $1" }, - "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { + "52602518095b2a45fadab7b76218acb6964b416a103be2a3b37b3dac4a970c14": { "describe": { "columns": [ { - "name": "op_id", + "name": "number", "ordinal": 0, "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" - }, - "5f5974e7033eea82896a435c7776a6740f4a2df77175744a9670d3fee2f24b32": { - "describe": { - "columns": [ - { - "name": "address", - "ordinal": 0, - "type_info": "Bytea" }, { - "name": "topic1", + "name": "timestamp", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2", + "name": "hash", "ordinal": 2, "type_info": "Bytea" }, { - "name": "topic3", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic4", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "value", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "block_hash", + "name": "l1_gas_price", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l1_batch_number?", + "name": "l2_fair_gas_price", "ordinal": 7, "type_info": "Int8" }, { - "name": "miniblock_number", + "name": "bootloader_code_hash", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "tx_hash", - "ordinal": 9, + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n WHERE number = $1\n " + }, + "541d22a9ffe9c7b31833f203af0820cca4513d7a9e6feed7313757674c30e667": { + "describe": { + "columns": [ + { + "name": "address", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "tx_index_in_block", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "event_index_in_block", - "ordinal": 11, - "type_info": "Int4" + "name": "key", + "ordinal": 1, + "type_info": "Bytea" }, { - "name": "event_index_in_tx", - "ordinal": 12, - "type_info": "Int4" + 
"name": "value", + "ordinal": 2, + "type_info": "Bytea" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - null, - null, - false, - false, false, false, false ], "parameters": { "Left": [ - "Bytea" + "Int8", + "Int8" ] } }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " + "query": "\n SELECT address, key, value FROM storage_logs\n WHERE miniblock_number >= $1 AND miniblock_number <= $2\n ORDER BY miniblock_number, operation_number ASC\n " }, - "604b41258da640307989571e014e8ccb4f457bba0caedcb42dc1065fc90f7950": { + "5543380548ce40063d43c1d54e368c7d385800d7ade9e720306808cc4c376978": { "describe": { "columns": [ { @@ -4391,203 +4108,333 @@ ], "parameters": { "Left": [ - "Bytea", - "Bytea", "Int8" ] } }, - "query": "SELECT * FROM l1_batches\n WHERE eth_commit_tx_id IS NULL\n AND number != 0\n AND bootloader_code_hash = $1 AND default_aa_code_hash = $2\n AND commitment IS NOT NULL\n ORDER BY number LIMIT $3" + "query": "SELECT * FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" }, - "61f4f5ef369b2435732af17091493876301e3e59b68d6817fe0053c7da89291e": { + "55ae3cf154fe027f9036c60d21b5fd32972fbb2b17a74562d7721ec69dd19971": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "delete from storage where hashed_key = $1" + }, + "55debba852ef32f3b5ba6ffcb745f7b59d6888a21cb8792f8f9027e3b164a245": { "describe": { "columns": [ { - "name": "max_nonce?", + "name": "region", "ordinal": 0, + "type_info": "Text" + }, + { + "name": "zone", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "total_gpus", + "ordinal": 2, "type_info": "Int8" } ], "nullable": [ + false, + false, null ], "parameters": { "Left": [] } }, - "query": "SELECT MAX(nonce) as \"max_nonce?\" FROM eth_txs" - }, - "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "DELETE FROM eth_txs_history\n WHERE id = $1" + "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " }, - "63616acc2c415f4c8d650a96fd5481a609436a94666d65363eb06808da8da4b8": { + "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "Int4", + "Int8", "Int8" ] } }, - "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1" + "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, - "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { + "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { "describe": { "columns": [ { - "name": "status", + "name": "count!", "ordinal": 0, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "error", + "name": "circuit_type!", "ordinal": 1, "type_info": "Text" }, { - "name": "compilation_errors", + "name": "status!", "ordinal": 2, - "type_info": "Jsonb" + "type_info": "Text" } ], "nullable": [ + null, false, - true, - true + false ], + 
"parameters": { + "Left": [] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " + }, + "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int4", + "Int4" ] } }, - "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " + "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" }, - "65bf55ff4ac5c4ac60bedd7c5b39d82f6e8793859749a7b6ab56121f623ed840": { + "59b10abd699d19cbdf285334162ee40f294c5fad8f99fc00a4cdb3b233a494d6": { "describe": { "columns": [ { - "name": "number", + "name": "tx_hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "commit_gas?", + "name": "topic2!", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "commit_base_gas_price?", + "name": "topic3!", "ordinal": 2, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "commit_priority_gas_price?", + "name": "value!", "ordinal": 3, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "prove_gas?", + "name": "l1_address!", "ordinal": 4, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "prove_base_gas_price?", + "name": "l2_address!", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "prove_priority_gas_price?", + "name": "symbol!", "ordinal": 6, - "type_info": "Int8" + "type_info": "Varchar" }, { - "name": "execute_gas?", + "name": "name!", "ordinal": 7, + "type_info": "Varchar" + }, + { + "name": "decimals!", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + }, + "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, "type_info": "Int8" }, { - "name": "execute_base_gas_price?", - "ordinal": 8, + "name": "attempts", + "ordinal": 1, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " + }, + "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { + "describe": { + "columns": [ + { + "name": "value", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT value FROM storage WHERE hashed_key = $1" + }, + "5b45825b92d6971d8b2fbad6eb68d24e1c666a54cbf1ceb1332e2039f9614d18": { + 
"describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "TextArray" + ] + } + }, + "query": "\n INSERT INTO contract_verification_zksolc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " + }, + "5b85d8bdf297f55e65978edda4a0966ded1dc0d24f4701e7b6048124f38b4cea": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "Int8" + ] + } + }, + "query": "INSERT INTO factory_deps\n (bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT u.bytecode_hash, u.bytecode, $3, now(), now()\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n " + }, + "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { + "describe": { + "columns": [ + { + "name": "l1_batch_number!", + "ordinal": 0, "type_info": "Int8" }, { - "name": "execute_priority_gas_price?", - "ordinal": 9, - "type_info": "Int8" + "name": "aggregation_round", + "ordinal": 1, + "type_info": "Int4" } ], "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - false, + null, false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT\n l1_batches.number,\n commit_tx_data.gas_used as \"commit_gas?\",\n commit_tx.base_fee_per_gas as \"commit_base_gas_price?\",\n commit_tx.priority_fee_per_gas as \"commit_priority_gas_price?\",\n prove_tx_data.gas_used as \"prove_gas?\",\n prove_tx.base_fee_per_gas as \"prove_base_gas_price?\",\n prove_tx.priority_fee_per_gas as \"prove_priority_gas_price?\",\n execute_tx_data.gas_used as \"execute_gas?\",\n execute_tx.base_fee_per_gas as \"execute_base_gas_price?\",\n execute_tx.priority_fee_per_gas as \"execute_priority_gas_price?\"\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as commit_tx_data\n ON (l1_batches.eth_commit_tx_id = commit_tx_data.id)\n LEFT JOIN eth_txs_history as prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as prove_tx_data\n ON (l1_batches.eth_prove_tx_id = prove_tx_data.id)\n LEFT JOIN eth_txs_history as execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as execute_tx_data\n ON (l1_batches.eth_execute_tx_id = execute_tx_data.id)\n WHERE l1_batches.number = $1\n " + "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " }, - "66a3761aec92aa8794e55ddd8299879e915e8ef84f8be9ebca9881c77438d2c8": { + "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { "describe": { "columns": [ { - "name": "value", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { - "Left": [ - "Bytea", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT value FROM storage_logs\n WHERE hashed_key = $1 AND miniblock_number <= $2\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n " + "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" }, - "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "5d1c3357b97f5e40a7e9d6fdcb7c3ebd8309e93f26e1c42d6371190f4aeaf8c6": { "describe": { - 
"columns": [], - "nullable": [], + "columns": [ + { + "name": "min?", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "max?", + "ordinal": 1, + "type_info": "Int8" + } + ], + "nullable": [ + null, + null + ], "parameters": { "Left": [ - "Time", - "Bytea", - "Text", "Int8" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " + "query": "\n SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\"\n FROM miniblocks\n WHERE l1_batch_number = $1\n " }, - "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { + "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { "describe": { "columns": [ { - "name": "total_transactions!", + "name": "op_id", "ordinal": 0, "type_info": "Int8" } @@ -4596,34 +4443,104 @@ null ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " + "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" }, - "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "5f5974e7033eea82896a435c7776a6740f4a2df77175744a9670d3fee2f24b32": { "describe": { "columns": [ { - "name": "l2_to_l1_logs", + "name": "address", "ordinal": 0, - "type_info": "ByteaArray" + "type_info": "Bytea" + }, + { + "name": "topic1", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "topic2", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "topic3", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "topic4", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "value", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "block_hash", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "l1_batch_number?", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "miniblock_number", + "ordinal": 8, + "type_info": "Int8" + }, + { + "name": "tx_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "tx_index_in_block", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "event_index_in_block", + "ordinal": 11, + "type_info": "Int4" + }, + { + "name": "event_index_in_tx", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ + false, + false, + false, + false, + false, + false, + null, + null, + false, + false, + false, + false, false ], "parameters": { "Left": [ - "Int8" + "Bytea" ] } }, - "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" + "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "69c3e2cfece5cb9f6989f5cbbea36af2a92addcdb41082541ea41b46fdd0ea1f": { + "604b41258da640307989571e014e8ccb4f457bba0caedcb42dc1065fc90f7950": { "describe": { "columns": [ { @@ -4889,149 +4806,217 @@ ], "parameters": { "Left": [ - "Float8", + "Bytea", + "Bytea", "Int8" ] } }, - "query": "SELECT l1_batches.* FROM l1_batches JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 ORDER BY number LIMIT $2" + "query": "SELECT * FROM l1_batches\n WHERE eth_commit_tx_id 
IS NULL\n AND number != 0\n AND bootloader_code_hash = $1 AND default_aa_code_hash = $2\n AND commitment IS NOT NULL\n ORDER BY number LIMIT $3" }, - "6ae4738857a3dc19860b8dc61b75790dee0030d84438bcc311e917cb1a076289": { + "61f4f5ef369b2435732af17091493876301e3e59b68d6817fe0053c7da89291e": { "describe": { "columns": [ { - "name": "proof", + "name": "max_nonce?", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(nonce) as \"max_nonce?\" FROM eth_txs" + }, + "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "DELETE FROM eth_txs_history\n WHERE id = $1" + }, + "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { + "describe": { + "columns": [ + { + "name": "status", + "ordinal": 0, + "type_info": "Text" }, { - "name": "aggregation_result_coords", + "name": "error", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Text" + }, + { + "name": "compilation_errors", + "ordinal": 2, + "type_info": "Jsonb" } ], "nullable": [ + false, true, true ], "parameters": { "Left": [ - "Int8", "Int8" ] } }, - "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n AND scheduler_witness_jobs.status = 'successful'\n " + "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " }, - "6c81c5a55d595d0790ac20ca202ff3083b0677c47872f2eb1c65e568dd7c156a": { + "65bf55ff4ac5c4ac60bedd7c5b39d82f6e8793859749a7b6ab56121f623ed840": { "describe": { "columns": [ { - "name": "miniblock_number", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "log_index_in_miniblock", + "name": "commit_gas?", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "log_index_in_tx", + "name": "commit_base_gas_price?", "ordinal": 2, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "commit_priority_gas_price?", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "block_hash", + "name": "prove_gas?", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l1_batch_number?", + "name": "prove_base_gas_price?", "ordinal": 5, "type_info": "Int8" }, { - "name": "shard_id", + "name": "prove_priority_gas_price?", "ordinal": 6, - "type_info": "Int4" - }, - { - "name": "is_service", - "ordinal": 7, - "type_info": "Bool" - }, - { - "name": "tx_index_in_miniblock", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "tx_index_in_l1_batch", - "ordinal": 9, - "type_info": "Int4" - }, - { - "name": "sender", - "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "key", - "ordinal": 11, - "type_info": "Bytea" + "name": "execute_gas?", + "ordinal": 7, + "type_info": "Int8" }, { - "name": "value", - "ordinal": 12, - "type_info": "Bytea" + "name": "execute_base_gas_price?", + "ordinal": 8, + "type_info": "Int8" + }, + { + "name": "execute_priority_gas_price?", + "ordinal": 9, + "type_info": "Int8" } ], "nullable": [ false, + true, false, false, - false, - null, - null, - false, - false, - false, + 
true, false, false, + true, false, false ], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "\n SELECT\n miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value\n FROM l2_to_l1_logs\n WHERE tx_hash = $1\n ORDER BY log_index_in_tx ASC\n " + "query": "\n SELECT\n l1_batches.number,\n commit_tx_data.gas_used as \"commit_gas?\",\n commit_tx.base_fee_per_gas as \"commit_base_gas_price?\",\n commit_tx.priority_fee_per_gas as \"commit_priority_gas_price?\",\n prove_tx_data.gas_used as \"prove_gas?\",\n prove_tx.base_fee_per_gas as \"prove_base_gas_price?\",\n prove_tx.priority_fee_per_gas as \"prove_priority_gas_price?\",\n execute_tx_data.gas_used as \"execute_gas?\",\n execute_tx.base_fee_per_gas as \"execute_base_gas_price?\",\n execute_tx.priority_fee_per_gas as \"execute_priority_gas_price?\"\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as commit_tx_data\n ON (l1_batches.eth_commit_tx_id = commit_tx_data.id)\n LEFT JOIN eth_txs_history as prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as prove_tx_data\n ON (l1_batches.eth_prove_tx_id = prove_tx_data.id)\n LEFT JOIN eth_txs_history as execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as execute_tx_data\n ON (l1_batches.eth_execute_tx_id = execute_tx_data.id)\n WHERE l1_batches.number = $1\n " }, - "6d923b755e1762ebc499cf2c6d7e894357e7b55f3342be08071e2be183ad2a00": { + "66a3761aec92aa8794e55ddd8299879e915e8ef84f8be9ebca9881c77438d2c8": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "value", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + } + }, + "query": "\n SELECT value FROM storage_logs\n WHERE hashed_key = $1 AND miniblock_number <= $2\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n " + }, + "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Bytea", + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " + }, + "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { + "describe": { + "columns": [ + { + "name": "total_transactions!", "ordinal": 0, "type_info": "Int8" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " + }, + "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "describe": { + "columns": [ { - "name": "merkel_tree_paths_blob_url", - "ordinal": 1, - "type_info": "Text" + "name": "l2_to_l1_logs", + "ordinal": 0, + "type_info": "ByteaArray" } ], "nullable": [ - false, - true + false ], "parameters": { "Left": [ @@ -5039,13 +5024,13 @@ ] } }, - "query": "\n SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND merkel_tree_paths_blob_url is NOT 
NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" }, - "6de96eb86301418de9a4342cd66447afd6eb42759d36e164e36adddbd42e98e2": { + "67efc7ea5bd3821d8325759ed8357190f6122dd2ae503a57faf15d8b749a4361": { "describe": { "columns": [ { - "name": "number", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" } @@ -5057,275 +5042,134 @@ "Left": [] } }, - "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE execute_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " }, - "6ebe0d6a315050d72ffead2dd695f0ba1926a3f4a1ed56b3f291d0f41b72c4d4": { + "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { "describe": { "columns": [ { - "name": "hashed_key!", + "name": "bytecode", "ordinal": 0, "type_info": "Bytea" }, { - "name": "value?", + "name": "data?", "ordinal": 1, + "type_info": "Jsonb" + }, + { + "name": "contract_address?", + "ordinal": 2, "type_info": "Bytea" } ], "nullable": [ - null, - null + false, + false, + true ], "parameters": { "Left": [ - "ByteaArray", - "Int8" + "Bytea", + "Bytea" ] } }, - "query": "\n SELECT u.hashed_key as \"hashed_key!\",\n (SELECT value FROM storage_logs\n WHERE hashed_key = u.hashed_key AND miniblock_number < $2\n ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\"\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n " + "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " }, - "6f9edffc50202b888d12f80e57a2a346d865e522aa5a02fe3fcfa155406227a4": { + "6c81c5a55d595d0790ac20ca202ff3083b0677c47872f2eb1c65e568dd7c156a": { "describe": { "columns": [ { - "name": "hash", + "name": "miniblock_number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "log_index_in_miniblock", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int4" }, { - "name": "full_fee", + "name": "log_index_in_tx", "ordinal": 2, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "layer_2_tip_fee", + "name": "tx_hash", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "initiator_address", + "name": "block_hash", "ordinal": 4, "type_info": "Bytea" }, { - "name": "nonce", + "name": "l1_batch_number?", "ordinal": 5, "type_info": "Int8" }, { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - 
"ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" + "name": "shard_id", + "ordinal": 6, + "type_info": "Int4" }, { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" + "name": "is_service", + "ordinal": 7, + "type_info": "Bool" }, { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" + "name": "tx_index_in_miniblock", + "ordinal": 8, + "type_info": "Int4" }, { - "name": "l1_batch_tx_index", - "ordinal": 31, + "name": "tx_index_in_l1_batch", + "ordinal": 9, "type_info": "Int4" }, { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" + "name": "sender", + "ordinal": 10, + "type_info": "Bytea" }, { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" + "name": "key", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": "l1_tx_refund_recipient", - "ordinal": 34, + "name": "value", + "ordinal": 12, "type_info": "Bytea" } ], "nullable": [ false, false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, false, false, + null, + null, false, - true, false, - true, false, false, false, - true, - true, - true, - true, - true, false, - true, - true + false ], "parameters": { "Left": [ - "Int8", - "Numeric", - "Numeric" - ] - } - }, - "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" - }, - "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int8" + "Bytea" ] } }, - "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT 
(hashed_key) DO NOTHING\n " + "query": "\n SELECT\n miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value\n FROM l2_to_l1_logs\n WHERE tx_hash = $1\n ORDER BY log_index_in_tx ASC\n " }, - "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { + "6d923b755e1762ebc499cf2c6d7e894357e7b55f3342be08071e2be183ad2a00": { "describe": { "columns": [ { @@ -5334,19 +5178,13 @@ "type_info": "Int8" }, { - "name": "basic_circuits_blob_url", + "name": "merkel_tree_paths_blob_url", "ordinal": 1, "type_info": "Text" - }, - { - "name": "basic_circuits_inputs_blob_url", - "ordinal": 2, - "type_info": "Text" } ], "nullable": [ false, - true, true ], "parameters": { @@ -5355,31 +5193,54 @@ ] } }, - "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "\n SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND merkel_tree_paths_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "7229ddaadb494c5723946a1e917840eb6035b7d0923518aac7ba2fb81c711d7b": { + "6de96eb86301418de9a4342cd66447afd6eb42759d36e164e36adddbd42e98e2": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE execute_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + }, + "6ebe0d6a315050d72ffead2dd695f0ba1926a3f4a1ed56b3f291d0f41b72c4d4": { + "describe": { + "columns": [ + { + "name": "hashed_key!", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "value?", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + null, + null + ], "parameters": { "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea" + "ByteaArray", + "Int8" ] } }, - "query": "\n INSERT INTO miniblocks (\n number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())\n " + "query": "\n SELECT u.hashed_key as \"hashed_key!\",\n (SELECT value FROM storage_logs\n WHERE hashed_key = u.hashed_key AND miniblock_number < $2\n ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\"\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n " }, - "734fc9cc1ffe10a6c6b56150c0681b6b2757d14b2ea04a289abb1de64dffb172": { + "6f9edffc50202b888d12f80e57a2a346d865e522aa5a02fe3fcfa155406227a4": { "describe": { "columns": [ { @@ -5556,26 +5417,6 @@ "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" - }, - { - "name": "block_hash?", - "ordinal": 35, - "type_info": "Bytea" - }, - { - "name": "eth_commit_tx_hash?", - "ordinal": 36, - "type_info": "Text" - }, - { - "name": "eth_prove_tx_hash?", - 
"ordinal": 37, - "type_info": "Text" - }, - { - "name": "eth_execute_tx_hash?", - "ordinal": 38, - "type_info": "Text" } ], "nullable": [ @@ -5613,136 +5454,84 @@ true, false, true, - true, - false, - false, - false, - false + true ], "parameters": { "Left": [ - "Bytea" + "Int8", + "Numeric", + "Numeric" ] } }, - "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" }, - "75273db544f363b2c75bb7b579ba72fbf9447dd76182159edc40a48b32a9f738": { + "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "circuit_type", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "prover_input", - "ordinal": 3, - "type_info": "Bytea" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 9, - "type_info": "Time" - }, - { - "name": "aggregation_round", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "result", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "sequence_number", - "ordinal": 12, - "type_info": "Int4" - }, + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int8" + ] + } + }, + "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " + }, + "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { + "describe": { + "columns": [ { - "name": "attempts", - "ordinal": 13, - "type_info": "Int4" + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "circuit_input_blob_url", - "ordinal": 14, + "name": "basic_circuits_blob_url", + "ordinal": 1, "type_info": "Text" }, { - "name": "proccesed_by", - "ordinal": 15, + "name": "basic_circuits_inputs_blob_url", + "ordinal": 2, "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 
16, - "type_info": "Bool" } ], "nullable": [ - false, - false, - false, - false, - false, - true, - true, - false, - false, - false, - false, - true, - false, false, true, - true, - false + true ], "parameters": { "Left": [ - "Interval", - "Int4" + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + }, + "7229ddaadb494c5723946a1e917840eb6035b7d0923518aac7ba2fb81c711d7b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " + "query": "\n INSERT INTO miniblocks (\n number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())\n " }, "766119f845a7a11b6a5bb2a29bab32e2890df772b13e1a378222e089736fd3bf": { "describe": { @@ -5762,6 +5551,24 @@ }, "query": "SELECT COALESCE(max(number), 0) as \"number!\" FROM l1_batches\n WHERE eth_prove_tx_id IS NOT NULL" }, + "769c021b51b9aaafdf27b4019834729047702b17b0684f7271eecd6ffdf96e7c": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number\n WHERE swj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 2\n GROUP BY prover_jobs.l1_batch_number\n HAVING COUNT(*) = 1)\n RETURNING l1_batch_number;\n " + }, "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { "describe": { "columns": [ @@ -5800,6 +5607,26 @@ }, "query": "\n SELECT l1_batch_number FROM initial_writes\n WHERE hashed_key = $1\n " }, + "7ca78be8b18638857111cdbc6117ed2c204e3eb22682d5e4553ac4f47efab6e2": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1\n RETURNING hash\n " + }, "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { "describe": { "columns": [], @@ -5827,6 +5654,24 @@ }, "query": "\n INSERT INTO contracts_verification_info\n (address, 
verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " }, + "7d4210089c5abb84befec962fc769b396ff7ad7da212d079bd4460f9ea4d60dc": { + "describe": { + "columns": [ + { + "name": "l1_batch_number?", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\" FROM (\n SELECT MIN(l1_batch_number) as \"l1_batch_number\"\n FROM prover_jobs\n WHERE status = 'successful' OR aggregation_round < 3\n GROUP BY l1_batch_number\n HAVING MAX(aggregation_round) < 3\n ) as inn\n " + }, "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { "describe": { "columns": [ @@ -6099,16 +5944,6 @@ }, "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" }, - "7f1a7b5cc5786e1554cb082c2f4cd1368c511e67aeb12465e16661ba940e9538": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "LOCK TABLE prover_jobs IN EXCLUSIVE MODE" - }, "831e1beb42dab1dc4e9b585bb35ce568196e7f46cb655357fdf5437ece519270": { "describe": { "columns": [], @@ -6342,35 +6177,155 @@ "type_info": "Int8" }, { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + true, + true, + false, + true, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + false, + true, + false, + false, + false, + true, + true, + true, + true, + true, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT * FROM transactions\n WHERE miniblock_number = $1\n ORDER BY index_in_block\n " + }, + "8b881a834dc813ac5bd4dcd2f973d34ae92cafa929ce933982704d4afe13f972": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "root_hash?", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 10, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 13, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 15, + "type_info": "Bytea" }, { - "name": "l1_tx_refund_recipient", - "ordinal": 34, + "name": "fee_account_address?", + "ordinal": 16, "type_info": "Bytea" } ], "nullable": [ false, - false, - true, - true, - false, - true, - true, - true, + null, false, false, - true, - true, - true, - true, - true, - 
true, - true, - true, false, false, false, @@ -6378,16 +6333,12 @@ false, true, false, + true, false, false, true, true, - true, - true, - true, - false, - true, - true + false ], "parameters": { "Left": [ @@ -6395,7 +6346,7 @@ ] } }, - "query": "\n SELECT * FROM transactions\n WHERE miniblock_number = $1\n ORDER BY index_in_block\n " + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " }, "8b96fbf5b8adabd76ea2648688c38c4d9917b3736ca53ed3896c35c0da427369": { "describe": { @@ -6711,6 +6662,30 @@ }, "query": "SELECT * FROM l1_batches\n ORDER BY number DESC\n LIMIT 1" }, + "9008367aad7877f269b765c4d0772d0f60689fcde6987c620fe5749a259a8db7": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8", + "Text", + "Bytea" + ] + } + }, + "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" + }, "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { "describe": { "columns": [ @@ -6732,115 +6707,23 @@ }, "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " }, - "9457eab74b924d7d7fd5ecf91886bbfe31844d2158f061cac5aef2ebf8714850": { + "928b5c1fbec2b2cfb9293cfe6312f7a0549f47a7cff4981acc0c2fda81079701": { "describe": { "columns": [ { "name": "number", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "l1_batch_number!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 2, - "type_info": "Int8" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "root_hash?", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "commit_tx_hash?", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "committed_at?", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "prove_tx_hash?", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "proven_at?", - "ordinal": 9, - "type_info": "Timestamp" - }, 
- { - "name": "execute_tx_hash?", - "ordinal": 10, - "type_info": "Text" - }, - { - "name": "executed_at?", - "ordinal": 11, - "type_info": "Timestamp" - }, - { - "name": "l1_gas_price", - "ordinal": 12, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 13, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 14, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 15, - "type_info": "Bytea" } ], "nullable": [ - false, - null, - false, - false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true + false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id)\n WHERE prove_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" }, "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { "describe": { @@ -6986,109 +6869,223 @@ }, "query": "\n UPDATE witness_inputs\n SET merkle_tree_paths=''\n WHERE l1_batch_number = ANY($1);\n " }, - "9e014fe6841b7aab6317b3ee1dc1ab85b2f75ea7836777ef0c70fa1a1023d38f": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int4", - "Int4", - "Int2", - "Text" - ] - } - }, - "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, now(), now())\n ON CONFLICT(instance_host, instance_port, region)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, updated_at=now()" - }, - "9e994205fe5886f0f8f729110599f3c344562e560fd492e071c9c5bbe50812cf": { + "a2758f1cfaac42019e4b11a7fe21d62da2a83b98d997448658ab2855383d6ca4": { "describe": { "columns": [ { - "name": "id", - "ordinal": 0, + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", + "ordinal": 2, + "type_info": "Bool" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + 
}, + { + "name": "fee_account_address", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "bloom", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "parent_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, "type_info": "Int8" }, { - "name": "l1_batch_number", - "ordinal": 1, + "name": "predicted_prove_gas_cost", + "ordinal": 22, "type_info": "Int8" }, { - "name": "circuit_type", - "ordinal": 2, - "type_info": "Text" + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" }, { - "name": "prover_input", - "ordinal": 3, + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, "type_info": "Bytea" }, { - "name": "status", - "ordinal": 4, - "type_info": "Text" + "name": "compressed_repeated_writes", + "ordinal": 27, + "type_info": "Bytea" }, { - "name": "error", - "ordinal": 5, - "type_info": "Text" + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" }, { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" }, { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, + "type_info": "Int4" }, { - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" }, { - "name": "time_taken", - "ordinal": 9, - "type_info": "Time" + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" }, { - "name": "aggregation_round", - "ordinal": 10, - "type_info": "Int4" + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" }, { - "name": "result", - "ordinal": 11, + "name": "default_aa_code_hash", + "ordinal": 34, "type_info": "Bytea" }, { - "name": "sequence_number", - "ordinal": 12, - "type_info": "Int4" + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" }, { - "name": "attempts", - "ordinal": 13, - "type_info": "Int4" + "name": "gas_per_pubdata_limit", + "ordinal": 36, + "type_info": "Int8" }, { - "name": "circuit_input_blob_url", - "ordinal": 14, - "type_info": "Text" + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" }, { - "name": "proccesed_by", - "ordinal": 15, - "type_info": "Text" 
+ "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" }, { - "name": "is_blob_cleaned", - "ordinal": 16, + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" } ], "nullable": [ @@ -7097,28 +7094,53 @@ false, false, false, + false, + false, + false, + true, true, true, + true, + true, + true, + true, + true, + false, + false, + true, false, false, false, false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, true, false, false, true, true, + true, + false, + false, false ], "parameters": { "Left": [ - "Interval", - "Int4", - "TextArray" + "Float8", + "Int8" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($3)\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " + "query": "SELECT l1_batches.* FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history as commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 ORDER BY number LIMIT $2" }, "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { @@ -7436,21 +7458,6 @@ }, "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " }, - "a7f4d8a9520de951c50fd12fafc0ce8895e03932cbb0337ce0ea4e884296ca36": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Text", - "Int4", - "Int4" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n " - }, "a8d2b80d197d8168a6c1b4666e799a9d6c2e31d84986ae352715e687989f913c": { "describe": { "columns": [ @@ -7590,6 +7597,33 @@ }, "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " }, + "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { + "describe": { + "columns": [ + { + "name": "proof", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": 
"aggregation_result_coords", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + true, + true + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " + }, "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { "describe": { "columns": [ @@ -7676,24 +7710,6 @@ }, "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " }, - "aa9256fd40c557a553b407506794bffcc99247ccb9badf6ab303552d7b1bf5d2": { - "describe": { - "columns": [ - { - "name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT MIN(count) as \"count\"\n FROM (SELECT COALESCE(SUM(queue_free_slots), 0) as \"count\"\n FROM gpu_prover_queue\n where instance_status = 'available'\n UNION\n SELECT count(*) as \"count\"\n from prover_jobs\n where status = 'queued'\n ) as t1;\n " - }, "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { "describe": { "columns": [ @@ -7843,20 +7859,6 @@ }, "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" }, - "afc0448c58b0e2f7a7865cc1b5069d66f4cb9d4f609a0fab06cac3b7784910d1": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int4", - "Int4" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'available', updated_at = now(), queue_free_slots = $3\n WHERE instance_host = $1::text::inet\n AND instance_port = $2\n AND instance_status = 'full'\n " - }, "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { "describe": { "columns": [ @@ -7937,6 +7939,35 @@ }, "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" }, + "b4c576db7c762103dc6700ded458e996d2e9ef670d7b58b181dbfab02fa426ce": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Jsonb", + "Int8", + "Numeric", + "Numeric", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" + ] + } + }, + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " + }, "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { "describe": { "columns": [], @@ -8062,6 +8093,19 @@ }, "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" }, + "bb3ae24e27a04047af2d6ebc145e86619d29ec89bb2abe39244f5669e82c9571": { + 
"describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + } + }, + "query": "\n UPDATE l1_batches\n SET hash = $1\n WHERE number = $2\n " + }, "bd4898ee283a312cb995853686a1f5252e73b22efea3cf9f158c4476c9639b32": { "describe": { "columns": [], @@ -8146,6 +8190,19 @@ }, "query": "\n SELECT nonce as \"nonce!\" FROM transactions\n WHERE initiator_address = $1 AND nonce >= $2\n AND is_priority = FALSE\n AND (miniblock_number IS NOT NULL OR error IS NULL)\n ORDER BY nonce\n " }, + "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray" + ] + } + }, + "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " + }, "c2f6f7fa37b303748f47ff2de01227e7afbc9ff041bc1428743d91300f5f5caf": { "describe": { "columns": [ @@ -8166,58 +8223,93 @@ }, "query": "\n SELECT l1_batch_number FROM miniblocks\n WHERE number = $1\n " }, - "c4250120d4a7333157bf50058e9dd568d92f8e2060c27d4fd51d337be91a9aa1": { + "c321d1210799dfd29e54f18f3a3698e9bf288850f2dbd782e817d1cfd9165b16": { "describe": { "columns": [ { - "name": "instance_host", + "name": "id", "ordinal": 0, - "type_info": "Inet" + "type_info": "Int8" }, { - "name": "instance_port", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "instance_status", + "name": "circuit_type", "ordinal": 2, "type_info": "Text" }, { - "name": "created_at", + "name": "prover_input", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "status", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "processing_started_at", + "name": "error", "ordinal": 5, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "queue_free_slots", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "queue_capacity", + "name": "created_at", "ordinal": 7, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "specialized_prover_group_id", + "name": "updated_at", "ordinal": 8, - "type_info": "Int2" + "type_info": "Timestamp" }, { - "name": "region", + "name": "time_taken", "ordinal": 9, + "type_info": "Time" + }, + { + "name": "aggregation_round", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "result", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "sequence_number", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "attempts", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 14, + "type_info": "Text" + }, + { + "name": "proccesed_by", + "ordinal": 15, "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 16, + "type_info": "Bool" } ], "nullable": [ @@ -8228,19 +8320,24 @@ false, true, true, + false, + false, + false, + false, + true, + false, + false, true, true, false ], "parameters": { "Left": [ - "Interval", - "Int2", - "Text" + "TextArray" ] } }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE (instance_host, instance_port) in (\n SELECT instance_host, instance_port\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < 
now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($1)\n AND status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { "describe": { @@ -8261,214 +8358,537 @@ "type_info": "Text" }, { - "name": "base_fee_per_gas", - "ordinal": 3, - "type_info": "Int8" + "name": "base_fee_per_gas", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "priority_fee_per_gas", + "ordinal": 4, + "type_info": "Int8" + }, + { + "name": "signed_raw_tx", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "nonce", + "ordinal": 6, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" + }, + "c66b0e0867a1a634f984645ca576a6502b51b67aa0be2dae98e0e2adeb450963": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int4" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + }, + "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" + }, + "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + }, + "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { + "describe": { + "columns": [ + { + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + } + }, + "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) 
t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " + }, + "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "merkle_tree_paths", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "created_at", + "ordinal": 2, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" + }, + { + "name": "processing_started_at", + "ordinal": 6, + "type_info": "Timestamp" }, { - "name": "priority_fee_per_gas", - "ordinal": 4, - "type_info": "Int8" + "name": "error", + "ordinal": 7, + "type_info": "Varchar" }, { - "name": "signed_raw_tx", - "ordinal": 5, - "type_info": "Bytea" + "name": "attempts", + "ordinal": 8, + "type_info": "Int4" }, { - "name": "nonce", - "ordinal": 6, - "type_info": "Int8" + "name": "merkel_tree_paths_blob_url", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 10, + "type_info": "Bool" } ], "nullable": [ + false, + true, false, false, false, false, + true, + true, false, true, false ], "parameters": { - "Left": [] - } - }, - "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" - }, - "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { - "describe": { - "columns": [ - { - "name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int4", + "Int8" + ] } }, - "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" + "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " }, - "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { + "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Int8", + "Int4", + "Int4" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" }, - "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { + "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { "describe": { "columns": [ { - "name": "bytecode", + "name": "usd_price", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Numeric" + }, + { + "name": "usd_price_updated_at", + "ordinal": 1, + "type_info": "Timestamp" } ], "nullable": [ - false + true, + true ], 
"parameters": { "Left": [ - "Bytea", - "Int8", "Bytea" ] } }, - "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " + "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" }, - "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { + "c9eefe59225b10d90b67ab92a8f9e3bad92ec02f8dfc2719903149ab9f82fe1c": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "merkle_tree_paths", + "name": "is_priority", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "created_at", + "name": "full_fee", "ordinal": 2, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "status", + "name": "initiator_address", "ordinal": 4, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "time_taken", + "name": "nonce", "ordinal": 5, - "type_info": "Time" + "type_info": "Int8" }, { - "name": "processing_started_at", + "name": "signature", "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "input", + "ordinal": 7, + "type_info": "Bytea" + }, + { + "name": "data", + "ordinal": 8, + "type_info": "Jsonb" + }, + { + "name": "received_at", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "priority_op_id", + "ordinal": 10, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "index_in_block", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "error", + "ordinal": 13, + "type_info": "Varchar" + }, + { + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" + }, + { + "name": "gas_per_storage_limit", + "ordinal": 15, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" + }, + { + "name": "tx_format", + "ordinal": 17, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 18, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 19, "type_info": "Timestamp" }, { - "name": "error", - "ordinal": 7, - "type_info": "Varchar" + "name": "execution_info", + "ordinal": 20, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "in_mempool", + "ordinal": 22, + "type_info": "Bool" + }, + { + "name": "l1_block_number", + "ordinal": 23, + "type_info": "Int4" + }, + { + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, + "type_info": "Bytea" + }, + { + "name": "paymaster_input", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": 
"Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "miniblock_timestamp?", + "ordinal": 35, + "type_info": "Int8" + }, + { + "name": "block_hash?", + "ordinal": 36, + "type_info": "Bytea" }, { - "name": "attempts", - "ordinal": 8, - "type_info": "Int4" + "name": "eth_commit_tx_hash?", + "ordinal": 37, + "type_info": "Text" }, { - "name": "merkel_tree_paths_blob_url", - "ordinal": 9, + "name": "eth_prove_tx_hash?", + "ordinal": 38, "type_info": "Text" }, { - "name": "is_blob_cleaned", - "ordinal": 10, - "type_info": "Bool" + "name": "eth_execute_tx_hash?", + "ordinal": 39, + "type_info": "Text" } ], "nullable": [ + false, + false, + true, + true, + false, + true, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, false, true, false, + true, false, false, false, true, true, + true, + true, + true, false, true, + true, + false, + false, + false, + false, false ], "parameters": { "Left": [ - "Interval", - "Int4", - "Int8" - ] - } - }, - "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " - }, - "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4" + "Bytea" ] } }, - "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" + "query": "\n SELECT transactions.*,\n miniblocks.timestamp as \"miniblock_timestamp?\",\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, - "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { + "ca3a65591d2d14e6b597389ee47594f403b5212d79267279c957cbc64d44dc7a": { "describe": { "columns": [ { - "name": "usd_price", + "name": "number", "ordinal": 0, - "type_info": "Numeric" - }, - { - "name": "usd_price_updated_at", - "ordinal": 1, - "type_info": "Timestamp" + "type_info": "Int8" } ], "nullable": [ - true, - true + false ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id)\n WHERE commit_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + }, + "ca8fa3521dab5ee985a837572e8625bd5b26bf79f58950698218b28110c29d1f": { + "describe": { + "columns": [], + "nullable": [], 
"parameters": { "Left": [ - "Bytea" + "Text", + "Int4", + "Int4", + "Int2", + "Text", + "Text", + "Int2" ] } }, - "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" + "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" }, "cbe9445b28efc540d4a01b4c8f1e62017e9854b2d01973c55b27603a8a81bbdd": { "describe": { @@ -8855,6 +9275,23 @@ }, "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" }, + "d11ff84327058721c3c36bc3371c3139f41e2a2255f64bbc5108c1876848d8bb": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Int4", + "Int4", + "Text", + "Text" + ] + } + }, + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n AND region = $5\n AND zone = $6\n " + }, "d2f16dcd8175a337f57724ce5b2fb59d2934f60bb2d24c6ec77195dc63c26002": { "describe": { "columns": [ @@ -9084,6 +9521,24 @@ }, "query": "\n SELECT transactions.hash, transactions.received_at\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = miniblock_number\n WHERE received_at > $1\n ORDER BY received_at ASC\n LIMIT $2\n " }, + "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572": { + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" + }, "dd10ebfbf5db4d2ac44b03be3acf494ea180f59685d8fc156af481e8265079c2": { "describe": { "columns": [ @@ -9156,35 +9611,6 @@ }, "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " }, - "dec8533793968c9db379e3da18f262ea9d9dce2f8959c29b0a638296bf10ccc2": { - "describe": { - "columns": [ - { - "name": "key", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Int8" - ] - } - }, - "query": "\n SELECT storage_logs.key, factory_deps.bytecode\n FROM storage_logs\n JOIN factory_deps ON storage_logs.value = factory_deps.bytecode_hash\n WHERE\n storage_logs.address = $1 AND\n storage_logs.miniblock_number >= $3 AND\n storage_logs.miniblock_number <= $4 AND\n NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $2\n )\n " - }, "e14338281eb639856f1c7a8ba6b60fe3914d3f30d0b55cea8fb287209892df03": { "describe": { "columns": [ @@ -9255,39 +9681,6 @@ }, "query": "\n SELECT MIN(miniblock_number) as \"min?\"\n FROM l2_to_l1_logs\n " }, - "e2023b335b34b24cd0bd8d1d972aa1867a13c78504312fc718e801272c47b559": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - 
"type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 1, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 2, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false, - true - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT factory_deps.bytecode, transactions.data, transactions.contract_address\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " - }, "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { "describe": { "columns": [ @@ -9553,27 +9946,6 @@ }, "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " }, - "ec4a3bc6a7a9c13ad11a4b71bed019a961f918a1d1376440c484cc42432c6c9c": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM prover_jobs\n WHERE status = 'successful' AND l1_batch_number = $1 AND aggregation_round = $2\n " - }, "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { "describe": { "columns": [], @@ -9691,6 +10063,40 @@ }, "query": "\n SELECT l1_batch_number, l1_batch_tx_index\n FROM transactions\n WHERE hash = $1\n " }, + "f0c83c517fdf9696a0acf288f061bd00a993e0b2379b667738b6876e2f588043": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN node_aggregation_witness_jobs nawj ON prover_jobs.l1_batch_number = nawj.l1_batch_number\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 1\n GROUP BY prover_jobs.l1_batch_number, nawj.number_of_leaf_circuits\n HAVING COUNT(*) = nawj.number_of_leaf_circuits)\n RETURNING l1_batch_number;\n " + }, + "f1defa140e20b9c250d3212602dc259c0a35598c2e69d1c42746a8fab6dd8d3e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int4", + "Int4", + "Text", + "Text" + ] + } + }, + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'available', updated_at = now(), queue_free_slots = $3\n WHERE instance_host = $1::text::inet\n AND instance_port = $2\n AND instance_status = 'full'\n AND region = $4\n AND zone = $5\n " + }, "f3f7ceb708cc072d66e8609d64ba99e6faa80bf58ff0ce0ef49e882af63522d4": { "describe": { "columns": [], @@ -9761,6 +10167,27 @@ }, "query": "\n SELECT storage.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as 
\"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1)\n " }, + "f76f7d03cce064c0240da83a4ba75a0ce3fb57a18723c278a3d05eaf085f8994": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM transactions\n WHERE miniblock_number BETWEEN $1 AND $2" + }, "f93109d1cc02f5516b40a4a29082a46fd6fa66972bae710d08cfe6a1484b1616": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index e89e24091736..760bfcd62cce 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -466,6 +466,38 @@ impl BlocksDal<'_, '_> { }) } + /// Returns the number of the last block for which an Ethereum commit tx was sent and confirmed. + pub fn get_number_of_last_block_committed_on_eth(&mut self) -> Option { + async_std::task::block_on(async { + sqlx::query!( + "SELECT number FROM l1_batches + LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) + WHERE commit_tx.confirmed_at IS NOT NULL + ORDER BY number DESC LIMIT 1" + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|record| L1BatchNumber(record.number as u32)) + }) + } + + /// Returns the number of the last block for which an Ethereum prove tx was sent and confirmed. + pub fn get_number_of_last_block_proven_on_eth(&mut self) -> Option { + async_std::task::block_on(async { + sqlx::query!( + "SELECT number FROM l1_batches + LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) + WHERE prove_tx.confirmed_at IS NOT NULL + ORDER BY number DESC LIMIT 1" + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|record| L1BatchNumber(record.number as u32)) + }) + } + /// Returns the number of the last block for which an Ethereum execute tx was sent and confirmed. pub fn get_number_of_last_block_executed_on_eth(&mut self) -> Option { async_std::task::block_on(async { @@ -637,7 +669,8 @@ impl BlocksDal<'_, '_> { sqlx::query_as!( StorageBlock, "SELECT l1_batches.* FROM l1_batches \ - JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) \ + JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) \ + JOIN eth_txs_history as commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) \ WHERE commit_tx.confirmed_at IS NOT NULL \ AND eth_prove_tx_id IS NOT NULL \ AND eth_execute_tx_id IS NULL \ @@ -752,7 +785,7 @@ impl BlocksDal<'_, '_> { }) } - fn get_block_with_metadata( + pub fn get_block_with_metadata( &mut self, storage_block: StorageBlock, ) -> Option { @@ -892,6 +925,21 @@ impl BlocksDal<'_, '_> { }) } + /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored miniblock that isn't assigned + /// to any batch yet). 
+    pub fn pending_batch_exists(&mut self) -> bool {
+        async_std::task::block_on(async {
+            let count = sqlx::query_scalar!(
+                r#"SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL"#
+            )
+            .fetch_one(self.storage.conn())
+            .await
+            .unwrap()
+            .unwrap_or(0);
+            count != 0
+        })
+    }
+
     pub fn get_last_l1_batch_number_with_witness_inputs(&mut self) -> L1BatchNumber {
         async_std::task::block_on(async {
             sqlx::query!(
@@ -991,3 +1039,24 @@
         })
     }
 }
+
+impl BlocksDal<'_, '_> {
+    // This function is only used for tests.
+    // The actual l1 batch hash is only set by the metadata calculator.
+    pub fn set_l1_batch_hash(&mut self, batch_num: L1BatchNumber, hash: H256) {
+        async_std::task::block_on(async {
+            sqlx::query!(
+                "
+                UPDATE l1_batches
+                SET hash = $1
+                WHERE number = $2
+                ",
+                hash.as_bytes(),
+                batch_num.0 as i64
+            )
+            .execute(self.storage.conn())
+            .await
+            .unwrap();
+        })
+    }
+}
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs
index 88f94e8be995..6b8a16cac9f1 100644
--- a/core/lib/dal/src/blocks_web3_dal.rs
+++ b/core/lib/dal/src/blocks_web3_dal.rs
@@ -12,8 +12,10 @@ use std::time::Instant;
 use vm::utils::BLOCK_GAS_LIMIT;
 use zksync_config::constants::EMPTY_UNCLES_HASH;
+use crate::models::storage_transaction::CallTrace;
 use zksync_types::api::{self, Block, BlockId, TransactionVariant};
 use zksync_types::l2_to_l1_log::L2ToL1Log;
+use zksync_types::vm_trace::Call;
 use zksync_types::web3::types::{BlockHeader, U64};
 use zksync_types::{L1BatchNumber, L2ChainId, MiniblockNumber, H160, H256, U256};
 use zksync_utils::{bigdecimal_to_u256, miniblock_hash};
@@ -266,17 +268,25 @@ impl BlocksWeb3Dal<'_, '_> {
     pub fn resolve_block_id(
         &mut self,
-        block_id: api::BlockId,
+        block_id: BlockId,
     ) -> Result<Result<MiniblockNumber, Web3Error>, SqlxError> {
         async_std::task::block_on(async {
             let query_string = match block_id {
-                api::BlockId::Hash(_) => {
-                    "SELECT number FROM miniblocks WHERE hash = $1".to_string()
+                BlockId::Hash(_) => "SELECT number FROM miniblocks WHERE hash = $1".to_string(),
+                BlockId::Number(api::BlockNumber::Number(block_number)) => {
+                    // We use a query here instead of returning `block_number` directly so that
+                    // block numbers which have not been created yet are handled correctly:
+                    // `SELECT number FROM miniblocks WHERE number = block_number` returns zero
+                    // rows for a non-existing block number.
+                    format!(
+                        "SELECT number FROM miniblocks WHERE number = {}",
+                        block_number
+                    )
                 }
-                api::BlockId::Number(api::BlockNumber::Number(_)) => {
-                    "SELECT number FROM miniblocks WHERE number = $1".to_string()
+                BlockId::Number(api::BlockNumber::Earliest) => {
+                    return Ok(Ok(MiniblockNumber(0)));
                 }
-                api::BlockId::Number(block_number) => web3_block_number_to_sql(block_number, 1).0,
+                BlockId::Number(block_number) => web3_block_number_to_sql(block_number),
             };
             let row = bind_block_where_sql_params(block_id, sqlx::query(&query_string))
                 .fetch_optional(self.storage.conn())
@@ -394,4 +404,47 @@
             Ok(result)
         })
     }
+
+    pub fn get_trace_for_miniblock(&mut self, block: BlockId) -> Result<Vec<Call>, Web3Error> {
+        async_std::task::block_on(async {
+            let block_number = self.resolve_block_id(block).unwrap()?;
+            let traces = sqlx::query_as!(
+                CallTrace,
+                r#"
+                SELECT * FROM call_traces WHERE tx_hash IN (
+                    SELECT hash FROM transactions WHERE miniblock_number = $1
+                )
+                "#,
+                block_number.0 as i64
+            )
+            .fetch_all(self.storage.conn())
+            .await
+            .unwrap()
+            .into_iter()
+            .map(Call::from)
+            .collect();
+            Ok(traces)
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::ConnectionPool;
+
+    use super::*;
+    use db_test_macro::db_test;
+    use zksync_types::{
+        api::{BlockId, BlockNumber},
+        MiniblockNumber,
+    };
+
+    #[db_test(dal_crate)]
+    async fn test_resolve_block_id_earliest(connection_pool: ConnectionPool) {
+        let storage = &mut connection_pool.access_test_storage().await;
+        let mut block_web3_dal = BlocksWeb3Dal { storage };
+        let miniblock_number =
+            block_web3_dal.resolve_block_id(BlockId::Number(BlockNumber::Earliest));
+        assert_eq!(miniblock_number.unwrap().unwrap(), MiniblockNumber(0));
+    }
 }
diff --git a/core/lib/dal/src/connection/holder.rs b/core/lib/dal/src/connection/holder.rs
index d35e19bd9980..1ba455011f47 100644
--- a/core/lib/dal/src/connection/holder.rs
+++ b/core/lib/dal/src/connection/holder.rs
@@ -5,6 +5,7 @@ use sqlx::pool::PoolConnection;
 use sqlx::{postgres::Postgres, PgConnection, Transaction};
 // Workspace imports
 // Local imports
+use crate::connection::test_pool::TestPoolLock;
 
 /// Connection holder unifies the type of underlying connection, which
 /// can be either pooled or direct.
@@ -12,7 +13,7 @@ pub enum ConnectionHolder<'a> {
     Pooled(PoolConnection<Postgres>),
     Direct(PgConnection),
     Transaction(Transaction<'a, Postgres>),
-    TestTransaction(&'a mut Transaction<'static, Postgres>),
+    TestTransaction(TestPoolLock),
 }
 
 impl<'a> fmt::Debug for ConnectionHolder<'a> {
diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs
index 91de36ca0fb9..e11abe80edf8 100644
--- a/core/lib/dal/src/connection/mod.rs
+++ b/core/lib/dal/src/connection/mod.rs
@@ -36,6 +36,11 @@ impl ConnectionPool {
         Self::Real(pool)
     }
 
+    /// WARNING: this method is intentionally private.
+    /// The `zksync_dal` crate uses the `async-std` runtime, whereas most of our crates use `tokio`.
+    /// Calling an `async-std` future from a `tokio` context may cause deadlocks (and it did happen).
+    /// Use the blocking counterpart instead.
+    ///
     /// Creates a `StorageProcessor` entity over a recoverable connection.
     /// Upon a database outage connection will block the thread until
     /// it will be able to recover the connection (or, if connection cannot
     ///
     /// This method is intended to be used in crucial contexts, where the
     /// database access is must-have (e.g. block committer).
- pub async fn access_storage(&self) -> StorageProcessor<'_> { + async fn access_storage(&self) -> StorageProcessor<'_> { match self { ConnectionPool::Real(real_pool) => { let start = Instant::now(); @@ -56,7 +61,7 @@ impl ConnectionPool { } } - pub async fn acquire_connection_retried(pool: &PgPool) -> PoolConnection { + async fn acquire_connection_retried(pool: &PgPool) -> PoolConnection { const DB_CONNECTION_RETRIES: u32 = 3; let mut retry_count = 0; @@ -87,8 +92,8 @@ impl ConnectionPool { pub async fn access_test_storage(&self) -> StorageProcessor<'static> { match self { ConnectionPool::Test(test) => test.access_storage().await, - ConnectionPool::Real(_real) => { - panic!("Attempt to access test storage with the real pool") + ConnectionPool::Real(_) => { + panic!("Attempt to access test storage with the real pool"); } } } diff --git a/core/lib/dal/src/connection/test_pool.rs b/core/lib/dal/src/connection/test_pool.rs index 5d4ee35667f0..69fda8f6f8c7 100644 --- a/core/lib/dal/src/connection/test_pool.rs +++ b/core/lib/dal/src/connection/test_pool.rs @@ -1,22 +1,88 @@ // Built-in deps -use std::sync::Arc; +use std::{fmt, mem, pin::Pin, sync::Arc, time::Duration}; // External imports -use async_std::sync::Mutex; -use sqlx::{PgConnection, Postgres, Transaction}; +use async_std::{ + future::timeout, + sync::{Mutex, MutexGuardArc}, +}; +use sqlx::{Acquire, Connection, PgConnection, Postgres, Transaction}; +// Local imports +use crate::StorageProcessor; -// Public re-export for proc macro to use `begin` on the connection. -#[doc(hidden)] -pub use sqlx::Connection; +/// Self-referential struct powering [`TestPool`]. +// Ideally, we'd want to use a readily available crate like `ouroboros` to define this struct, +// but `ouroboros` in particular doesn't satisfy our needs: +// +// - It doesn't provide mutable access to the tail field (`subtransaction`), only allowing +// to mutably access it in a closure. +// - There is an error borrowing from `transaction` since it implements `Drop`. +struct TestPoolInner { + // Mutably references `_transaction`. + subtransaction: Transaction<'static, Postgres>, + // Mutably references `_connection`. Must not be used anywhere since it's mutably borrowed! + _transaction: Pin>>, + // Must not be used anywhere since it's mutably borrowed! + _connection: Pin>, +} -use crate::StorageProcessor; -// Local imports +impl fmt::Debug for TestPoolInner { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("Inner") + .field("subtransaction", &self.subtransaction) + .finish() + } +} + +impl TestPoolInner { + async fn new() -> Self { + let database_url = crate::get_test_database_url(); + let connection = PgConnection::connect(&database_url).await.unwrap(); + let mut connection = Box::pin(connection); + + let transaction = Connection::begin(&mut *connection).await.unwrap(); + let transaction: Transaction<'static, Postgres> = unsafe { + // SAFETY: We extend `transaction` lifetime to `'static`. This is valid + // because the transaction borrows from `connection`, which outlives `transaction` + // (since it's a field in the same struct, and fields in a struct are dropped + // in the declaration order), is not moved after the borrow + // (due to being wrapped in a `Pin>`), and is not accessed afterwards. 
+ mem::transmute(transaction) + }; + let mut transaction = Box::pin(transaction); + + let subtransaction = transaction.begin().await.unwrap(); + let subtransaction: Transaction<'static, Postgres> = unsafe { + // SAFETY: We extend `subtransaction` lifetime to `'static`. This is valid + // for the same reasons that apply for `transaction`. + mem::transmute(subtransaction) + }; + + Self { + subtransaction, + _transaction: transaction, + _connection: connection, + } + } +} + +#[derive(Debug)] +pub struct TestPoolLock { + lock: MutexGuardArc, +} + +impl TestPoolLock { + pub fn as_connection(&mut self) -> &mut PgConnection { + &mut self.lock.subtransaction + } +} /// Implementation of the test/fake connection pool to be used in tests. /// This implementation works over an established transaction in order to reject /// any changes made to the database, even if the tested component initiates and commits /// its own transactions. /// -/// ## How does it work +/// # How it works /// /// Test pool uses an established transaction to be created. This transaction, in its turn, /// is used to establish a *subtransaction*. Reference to this subtransaction will be used @@ -31,78 +97,28 @@ use crate::StorageProcessor; /// use transaction and somewhere in test `StorageProcessor` is created, used without /// transaction and then dropped (which is a normal use case for e.g. test setup) -- such /// changes would be discarded and test will not execute correctly. -/// -/// ## Safety -/// -/// Test pool relies on unsafe code to work, so it comes with several invariants to be -/// upheld by its user. They are *not* enforced by compiler and breaking *any* of them -/// will result in undefined behavior. -/// -/// Usage invariants: -/// - This object should never outlive the transaction used to create it. If, for example, -/// test pool is created and passed to another thread and the thread with the original -/// connection panics (or connection is simply dropped), the behavior is undefined. -/// - Concurrent access to the pool is forbidden. `TestPool` has to be `Sync` in order to -/// not break the interface of the `ConnectionPool`, but since it operates over a single -/// established transaction, it can't be safely accessed from multiple threads. -/// Moving the object to another thread is safe though. -/// - Since we use mutable reference to the subtransaction to create `StorageProcessor`, you -/// should not create and use multiple `StorageProcessor` objects in the same scope. -/// -/// This object is meant to be used in unit tests only, any attempt to use it with the real -/// database is on the conscience of the user. I have warned you. #[derive(Debug, Clone)] pub struct TestPool { - // Sub-transaction to be used to instantiate connections. - // - // `Arc` is required to keep the pool `Clone` and `Send` and also to pin the transaction - // location in the memory. - // `Mutex` is required to keep the object `Sync` and provide mutable access to the transaction - // from the immutable `access_storage` method. - subtransaction: Arc>>, + inner: Arc>, } impl TestPool { - /// Establishes a Postgres connection to the test database. - pub async fn connect_to_test_db() -> PgConnection { - let database_url = crate::get_test_database_url(); - PgConnection::connect(&database_url).await.unwrap() - } - /// Constructs a new object using an already established transaction to the database. /// This method is unsafe, since internally it extends lifetime of the provided `Transaction`. 
- /// - /// ## Safety - /// - /// When calling this method, caller must guarantee that resulting object will not live longer - /// than the transaction to the database used to create this object. - pub async unsafe fn new(transaction: &mut Transaction<'_, Postgres>) -> Self { - // Using `std::mem::transmute` to extend the lifetime of an object is an unsafe but - // valid way to use this method. - let subtransaction: Transaction<'static, Postgres> = - std::mem::transmute(transaction.begin().await.unwrap()); + pub async fn new() -> Self { Self { - subtransaction: Arc::new(Mutex::new(subtransaction)), + inner: Arc::new(Mutex::new(TestPoolInner::new().await)), } } pub async fn access_storage(&self) -> StorageProcessor<'static> { - let mut lock = self.subtransaction.lock().await; - let subtransaction = &mut *lock; - - // ## Safety - // - // Guarantees held by this method: - // - memory location: original `transaction` object is behind the smart pointer, so its location don't change. - // - // Guarantees held by the caller: - // - cross-thread access: accessing `TestPool` concurrently is forbidden by the contract of the object. - // - having multiple `StorageProcessor` objects is forbidden by the contract of the object. - // - lifetime: we are transmuting lifetime to the static lifetime, so the transaction should never live longer - // than the test pool object. - let subtransaction_ref: &'static mut Transaction = - unsafe { std::mem::transmute(subtransaction) }; + const LOCK_TIMEOUT: Duration = Duration::from_secs(1); - StorageProcessor::from_test_transaction(subtransaction_ref) + let lock = self.inner.lock_arc(); + let lock = timeout(LOCK_TIMEOUT, lock).await.expect( + "Timed out waiting to acquire a lock in test `ConnectionPool`. \ + Check the backtrace and make sure that no `StorageProcessor`s are alive", + ); + StorageProcessor::from_test_transaction(TestPoolLock { lock }) } } diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 28b8963053ab..0aa5b697e664 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -2,8 +2,12 @@ use crate::models::storage_eth_tx::{ L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, }; use crate::StorageProcessor; -use sqlx::Row; +use sqlx::{ + types::chrono::{DateTime, Utc}, + Row, +}; use std::convert::TryFrom; +use std::str::FromStr; use zksync_types::aggregated_operations::AggregatedActionType; use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend}; use zksync_types::{Address, L1BatchNumber, H256, U256}; @@ -33,15 +37,29 @@ impl EthSenderDal<'_, '_> { async_std::task::block_on(async { let mut stats = L1BatchEthSenderStats::default(); for tx_type in ["execute_tx", "commit_tx", "prove_tx"] { - let records= sqlx::query(&format!( - "SELECT MAX(number) as number, txs.confirmed_at IS NOT NULL as confirmed FROM l1_batches - LEFT JOIN eth_txs_history as txs ON (l1_batches.eth_{}_id = txs.eth_tx_id) - GROUP BY confirmed", + let mut records= sqlx::query(&format!( + "SELECT number as number, true as confirmed FROM l1_batches + INNER JOIN eth_txs_history ON (l1_batches.eth_{}_id = eth_txs_history.eth_tx_id) + WHERE eth_txs_history.confirmed_at IS NOT NULL + ORDER BY number DESC + LIMIT 1", tx_type )) .fetch_all(self.storage.conn()) .await .unwrap(); + + records.extend(sqlx::query(&format!( + "SELECT number as number, false as confirmed FROM l1_batches + INNER JOIN eth_txs_history ON (l1_batches.eth_{}_id = eth_txs_history.eth_tx_id) + ORDER BY number DESC + 
LIMIT 1", + tx_type + )) + .fetch_all(self.storage.conn()) + .await + .unwrap()); + for record in records { let batch_number = L1BatchNumber(record.get::("number") as u32); let aggregation_action = match tx_type { @@ -153,7 +171,7 @@ impl EthSenderDal<'_, '_> { priority_fee_per_gas: u64, tx_hash: H256, raw_signed_tx: Vec, - ) -> u32 { + ) -> Option { async_std::task::block_on(async { let priority_fee_per_gas = i64::try_from(priority_fee_per_gas).expect("Can't convert U256 to i64"); @@ -165,6 +183,7 @@ impl EthSenderDal<'_, '_> { "INSERT INTO eth_txs_history (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, now(), now()) + ON CONFLICT (tx_hash) DO NOTHING RETURNING id", eth_tx_id as u32, base_fee_per_gas, @@ -172,10 +191,10 @@ impl EthSenderDal<'_, '_> { tx_hash, raw_signed_tx ) - .fetch_one(self.storage.conn()) + .fetch_optional(self.storage.conn()) .await .unwrap() - .id as u32 + .map(|row| row.id as u32) }) } @@ -208,6 +227,7 @@ impl EthSenderDal<'_, '_> { pub fn confirm_tx(&mut self, tx_hash: H256, gas_used: U256) { async_std::task::block_on(async { + let mut transaction = self.storage.start_transaction().await; let gas_used = i64::try_from(gas_used).expect("Can't convert U256 to i64"); let tx_hash = format!("{:#x}", tx_hash); let ids = sqlx::query!( @@ -217,7 +237,7 @@ impl EthSenderDal<'_, '_> { RETURNING id, eth_tx_id", tx_hash, ) - .fetch_one(self.storage.conn()) + .fetch_one(transaction.conn()) .await .unwrap(); @@ -229,9 +249,117 @@ impl EthSenderDal<'_, '_> { ids.id, ids.eth_tx_id ) - .execute(self.storage.conn()) + .execute(transaction.conn()) + .await + .unwrap(); + + transaction.commit().await; + }) + } + + pub fn get_confirmed_tx_hash_by_eth_tx_id(&mut self, eth_tx_id: u32) -> Option { + async_std::task::block_on(async { + let tx_hash = sqlx::query!( + "SELECT tx_hash FROM eth_txs_history + WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL", + eth_tx_id as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + + tx_hash.map(|tx_hash| { + let tx_hash = tx_hash.tx_hash; + let tx_hash = tx_hash.trim_start_matches("0x"); + H256::from_str(tx_hash).unwrap() + }) + }) + } + + /// This method inserts a fake transaction into the database that would make the corresponding L1 batch + /// to be considered committed/proven/executed. + /// + /// The designed use case is the External Node usage, where we don't really care about the actual transactions apart + /// from the hash and the fact that tx was sent. + /// + /// ## Warning + /// + /// After this method is used anywhere in the codebase, it is considered a bug to try to directly query `eth_txs_history` + /// or `eth_txs` tables. + pub fn insert_bogus_confirmed_eth_tx( + &mut self, + l1_batch: L1BatchNumber, + tx_type: AggregatedActionType, + tx_hash: H256, + confirmed_at: DateTime, + ) { + async_std::task::block_on(async { + let mut transaction = self.storage.start_transaction().await; + let tx_hash = format!("{:#x}", tx_hash); + + let eth_tx_id = sqlx::query_scalar!( + "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs + ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id + WHERE eth_txs_history.tx_hash = $1", + tx_hash + ) + .fetch_optional(transaction.conn()) .await .unwrap(); + + // Check if the transaction with the corresponding hash already exists. + let eth_tx_id = if let Some(eth_tx_id) = eth_tx_id { + eth_tx_id + } else { + // No such transaction in the database yet, we have to insert it. + + // Insert general tx descriptor. 
+ let eth_tx_id = sqlx::query_scalar!( + "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at) + VALUES ('\\x00', 0, $1, '', 0, now(), now()) + RETURNING id", + tx_type.to_string() + ) + .fetch_one(transaction.conn()) + .await + .unwrap(); + + // Insert a "sent transaction". + let eth_history_id = sqlx::query_scalar!( + "INSERT INTO eth_txs_history + (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at) + VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3) + RETURNING id", + eth_tx_id, + tx_hash, + confirmed_at.naive_utc() + ) + .fetch_one(transaction.conn()) + .await + .unwrap(); + + // Mark general entry as confirmed. + sqlx::query!( + "UPDATE eth_txs + SET confirmed_eth_tx_history_id = $1 + WHERE id = $2", + eth_history_id, + eth_tx_id + ) + .execute(transaction.conn()) + .await + .unwrap(); + + eth_tx_id + }; + + // Tie the ETH tx to the L1 batch. + super::BlocksDal { + storage: &mut transaction, + } + .set_eth_tx_id(l1_batch, l1_batch, eth_tx_id as u32, tx_type); + + transaction.commit().await; }) } diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 11c0f36a50c7..f5c6f637cccb 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -30,7 +30,6 @@ impl EventsDal<'_, '_> { ) .await .unwrap(); - let mut bytes: Vec = Vec::new(); let now = Utc::now().naive_utc().to_string(); let mut event_index_in_block = 0u32; @@ -81,6 +80,7 @@ impl EventsDal<'_, '_> { } } copy.send(bytes).await.unwrap(); + // note: all the time spent in this function is spent in `copy.finish()` copy.finish().await.unwrap(); }) } diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 7e3e8c5e9fdd..4c854fc95350 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -3,14 +3,15 @@ use std::time::Instant; use sqlx::Row; use crate::models::storage_block::web3_block_number_to_sql; -use crate::models::storage_event::StorageWeb3Log; -use crate::SqlxError; -use crate::StorageProcessor; use zksync_types::{ - api::{self, GetLogsFilter, Log}, + api::{GetLogsFilter, Log}, MiniblockNumber, }; +use crate::models::storage_event::StorageWeb3Log; +use crate::SqlxError; +use crate::StorageProcessor; + #[derive(Debug)] pub struct EventsWeb3Dal<'a, 'c> { pub storage: &'a mut StorageProcessor<'c>, @@ -40,11 +41,7 @@ impl EventsWeb3Dal<'_, '_> { ); let mut query = sqlx::query(&query); - query = query.bind(filter.from_block.0 as i64); - if let Some(api::BlockNumber::Number(number)) = filter.to_block { - query = query.bind(number.as_u64() as i64); - } if !filter.addresses.is_empty() { let addresses: Vec<_> = filter .addresses @@ -94,11 +91,6 @@ impl EventsWeb3Dal<'_, '_> { ); let mut query = sqlx::query_as(&query); - query = query.bind(filter.from_block.0 as i64); - - if let Some(api::BlockNumber::Number(number)) = filter.to_block { - query = query.bind(number.as_u64() as i64); - } if !filter.addresses.is_empty() { let addresses: Vec<_> = filter .addresses @@ -123,17 +115,11 @@ impl EventsWeb3Dal<'_, '_> { fn build_get_logs_where_clause(&self, filter: &GetLogsFilter) -> (String, u8) { let mut arg_index = 1; - let (block_sql, new_arg_index) = web3_block_number_to_sql( - api::BlockNumber::Number(filter.from_block.0.into()), - arg_index, - ); - let mut where_sql = format!("(miniblock_number >= {})", block_sql); - arg_index = new_arg_index; + let mut where_sql = format!("(miniblock_number >= {})", 
filter.from_block.0 as i64); if let Some(to_block) = filter.to_block { - let (block_sql, new_arg_index) = web3_block_number_to_sql(to_block, arg_index); + let block_sql = web3_block_number_to_sql(to_block); where_sql += &format!(" AND (miniblock_number <= {})", block_sql); - arg_index = new_arg_index; } if !filter.addresses.is_empty() { where_sql += &format!(" AND (address = ANY(${}))", arg_index); @@ -171,10 +157,40 @@ impl EventsWeb3Dal<'_, '_> { "#, from_block.0 as i64 ) - .fetch_all(self.storage.conn()) - .await?; + .fetch_all(self.storage.conn()) + .await?; let logs = db_logs.into_iter().map(Into::into).collect(); Ok(logs) }) } } + +#[cfg(test)] +mod tests { + use db_test_macro::db_test; + use vm::zk_evm::ethereum_types::{Address, H256}; + use zksync_types::api::BlockNumber; + + use super::*; + use crate::connection::ConnectionPool; + + #[db_test(dal_crate)] + async fn test_build_get_logs_where_clause(connection_pool: ConnectionPool) { + let storage = &mut connection_pool.access_test_storage().await; + let events_web3_dal = EventsWeb3Dal { storage }; + let filter = GetLogsFilter { + from_block: MiniblockNumber(100), + to_block: Some(BlockNumber::Number(200.into())), + addresses: vec![Address::from_low_u64_be(123)], + topics: vec![(0, vec![H256::from_low_u64_be(456)])], + }; + + let expected_sql = "(miniblock_number >= 100) AND (miniblock_number <= 200) AND (address = ANY($1)) AND (topic0 = ANY($2))"; + let expected_arg_index = 3; + + let (actual_sql, actual_arg_index) = events_web3_dal.build_get_logs_where_clause(&filter); + + assert_eq!(actual_sql, expected_sql); + assert_eq!(actual_arg_index, expected_arg_index); + } +} diff --git a/core/lib/dal/src/explorer/contract_verification_dal.rs b/core/lib/dal/src/explorer/contract_verification_dal.rs index f1bb69bd19b8..9f80efc936f9 100644 --- a/core/lib/dal/src/explorer/contract_verification_dal.rs +++ b/core/lib/dal/src/explorer/contract_verification_dal.rs @@ -231,8 +231,8 @@ impl ContractVerificationDal<'_, '_> { async_std::task::block_on(async { let hashed_key = get_code_key(&address).hashed_key(); let result = sqlx::query!( - " - SELECT factory_deps.bytecode, transactions.data, transactions.contract_address + r#" + SELECT factory_deps.bytecode, transactions.data as "data?", transactions.contract_address as "contract_address?" FROM ( SELECT * FROM storage_logs WHERE storage_logs.hashed_key = $1 @@ -240,9 +240,9 @@ impl ContractVerificationDal<'_, '_> { LIMIT 1 ) storage_logs JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value - JOIN transactions ON transactions.hash = storage_logs.tx_hash + LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash WHERE storage_logs.value != $2 - ", + "#, hashed_key.as_bytes(), FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() ) @@ -253,7 +253,9 @@ impl ContractVerificationDal<'_, '_> { Some(contract_address) if contract_address == CONTRACT_DEPLOYER_ADDRESS.0.to_vec() => { - let data: serde_json::Value = row.data; + // `row.contract_address` and `row.data` are either both `None` or both `Some(_)`. + // In this arm it's checked that `row.contract_address` is `Some(_)`, so it's safe to unwrap `row.data`. 
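The `as "data?"` and `as "contract_address?"` aliases in the query above use sqlx's column type overrides: after switching to a LEFT JOIN the columns can be NULL, and the trailing `?` tells the `query!` macro to map them to `Option<_>` (a trailing `!` forces a non-optional type, as in `"count!"` elsewhere in this patch). A rough sketch of the same idea against hypothetical `accounts` and `profiles` tables; like every `query!` call site, it needs a live database or offline metadata at compile time:

use sqlx::PgConnection;

async fn bio_for_account(conn: &mut PgConnection, id: i64) -> sqlx::Result<Option<String>> {
    let row = sqlx::query!(
        r#"
        SELECT accounts.id, profiles.bio AS "bio?"
        FROM accounts
        LEFT JOIN profiles ON profiles.account_id = accounts.id
        WHERE accounts.id = $1
        "#,
        id
    )
    .fetch_one(conn)
    .await?;
    // Without the override the macro may infer a non-optional type for `bio`,
    // which would fail to decode a NULL at runtime; with `?` it is an `Option<String>`.
    Ok(row.bio)
}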
+ let data: serde_json::Value = row.data.unwrap(); let calldata_str: String = serde_json::from_value(data.get("calldata").unwrap().clone()).unwrap(); let calldata = hex::decode(&calldata_str[2..]).unwrap(); @@ -363,4 +365,32 @@ impl ContractVerificationDal<'_, '_> { Ok(()) }) } + + pub fn get_all_successful_requests(&mut self) -> Result, SqlxError> { + async_std::task::block_on(async { + let result = sqlx::query!( + "SELECT * FROM contract_verification_requests + WHERE status = 'successful' + ORDER BY id", + ) + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| VerificationRequest { + id: row.id as usize, + req: VerificationIncomingRequest { + contract_address: Address::from_slice(&row.contract_address), + source_code_data: serde_json::from_str(&row.source_code).unwrap(), + contract_name: row.contract_name, + compiler_zksolc_version: row.compiler_zksolc_version, + compiler_solc_version: row.compiler_solc_version, + optimization_used: row.optimization_used, + constructor_arguments: row.constructor_arguments.into(), + is_system: row.is_system, + }, + }) + .collect(); + Ok(result) + }) + } } diff --git a/core/lib/dal/src/explorer/explorer_blocks_dal.rs b/core/lib/dal/src/explorer/explorer_blocks_dal.rs index 0de5365c4c48..22ad9de51873 100644 --- a/core/lib/dal/src/explorer/explorer_blocks_dal.rs +++ b/core/lib/dal/src/explorer/explorer_blocks_dal.rs @@ -4,7 +4,7 @@ use zksync_types::explorer_api::{ BlockDetails, BlockPageItem, BlocksQuery, L1BatchDetails, L1BatchPageItem, L1BatchesQuery, PaginationDirection, }; -use zksync_types::{L1BatchNumber, MiniblockNumber}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber}; use crate::models::storage_block::{ block_page_item_from_storage, l1_batch_page_item_from_storage, StorageBlockDetails, @@ -63,10 +63,11 @@ impl ExplorerBlocksDal<'_, '_> { pub fn get_block_details( &mut self, block_number: MiniblockNumber, + current_operator_address: Address, ) -> Result, SqlxError> { async_std::task::block_on(async { let started_at = Instant::now(); - let block_details: Option = sqlx::query_as!( + let storage_block_details: Option = sqlx::query_as!( StorageBlockDetails, r#" SELECT miniblocks.number, @@ -84,7 +85,8 @@ impl ExplorerBlocksDal<'_, '_> { miniblocks.l1_gas_price, miniblocks.l2_fair_gas_price, miniblocks.bootloader_code_hash, - miniblocks.default_aa_code_hash + miniblocks.default_aa_code_hash, + l1_batches.fee_account_address as "fee_account_address?" 
FROM miniblocks LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) @@ -97,7 +99,9 @@ impl ExplorerBlocksDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_block_details"); - Ok(block_details.map(BlockDetails::from)) + Ok(storage_block_details.map(|storage_block_details| { + storage_block_details.into_block_details(current_operator_address) + })) }) } @@ -112,14 +116,14 @@ impl ExplorerBlocksDal<'_, '_> { PaginationDirection::Newer => (">", "ASC"), }; let cmp_str = if query.from.is_some() { - format!("WHERE l1_batches.number {} $3", cmp_sign) + format!("AND l1_batches.number {} $3", cmp_sign) } else { "".to_string() }; let sql_query_str = format!( " SELECT number, l1_tx_count, l2_tx_count, hash, timestamp FROM l1_batches - {} + WHERE l1_batches.hash IS NOT NULL {} ORDER BY l1_batches.number {} LIMIT $1 OFFSET $2 diff --git a/core/lib/dal/src/explorer/explorer_transactions_dal.rs b/core/lib/dal/src/explorer/explorer_transactions_dal.rs index bbe563ca28af..b6ead0b27847 100644 --- a/core/lib/dal/src/explorer/explorer_transactions_dal.rs +++ b/core/lib/dal/src/explorer/explorer_transactions_dal.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::time::Instant; use itertools::Itertools; use once_cell::sync::Lazy; @@ -29,15 +30,17 @@ pub struct ExplorerTransactionsDal<'a, 'c> { } impl ExplorerTransactionsDal<'_, '_> { - pub fn get_transactions_count_after( + pub fn get_transactions_count_between( &mut self, - block_number: MiniblockNumber, + from_block_number: MiniblockNumber, + to_block_number: MiniblockNumber, ) -> Result { async_std::task::block_on(async { let tx_count = sqlx::query!( r#"SELECT COUNT(*) as "count!" FROM transactions - WHERE miniblock_number > $1 AND miniblock_number IS NOT NULL"#, - block_number.0 as i64 + WHERE miniblock_number BETWEEN $1 AND $2"#, + from_block_number.0 as i64, + to_block_number.0 as i64, ) .fetch_one(self.storage.conn()) .await? @@ -56,6 +59,7 @@ impl ExplorerTransactionsDal<'_, '_> { StorageTransactionDetails, r#" SELECT transactions.*, miniblocks.hash as "block_hash?", + miniblocks.timestamp as "miniblock_timestamp?", commit_tx.tx_hash as "eth_commit_tx_hash?", prove_tx.tx_hash as "eth_prove_tx_hash?", execute_tx.tx_hash as "eth_execute_tx_hash?" 
@@ -162,6 +166,7 @@ impl ExplorerTransactionsDal<'_, '_> { let sql_query_list_str = format!( r#" SELECT transactions.*, miniblocks.hash as "block_hash", + miniblocks.timestamp as "miniblock_timestamp", commit_tx.tx_hash as eth_commit_tx_hash, prove_tx.tx_hash as eth_prove_tx_hash, execute_tx.tx_hash as eth_execute_tx_hash @@ -232,6 +237,7 @@ impl ExplorerTransactionsDal<'_, '_> { let sql_query_str = format!( r#" SELECT transactions.*, miniblocks.hash as "block_hash", + miniblocks.timestamp as "miniblock_timestamp", commit_tx.tx_hash as eth_commit_tx_hash, prove_tx.tx_hash as eth_prove_tx_hash, execute_tx.tx_hash as eth_execute_tx_hash @@ -266,6 +272,7 @@ impl ExplorerTransactionsDal<'_, '_> { max_total: usize, ) -> Result<(Vec>, usize), SqlxError> { async_std::task::block_on(async { + let started_at = Instant::now(); let (cmp_sign, order_str) = match pagination.direction { PaginationDirection::Older => ("<", "DESC"), PaginationDirection::Newer => (">", "ASC"), @@ -293,84 +300,122 @@ impl ExplorerTransactionsDal<'_, '_> { let mut padded_address = [0u8; 12].to_vec(); padded_address.extend_from_slice(account_address.as_bytes()); - let sql_query_str = format!( - " - SELECT tx_hash FROM ( - SELECT tx_hash, lag(tx_hash) OVER (ORDER BY miniblock_number {0}, tx_index_in_block {0}) as prev_hash, - miniblock_number, tx_index_in_block - FROM events - WHERE + // We query more events than `max_total`, so after deduplication we receive at least `max_total`. + let estimated_required_limit = max_total * 4; + + let mut started_at_stage = Instant::now(); + let hashes_transfer_from: Vec<(Vec, i64, i32)> = { + let sql_query_str = format!( + r#" + SELECT tx_hash, miniblock_number, tx_index_in_block FROM events + WHERE topic1 = $1 AND topic2 = $2 + {1} + ORDER BY miniblock_number {0}, tx_index_in_block {0} + LIMIT {2} + "#, + order_str, optional_filters, estimated_required_limit + ); + let sql_query = sqlx::query(&sql_query_str) + .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) + .bind(padded_address.clone()); + sql_query + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| { ( - ( - ( - topic2 = $1 - OR - topic3 = $1 - ) - AND topic1 = $2 - AND (address IN (SELECT l2_address FROM tokens) OR address = $3) - ) - OR events.tx_initiator_address = $4 + row.get::, &str>("tx_hash"), + row.get::("miniblock_number"), + row.get::("tx_index_in_block"), ) - {1} - ) AS h - WHERE prev_hash IS NULL OR tx_hash != prev_hash - ORDER BY miniblock_number {0}, tx_index_in_block {0} - LIMIT {2} OFFSET {3} - ", - order_str, optional_filters, pagination.limit, pagination.offset - ); - let sql_query = sqlx::query(&sql_query_str) - .bind(padded_address.clone()) - .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) - .bind(L2_ETH_TOKEN_ADDRESS.as_bytes().to_vec()) - .bind(account_address.as_bytes().to_vec()); - let hashes: Vec> = sql_query - .fetch_all(self.storage.conn()) - .await? 
- .into_iter() - .map(|row| row.get::, &str>("tx_hash")) - .collect(); + }) + .collect() + }; + metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_transfer_from"); - let sql_count_query_str = format!( - r#" - SELECT COUNT(*) as "count" FROM ( - SELECT true FROM ( - SELECT tx_hash, lag(tx_hash) OVER (ORDER BY miniblock_number {0}, tx_index_in_block {0}) as prev_hash, - miniblock_number, tx_index_in_block - FROM events - WHERE + started_at_stage = Instant::now(); + let hashes_transfer_to: Vec<(Vec, i64, i32)> = { + let sql_query_str = format!( + r#" + SELECT tx_hash, miniblock_number, tx_index_in_block FROM events + WHERE topic1 = $1 AND topic3 = $2 + {1} + ORDER BY miniblock_number {0}, tx_index_in_block {0} + LIMIT {2} + "#, + order_str, optional_filters, estimated_required_limit + ); + let sql_query = sqlx::query(&sql_query_str) + .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) + .bind(padded_address.clone()); + sql_query + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| { ( - ( - ( - topic2 = $1 - OR - topic3 = $1 - ) - AND topic1 = $2 - AND (address IN (SELECT l2_address FROM tokens) OR address = $3) - ) - OR events.tx_initiator_address = $4 + row.get::, &str>("tx_hash"), + row.get::("miniblock_number"), + row.get::("tx_index_in_block"), ) - {1} - ) AS h - WHERE prev_hash IS NULL OR tx_hash != prev_hash - ORDER BY miniblock_number {0}, tx_index_in_block {0} + }) + .collect() + }; + metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_transfer_to"); + + started_at_stage = Instant::now(); + let hashes_initiated: Vec<(Vec, i64, i32)> = { + let sql_query_str = format!( + r#" + SELECT hash, miniblock_number, index_in_block FROM transactions + WHERE initiator_address = $1 AND miniblock_number IS NOT NULL + {1} + ORDER BY nonce {0} LIMIT {2} - ) AS c "#, - order_str, optional_filters, max_total - ); - let sql_count_query = sqlx::query(&sql_count_query_str) - .bind(padded_address) - .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) - .bind(L2_ETH_TOKEN_ADDRESS.as_bytes().to_vec()) - .bind(account_address.as_bytes().to_vec()); - let total = sql_count_query - .fetch_one(self.storage.conn()) - .await? - .get::("count"); - Ok((hashes, total as usize)) + order_str, + optional_filters.replace("tx_index_in_block", "index_in_block"), + max_total + ); + let sql_query = + sqlx::query(&sql_query_str).bind(account_address.as_bytes().to_vec()); + sql_query + .fetch_all(self.storage.conn()) + .await? 
+ .into_iter() + .map(|row| { + ( + row.get::, &str>("hash"), + row.get::("miniblock_number"), + row.get::("index_in_block"), + ) + }) + .collect() + }; + metrics::histogram!("dal.request", started_at_stage.elapsed(), "method" => "get_hashes_initiated"); + + let mut merged: Vec<_> = hashes_transfer_from + .into_iter() + .chain(hashes_transfer_to.into_iter()) + .chain(hashes_initiated.into_iter()) + .sorted_by(|(_, b1, i1), (_, b2, i2)| match pagination.direction { + PaginationDirection::Older => (b2, i2).cmp(&(b1, i1)), + PaginationDirection::Newer => (b1, i1).cmp(&(b2, i2)), + }) + .map(|(hash, _, _)| hash) + .collect(); + merged.dedup(); + + let total = merged.len(); + let result: Vec<_> = merged + .into_iter() + .skip(pagination.offset) + .take(pagination.limit) + .collect(); + + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_account_transactions_hashes_page"); + + Ok((result, total)) }) } diff --git a/core/lib/dal/src/gpu_prover_queue_dal.rs b/core/lib/dal/src/gpu_prover_queue_dal.rs index a4fc47aed885..083b4d0cb483 100644 --- a/core/lib/dal/src/gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/gpu_prover_queue_dal.rs @@ -3,6 +3,7 @@ use std::time::Duration; use crate::time_utils::pg_interval_from_duration; use crate::StorageProcessor; +use std::collections::HashMap; #[derive(Debug)] pub struct GpuProverQueueDal<'a, 'c> { @@ -28,11 +29,12 @@ pub enum GpuProverInstanceStatus { } impl GpuProverQueueDal<'_, '_> { - pub fn get_free_prover_instance( + pub fn lock_available_prover( &mut self, processing_timeout: Duration, specialized_prover_group_id: u8, region: String, + zone: String, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -42,11 +44,12 @@ impl GpuProverQueueDal<'_, '_> { SET instance_status = 'reserved', updated_at = now(), processing_started_at = now() - WHERE (instance_host, instance_port) in ( - SELECT instance_host, instance_port + WHERE id in ( + SELECT id FROM gpu_prover_queue WHERE specialized_prover_group_id=$2 AND region=$3 + AND zone=$4 AND ( instance_status = 'available' OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval) @@ -60,7 +63,8 @@ impl GpuProverQueueDal<'_, '_> { ", &processing_timeout, specialized_prover_group_id as i16, - region + region, + zone ) .fetch_optional(self.storage.conn()) .await @@ -80,19 +84,23 @@ impl GpuProverQueueDal<'_, '_> { queue_capacity: usize, specialized_prover_group_id: u8, region: String, + zone: String, + num_gpu: u8, ) { async_std::task::block_on(async { sqlx::query!( " - INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, created_at, updated_at) - VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, now(), now()) - ON CONFLICT(instance_host, instance_port, region) - DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, updated_at=now()", + INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at) + VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now()) + ON CONFLICT(instance_host, instance_port, region, zone) + DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, 
updated_at=now()", format!("{}",address.host), address.port as i32, queue_capacity as i32, specialized_prover_group_id as i16, - region) + region, + zone, + num_gpu as i16) .execute(self.storage.conn()) .await .unwrap(); @@ -104,6 +112,8 @@ impl GpuProverQueueDal<'_, '_> { address: SocketAddress, status: GpuProverInstanceStatus, queue_free_slots: usize, + region: String, + zone: String, ) { async_std::task::block_on(async { sqlx::query!( @@ -112,11 +122,15 @@ impl GpuProverQueueDal<'_, '_> { SET instance_status = $1, updated_at = now(), queue_free_slots = $4 WHERE instance_host = $2::text::inet AND instance_port = $3 + AND region = $5 + AND zone = $6 ", format!("{:?}", status).to_lowercase(), format!("{}", address.host), address.port as i32, queue_free_slots as i32, + region, + zone ) .execute(self.storage.conn()) .await @@ -128,6 +142,8 @@ impl GpuProverQueueDal<'_, '_> { &mut self, address: SocketAddress, queue_free_slots: usize, + region: String, + zone: String, ) { async_std::task::block_on(async { sqlx::query!( @@ -137,10 +153,14 @@ impl GpuProverQueueDal<'_, '_> { WHERE instance_host = $1::text::inet AND instance_port = $2 AND instance_status = 'full' + AND region = $4 + AND zone = $5 ", format!("{}", address.host), address.port as i32, - queue_free_slots as i32 + queue_free_slots as i32, + region, + zone ) .execute(self.storage.conn()) .await @@ -148,26 +168,21 @@ impl GpuProverQueueDal<'_, '_> { }) } - pub fn get_count_of_jobs_ready_for_processing(&mut self) -> u32 { + pub fn get_prover_gpu_count_per_region_zone(&mut self) -> HashMap<(String, String), u64> { async_std::task::block_on(async { sqlx::query!( r#" - SELECT MIN(count) as "count" - FROM (SELECT COALESCE(SUM(queue_free_slots), 0) as "count" - FROM gpu_prover_queue - where instance_status = 'available' - UNION - SELECT count(*) as "count" - from prover_jobs - where status = 'queued' - ) as t1; + SELECT region, zone, SUM(num_gpu) AS total_gpus + FROM gpu_prover_queue + GROUP BY region, zone "#, ) - .fetch_one(self.storage.conn()) + .fetch_all(self.storage.conn()) .await .unwrap() - .count - .unwrap() as u32 + .into_iter() + .map(|row| ((row.region, row.zone), row.total_gpus.unwrap() as u64)) + .collect() }) } } diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/dal/src/healthcheck.rs new file mode 100644 index 000000000000..dfc10c09d068 --- /dev/null +++ b/core/lib/dal/src/healthcheck.rs @@ -0,0 +1,25 @@ +use crate::ConnectionPool; +use zksync_health_check::{CheckHealth, CheckHealthStatus}; + +// HealthCheck used to verify if we can connect to the database. +// This guarantees that the app can use it's main "communication" channel. 
+// Used in the /health endpoint. +#[derive(Clone, Debug)] +pub struct ConnectionPoolHealthCheck { + connection_pool: ConnectionPool, +} + +impl ConnectionPoolHealthCheck { + pub fn new(connection_pool: ConnectionPool) -> ConnectionPoolHealthCheck { + Self { connection_pool } + } +} + +impl CheckHealth for ConnectionPoolHealthCheck { + fn check_health(&self) -> CheckHealthStatus { + // This check is rather feeble; we plan to make it more reliable here: + // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check + let _ = self.connection_pool.access_storage_blocking(); + CheckHealthStatus::Ready + } +} diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 90d099ac476b..d8cf27e7218f 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -13,8 +13,8 @@ pub use sqlx::types::BigDecimal; // Local imports use crate::blocks_dal::BlocksDal; use crate::blocks_web3_dal::BlocksWeb3Dal; -use crate::connection::holder::ConnectionHolder; pub use crate::connection::ConnectionPool; +use crate::connection::{holder::ConnectionHolder, test_pool::TestPoolLock}; use crate::eth_sender_dal::EthSenderDal; use crate::events_dal::EventsDal; use crate::events_web3_dal::EventsWeb3Dal; @@ -42,6 +42,7 @@ pub mod events_web3_dal; pub mod explorer; pub mod fee_monitor_dal; pub mod gpu_prover_queue_dal; +pub mod healthcheck; mod models; pub mod prover_dal; pub mod storage_dal; @@ -84,8 +85,13 @@ pub struct StorageProcessor<'a> { } impl<'a> StorageProcessor<'a> { + /// WARNING: this method is intentionally private. + /// The `zksync_dal` crate uses the `async-std` runtime, whereas most of our crates use `tokio`. + /// Calling an `async-std` future from a `tokio` context may cause deadlocks (and it did happen). + /// Use the blocking counterpart instead. + /// /// Creates a `StorageProcessor` using a single dedicated connection to the database. - pub async fn establish_connection(connect_to_master: bool) -> StorageProcessor<'static> { + async fn establish_connection(connect_to_master: bool) -> StorageProcessor<'static> { let database_url = if connect_to_master { get_master_database_url() } else { @@ -98,7 +104,16 @@ impl<'a> StorageProcessor<'a> { } } - pub async fn start_transaction<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> { + /// Creates a `StorageProcessor` using a single dedicated connection to the database. + pub fn establish_connection_blocking(connect_to_master: bool) -> StorageProcessor<'static> { + block_on(Self::establish_connection(connect_to_master)) + } + + /// WARNING: this method is intentionally private. + /// The `zksync_dal` crate uses the `async-std` runtime, whereas most of our crates use `tokio`. + /// Calling an `async-std` future from a `tokio` context may cause deadlocks (and it did happen). + /// Use the blocking counterpart instead.
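The WARNING comments above describe the convention used throughout this file: the async methods stay private, and public `*_blocking` wrappers drive them on the async-std executor, so callers running under another runtime (for example tokio) never poll an async-std future directly. A minimal standalone sketch of that wrapper pattern; the `Processor` type and the URL are illustrative, not part of the patch:

struct Processor;

impl Processor {
    // Private async method, analogous to `establish_connection` above.
    async fn establish(database_url: String) -> Self {
        // A real implementation would open a database connection here.
        let _ = database_url;
        Processor
    }

    // Public blocking counterpart: the future is driven by async-std's own executor,
    // so the caller's runtime (if any) is not involved in polling it.
    pub fn establish_blocking(database_url: String) -> Self {
        async_std::task::block_on(Self::establish(database_url))
    }
}

fn main() {
    let _processor = Processor::establish_blocking("postgres://localhost/test".to_owned());
}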
+ async fn start_transaction<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> { let transaction = self.conn().begin().await.unwrap(); let mut processor = StorageProcessor::from_transaction(transaction); @@ -116,23 +131,26 @@ impl<'a> StorageProcessor<'a> { self.in_transaction } - pub fn from_transaction(conn: Transaction<'_, Postgres>) -> StorageProcessor<'_> { - StorageProcessor { + pub fn from_transaction(conn: Transaction<'a, Postgres>) -> Self { + Self { conn: ConnectionHolder::Transaction(conn), in_transaction: true, } } - pub fn from_test_transaction<'b>( - conn: &'b mut Transaction<'static, Postgres>, - ) -> StorageProcessor<'b> { + pub fn from_test_transaction(conn: TestPoolLock) -> StorageProcessor<'static> { StorageProcessor { conn: ConnectionHolder::TestTransaction(conn), in_transaction: true, } } - pub async fn commit(self) { + /// WARNING: this method is intentionally private. + /// `zksync_dal` crate uses `async-std` runtime, whereas most of our crates use `tokio`. + /// Calling `async-std` future from `tokio` context may cause deadlocks (and it did happen). + /// Use blocking counterpart instead. + /// + async fn commit(self) { if let ConnectionHolder::Transaction(transaction) = self.conn { transaction.commit().await.unwrap(); } else { @@ -159,7 +177,7 @@ impl<'a> StorageProcessor<'a> { ConnectionHolder::Pooled(conn) => conn, ConnectionHolder::Direct(conn) => conn, ConnectionHolder::Transaction(conn) => conn, - ConnectionHolder::TestTransaction(conn) => conn, + ConnectionHolder::TestTransaction(conn) => conn.as_connection(), } } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index d94bde4d073e..2941478269dd 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -1,18 +1,18 @@ -use bigdecimal::{BigDecimal, ToPrimitive}; -use sqlx::postgres::PgArguments; use std::convert::TryInto; use std::str::FromStr; -use thiserror::Error; -use zksync_types::explorer_api::{BlockDetails, L1BatchDetails, L1BatchPageItem}; +use bigdecimal::{BigDecimal, ToPrimitive}; +use sqlx::postgres::PgArguments; use sqlx::query::Query; - use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; use sqlx::Postgres; +use thiserror::Error; + use zksync_contracts::BaseSystemContractsHashes; use zksync_types::api::{self, BlockId}; use zksync_types::block::MiniblockHeader; use zksync_types::commitment::{BlockMetaParameters, BlockMetadata}; +use zksync_types::explorer_api::{BlockDetails, L1BatchDetails, L1BatchPageItem}; use zksync_types::{ block::L1BatchHeader, explorer_api::{BlockPageItem, BlockStatus}, @@ -272,42 +272,33 @@ pub fn l1_batch_page_item_from_storage( } } -/// Returns block_number SQL statement and the next argument index that can be used -pub fn web3_block_number_to_sql(block_number: api::BlockNumber, arg_index: u8) -> (String, u8) { +/// Returns block_number SQL statement +pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String { match block_number { - api::BlockNumber::Earliest => ("(SELECT 0::bigint as number)".to_string(), arg_index), - api::BlockNumber::Pending => ( - "(SELECT (MAX(number) + 1) as number FROM miniblocks)".to_string(), - arg_index, - ), - api::BlockNumber::Latest => ( - "(SELECT MAX(number) as number FROM miniblocks)".to_string(), - arg_index, - ), - api::BlockNumber::Number(_) => { - (format!("(SELECT ${} as number)", arg_index), arg_index + 1) + api::BlockNumber::Earliest => 0.to_string(), + api::BlockNumber::Pending => { + "(SELECT (MAX(number) + 1) as number 
FROM miniblocks)".to_string() } - api::BlockNumber::Committed => ( - "(SELECT MAX(number) as number FROM miniblocks)".to_string(), - arg_index, - ), - api::BlockNumber::Finalized => ( - " + api::BlockNumber::Latest | api::BlockNumber::Committed => { + "(SELECT MAX(number) as number FROM miniblocks)".to_string() + } + api::BlockNumber::Number(block_number) => format!("{}", block_number), + api::BlockNumber::Finalized => " (SELECT COALESCE( ( - SELECT miniblocks.number FROM miniblocks - JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - JOIN eth_txs ON l1_batches.eth_execute_tx_id = eth_txs.id - WHERE eth_txs.confirmed_eth_tx_history_id IS NOT NULL - ORDER BY miniblocks.number DESC - LIMIT 1 + SELECT MAX(number) FROM miniblocks + WHERE l1_batch_number = ( + SELECT MAX(number) FROM l1_batches + JOIN eth_txs ON + l1_batches.eth_execute_tx_id = eth_txs.id + WHERE + eth_txs.confirmed_eth_tx_history_id IS NOT NULL + ) ), 0 ) as number) " - .to_string(), - arg_index, - ), + .to_string(), } } @@ -315,7 +306,7 @@ pub fn web3_block_where_sql(block_id: BlockId, arg_index: u8) -> String { match block_id { BlockId::Hash(_) => format!("miniblocks.hash = ${}", arg_index), BlockId::Number(number) => { - let block_sql = web3_block_number_to_sql(number, arg_index).0; + let block_sql = web3_block_number_to_sql(number); format!("miniblocks.number = {}", block_sql) } } @@ -328,7 +319,6 @@ pub fn bind_block_where_sql_params( match block_id { // these block_id types result in `$1` in the query string, which we have to `bind` BlockId::Hash(block_hash) => query.bind(block_hash.0.to_vec()), - BlockId::Number(api::BlockNumber::Number(number)) => query.bind(number.as_u64() as i64), // others don't introduce `$1`, so we don't have to `bind` anything _ => query, } @@ -348,65 +338,67 @@ pub struct StorageBlockDetails { pub proven_at: Option, pub execute_tx_hash: Option, pub executed_at: Option, - pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch - pub l2_fair_gas_price: i64, // L2 gas price assumed in the corresponding batch + // L1 gas price assumed in the corresponding batch + pub l1_gas_price: i64, + // L2 gas price assumed in the corresponding batch + pub l2_fair_gas_price: i64, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub fee_account_address: Option>, // May be None if the block is not yet sealed } -impl From for BlockDetails { - fn from(storage_block_details: StorageBlockDetails) -> Self { - let status = if storage_block_details.number == 0 - || storage_block_details.execute_tx_hash.is_some() - { +impl StorageBlockDetails { + pub(crate) fn into_block_details(self, current_operator_address: Address) -> BlockDetails { + let status = if self.number == 0 || self.execute_tx_hash.is_some() { BlockStatus::Verified } else { BlockStatus::Sealed }; BlockDetails { - number: MiniblockNumber(storage_block_details.number as u32), - l1_batch_number: L1BatchNumber(storage_block_details.l1_batch_number as u32), - timestamp: storage_block_details.timestamp as u64, - l1_tx_count: storage_block_details.l1_tx_count as usize, - l2_tx_count: storage_block_details.l2_tx_count as usize, + number: MiniblockNumber(self.number as u32), + l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), + timestamp: self.timestamp as u64, + l1_tx_count: self.l1_tx_count as usize, + l2_tx_count: self.l2_tx_count as usize, status, - root_hash: storage_block_details - .root_hash - .as_deref() - .map(H256::from_slice), - commit_tx_hash: storage_block_details + root_hash: 
self.root_hash.as_deref().map(H256::from_slice), + commit_tx_hash: self .commit_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), - committed_at: storage_block_details + committed_at: self .committed_at .map(|committed_at| DateTime::::from_utc(committed_at, Utc)), - prove_tx_hash: storage_block_details + prove_tx_hash: self .prove_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")), - proven_at: storage_block_details + proven_at: self .proven_at .map(|proven_at| DateTime::::from_utc(proven_at, Utc)), - execute_tx_hash: storage_block_details + execute_tx_hash: self .execute_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")), - executed_at: storage_block_details + executed_at: self .executed_at .map(|executed_at| DateTime::::from_utc(executed_at, Utc)), - l1_gas_price: storage_block_details.l1_gas_price as u64, - l2_fair_gas_price: storage_block_details.l2_fair_gas_price as u64, + l1_gas_price: self.l1_gas_price as u64, + l2_fair_gas_price: self.l2_fair_gas_price as u64, base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: storage_block_details + bootloader: self .bootloader_code_hash .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) .expect("Should be not none"), - default_aa: storage_block_details + default_aa: self .default_aa_code_hash .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) .expect("Should be not none"), }, + operator_address: self + .fee_account_address + .map(|fee_account_address| Address::from_slice(&fee_account_address)) + .unwrap_or(current_operator_address), } } } @@ -493,8 +485,10 @@ pub struct StorageMiniblockHeader { pub l1_tx_count: i32, pub l2_tx_count: i32, pub base_fee_per_gas: BigDecimal, - pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch - pub l2_fair_gas_price: i64, // L2 gas price assumed in the corresponding batch + pub l1_gas_price: i64, + // L1 gas price assumed in the corresponding batch + pub l2_fair_gas_price: i64, + // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, } @@ -523,3 +517,71 @@ impl From for MiniblockHeader { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_web3_block_number_to_sql_earliest() { + let sql = web3_block_number_to_sql(api::BlockNumber::Earliest); + assert_eq!(sql, 0.to_string()); + } + + #[test] + fn test_web3_block_number_to_sql_pending() { + let sql = web3_block_number_to_sql(api::BlockNumber::Pending); + assert_eq!( + sql, + "(SELECT (MAX(number) + 1) as number FROM miniblocks)".to_string() + ); + } + + #[test] + fn test_web3_block_number_to_sql_latest() { + let sql = web3_block_number_to_sql(api::BlockNumber::Latest); + assert_eq!( + sql, + "(SELECT MAX(number) as number FROM miniblocks)".to_string() + ); + } + + #[test] + fn test_web3_block_number_to_sql_committed() { + let sql = web3_block_number_to_sql(api::BlockNumber::Committed); + assert_eq!( + sql, + "(SELECT MAX(number) as number FROM miniblocks)".to_string() + ); + } + + #[test] + fn test_web3_block_number_to_sql_number() { + let sql = web3_block_number_to_sql(api::BlockNumber::Number(123.into())); + assert_eq!(sql, "123".to_string()); + } + + #[test] + fn test_web3_block_number_to_sql_finalized() { + let sql = web3_block_number_to_sql(api::BlockNumber::Finalized); + assert_eq!( + sql, + " + (SELECT COALESCE( + ( + SELECT MAX(number) FROM miniblocks + WHERE l1_batch_number = ( + 
SELECT MAX(number) FROM l1_batches + JOIN eth_txs ON + l1_batches.eth_execute_tx_id = eth_txs.id + WHERE + eth_txs.confirmed_eth_tx_history_id IS NOT NULL + ) + ), + 0 + ) as number) + " + .to_string() + ); + } +} diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 16ea89f4bf5b..0a5e4c5ed617 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -8,6 +8,7 @@ use sqlx::Row; use std::str::FromStr; use zksync_types::l2::TransactionType; use zksync_types::transaction_request::PaymasterParams; +use zksync_types::vm_trace::Call; use zksync_types::web3::types::U64; use zksync_types::{api, explorer_api, L2_ETH_TOKEN_ADDRESS}; use zksync_types::{ @@ -91,6 +92,7 @@ pub struct StorageTransactionDetails { pub l1_batch_tx_index: Option, pub l1_batch_number: Option, pub miniblock_number: Option, + pub miniblock_timestamp: Option, pub block_hash: Option>, pub index_in_block: Option, pub error: Option, @@ -133,26 +135,17 @@ impl From for api::TransactionDetails { fn from(tx_details: StorageTransactionDetails) -> Self { let status = tx_details.get_transaction_status(); - let fee = if tx_details.is_priority { - let full_fee_string = tx_details - .full_fee - .expect("full fee is mandatory for priority operation") - .to_string(); + let effective_gas_price = + bigdecimal_to_u256(tx_details.effective_gas_price.clone().unwrap_or_default()); - U256::from_dec_str(&full_fee_string) - .unwrap_or_else(|_| panic!("Incorrect full fee value in DB {}", full_fee_string)) - } else { - let effective_gas_price = - bigdecimal_to_u256(tx_details.effective_gas_price.clone().unwrap_or_default()); - - let gas_limit = bigdecimal_to_u256( - tx_details - .gas_limit - .clone() - .expect("gas limit is mandatory for transaction"), - ); - gas_limit * effective_gas_price - }; + let gas_limit = bigdecimal_to_u256( + tx_details + .gas_limit + .clone() + .expect("gas limit is mandatory for transaction"), + ); + let gas_refunded = U256::from(tx_details.refunded_gas as u32); + let fee = (gas_limit - gas_refunded) * effective_gas_price; let initiator_address = H160::from_slice(tx_details.initiator_address.as_slice()); let received_at = DateTime::::from_utc(tx_details.received_at, Utc); @@ -480,6 +473,7 @@ pub fn transaction_details_from_storage( let block_number = tx_details .miniblock_number .map(|number| MiniblockNumber(number as u32)); + let miniblock_timestamp = tx_details.miniblock_timestamp.map(|number| number as u64); let l1_batch_number = tx_details .l1_batch_number .map(|number| L1BatchNumber(number as u32)); @@ -539,12 +533,7 @@ pub fn transaction_details_from_storage( let effective_gas_price = bigdecimal_to_u256(storage_tx.effective_gas_price.clone().unwrap_or_default()); let tx: Transaction = storage_tx.into(); - let fee = match &tx.common_data { - ExecuteTransactionCommon::L1(data) => data.full_fee, - ExecuteTransactionCommon::L2(data) => { - (data.fee.gas_limit - tx_details.refunded_gas) * effective_gas_price - } - }; + let fee = (tx.gas_limit() - tx_details.refunded_gas) * effective_gas_price; let tx_type = tx.tx_format(); @@ -569,13 +558,16 @@ pub fn transaction_details_from_storage( to, amount: withdraw.amount, }; - let elem_to_remove = transfer_changes - .iter() - .find_position(|event| event == &&burn_event_to_remove); + let elem_to_remove = transfer_changes.iter().find_position(|event| { + event.token_info.l2_address == burn_event_to_remove.token_info.l2_address + && event.from == 
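The new fee computation above charges only for gas actually consumed: fee = (gas_limit - gas_refunded) * effective_gas_price, replacing the old gas_limit * effective_gas_price (and the full_fee path for priority operations). A quick numeric sanity check with made-up values; `U256` is assumed to be the `zksync_types` re-export imported elsewhere in this patch:

use zksync_types::U256;

fn main() {
    let gas_limit = U256::from(1_000_000u64);
    let gas_refunded = U256::from(250_000u64);
    let effective_gas_price = U256::from(250_000_000u64); // 0.25 gwei, for illustration
    let fee = (gas_limit - gas_refunded) * effective_gas_price;
    // 750_000 gas actually used * 0.25 gwei per gas = 187_500_000_000_000 wei.
    assert_eq!(fee, U256::from(187_500_000_000_000u64));
}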
burn_event_to_remove.from + && event.to == burn_event_to_remove.to + && event.amount == burn_event_to_remove.amount + }); if let Some(idx_to_remove) = elem_to_remove { transfer_changes.remove(idx_to_remove.0); } else { - vlog::error!( + vlog::warn!( "Burn event for withdrawal must be present, tx hash: {:?}", transaction_hash ); @@ -590,13 +582,16 @@ pub fn transaction_details_from_storage( to: deposit.to, amount: deposit.amount, }; - let elem_to_remove = transfer_changes - .iter() - .find_position(|event| event == &&mint_event_to_remove); + let elem_to_remove = transfer_changes.iter().find_position(|event| { + event.token_info.l2_address == mint_event_to_remove.token_info.l2_address + && event.from == mint_event_to_remove.from + && event.to == mint_event_to_remove.to + && event.amount == mint_event_to_remove.amount + }); if let Some(idx_to_remove) = elem_to_remove { transfer_changes.remove(idx_to_remove.0); } else { - vlog::error!( + vlog::warn!( "Mint event for deposit must be present, tx hash: {:?}", transaction_hash ); @@ -641,6 +636,7 @@ pub fn transaction_details_from_storage( index_in_block, initiator_address, received_at, + miniblock_timestamp, eth_commit_tx_hash, eth_prove_tx_hash, eth_execute_tx_hash, @@ -650,3 +646,15 @@ pub fn transaction_details_from_storage( r#type: tx_type as u32, } } + +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct CallTrace { + pub tx_hash: Vec, + pub call_trace: Vec, +} + +impl From for Call { + fn from(call_trace: CallTrace) -> Self { + bincode::deserialize(&call_trace.call_trace).unwrap() + } +} diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs index 1c18bdc00b19..fa2414653268 100644 --- a/core/lib/dal/src/prover_dal.rs +++ b/core/lib/dal/src/prover_dal.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use std::convert::{TryFrom, TryInto}; use std::ops::Range; use std::time::{Duration, Instant}; -use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; + use zksync_types::aggregated_operations::BlockProofForL1; use zksync_types::proofs::{ AggregationRound, JobCountStatistics, JobExtendedStatistics, ProverJobInfo, ProverJobMetadata, @@ -21,13 +21,8 @@ pub struct ProverDal<'a, 'c> { } impl ProverDal<'_, '_> { - pub fn get_next_prover_job( - &mut self, - _processing_timeout: Duration, - max_attempts: u32, - ) -> Option { + pub fn get_next_prover_job(&mut self) -> Option { async_std::task::block_on(async { - let processing_timeout = pg_interval_from_duration(_processing_timeout); let result: Option = sqlx::query!( " UPDATE prover_jobs @@ -36,10 +31,7 @@ impl ProverDal<'_, '_> { WHERE id = ( SELECT id FROM prover_jobs - WHERE status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) + WHERE status = 'queued' ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC LIMIT 1 FOR UPDATE @@ -47,8 +39,6 @@ impl ProverDal<'_, '_> { ) RETURNING prover_jobs.* ", - &processing_timeout, - max_attempts as i32 ) .fetch_optional(self.storage.conn()) .await @@ -88,12 +78,9 @@ impl ProverDal<'_, '_> { pub fn get_next_prover_job_by_circuit_types( &mut self, - processing_timeout: Duration, - max_attempts: u32, circuit_types: Vec, ) -> Option { async_std::task::block_on(async { - let processing_timeout = pg_interval_from_duration(processing_timeout); let result: Option = sqlx::query!( " UPDATE prover_jobs @@ -102,13 +89,8 @@ impl ProverDal<'_, '_> { WHERE id = ( SELECT id 
FROM prover_jobs - WHERE circuit_type = ANY($3) - AND - ( status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) - ) + WHERE circuit_type = ANY($1) + AND status = 'queued' ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC LIMIT 1 FOR UPDATE @@ -116,20 +98,18 @@ impl ProverDal<'_, '_> { ) RETURNING prover_jobs.* ", - &processing_timeout, - max_attempts as i32, &circuit_types[..], ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| ProverJobMetadata { - id: row.id as u32, - block_number: L1BatchNumber(row.l1_batch_number as u32), - circuit_type: row.circuit_type, - aggregation_round: AggregationRound::try_from(row.aggregation_round).unwrap(), - sequence_number: row.sequence_number as usize, - }); + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| ProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_type: row.circuit_type, + aggregation_round: AggregationRound::try_from(row.aggregation_round).unwrap(), + sequence_number: row.sequence_number as usize, + }); result }) @@ -139,18 +119,13 @@ impl ProverDal<'_, '_> { pub fn insert_prover_jobs( &mut self, l1_batch_number: L1BatchNumber, - circuits: Vec, + circuit_types_and_urls: Vec<(&'static str, String)>, aggregation_round: AggregationRound, ) { async_std::task::block_on(async { let started_at = Instant::now(); - for (sequence_number, circuit) in circuits.into_iter().enumerate() { - let circuit_input_blob_url = prover_circuit_input_blob_url( - l1_batch_number, - sequence_number, - circuit.clone(), - aggregation_round, - ); + let it = circuit_types_and_urls.into_iter().enumerate(); + for (sequence_number, (circuit, circuit_input_blob_url)) in it { sqlx::query!( " INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at) @@ -164,9 +139,10 @@ impl ProverDal<'_, '_> { aggregation_round as i64, circuit_input_blob_url ) - .execute(self.storage.conn()) - .await - .unwrap(); + .execute(self.storage.conn()) + .await + .unwrap(); + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_witness"); } }) @@ -200,15 +176,6 @@ impl ProverDal<'_, '_> { }) } - pub fn lock_prover_jobs_table_exclusive(&mut self) { - async_std::task::block_on(async { - sqlx::query!("LOCK TABLE prover_jobs IN EXCLUSIVE MODE") - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - pub fn save_proof_error(&mut self, id: u32, error: String, max_attempts: u32) { async_std::task::block_on(async { let mut transaction = self.storage.start_transaction().await; @@ -237,6 +204,34 @@ impl ProverDal<'_, '_> { }) } + pub fn requeue_stuck_jobs( + &mut self, + processing_timeout: Duration, + max_attempts: u32, + ) -> Vec { + let processing_timeout = pg_interval_from_duration(processing_timeout); + async_std::task::block_on(async { + sqlx::query!( + " + UPDATE prover_jobs + SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() + WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'failed' AND attempts < $2) + RETURNING id, status, attempts + ", + &processing_timeout, + max_attempts as i32, + ) 
+ .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckProverJobs{id: row.id as u64, status: row.status, attempts: row.attempts as u64}) + .collect() + }) + } + // For each block in the provided range it returns a tuple: // (aggregation_coords; scheduler_proof) pub fn get_final_proofs_for_blocks( @@ -253,7 +248,6 @@ impl ProverDal<'_, '_> { WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2 AND prover_jobs.aggregation_round = 3 AND prover_jobs.status = 'successful' - AND scheduler_witness_jobs.status = 'successful' ", from_block.0 as i32, to_block.0 as i32 @@ -337,45 +331,47 @@ impl ProverDal<'_, '_> { }) } - pub fn successful_proofs_count( - &mut self, - block_number: L1BatchNumber, - aggregation_round: AggregationRound, - ) -> usize { + pub fn min_unproved_l1_batch_number(&mut self) -> Option { async_std::task::block_on(async { sqlx::query!( r#" - SELECT COUNT(*) as "count!" - FROM prover_jobs - WHERE status = 'successful' AND l1_batch_number = $1 AND aggregation_round = $2 - "#, - block_number.0 as i64, - aggregation_round as i64 + SELECT MIN(l1_batch_number) as "l1_batch_number?" FROM ( + SELECT MIN(l1_batch_number) as "l1_batch_number" + FROM prover_jobs + WHERE status = 'successful' OR aggregation_round < 3 + GROUP BY l1_batch_number + HAVING MAX(aggregation_round) < 3 + ) as inn + "# ) .fetch_one(self.storage.conn()) .await .unwrap() - .count as usize + .l1_batch_number + .map(|n| L1BatchNumber(n as u32)) }) } - pub fn min_unproved_l1_batch_number(&mut self, max_attempts: u32) -> Option { + pub fn min_unproved_l1_batch_number_by_basic_circuit_type( + &mut self, + ) -> Vec<(String, L1BatchNumber)> { async_std::task::block_on(async { sqlx::query!( r#" - SELECT MIN(l1_batch_number) as "l1_batch_number?" 
- FROM prover_jobs - WHERE status = 'queued' OR status = 'in_progress' - OR status = 'in_gpu_proof' - OR (status = 'failed' AND attempts < $1) - "#, - max_attempts as i32 + SELECT MIN(l1_batch_number) as "l1_batch_number!", circuit_type + FROM prover_jobs + WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress' + OR status = 'in_gpu_proof' + OR status = 'failed') + GROUP BY circuit_type + "# ) - .fetch_one(self.storage.conn()) + .fetch_all(self.storage.conn()) .await .unwrap() - .l1_batch_number - .map(|n| L1BatchNumber(n as u32)) + .into_iter() + .map(|row| (row.circuit_type, L1BatchNumber(row.l1_batch_number as u32))) + .collect() }) } @@ -598,3 +594,10 @@ impl GetProverJobsParams { } } } + +#[derive(Debug)] +pub struct StuckProverJobs { + pub id: u64, + pub status: String, + pub attempts: u64, +} diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 34716eca2f7f..2414abc8f515 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -5,10 +5,9 @@ use std::time::Instant; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_types::{ vm_trace::ContractSourceDebugInfo, Address, MiniblockNumber, StorageKey, StorageLog, - StorageValue, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, - U256, + StorageValue, H256, U256, }; -use zksync_utils::{bytes_to_be_words, bytes_to_chunks, h256_to_account_address}; +use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; #[derive(Debug)] pub struct StorageDal<'a, 'c> { @@ -67,7 +66,7 @@ impl StorageDal<'_, '_> { async_std::task::block_on(async { let bootloader_bytecode = self .get_factory_dep(bootloader_hash) - .expect("Bootloader code should be presented in the database"); + .expect("Bootloader code should be present in the database"); let bootloader_code = SystemContractCode { code: bytes_to_be_words(bootloader_bytecode), hash: bootloader_hash, @@ -75,7 +74,7 @@ impl StorageDal<'_, '_> { let default_aa_bytecode = self .get_factory_dep(default_aa_hash) - .expect("Default account code should be presented in the database"); + .expect("Default account code should be present in the database"); let default_aa_code = SystemContractCode { code: bytes_to_be_words(default_aa_bytecode), @@ -110,33 +109,6 @@ impl StorageDal<'_, '_> { }) } - pub fn get_contracts_for_revert(&mut self, block_number: MiniblockNumber) -> Vec
{ - async_std::task::block_on(async { - sqlx::query!( - " - SELECT key - FROM storage_logs - WHERE address = $1 AND miniblock_number > $2 AND NOT EXISTS ( - SELECT 1 FROM storage_logs as s - WHERE - s.hashed_key = storage_logs.hashed_key AND - (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND - s.value = $3 - ) - ", - ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(), - block_number.0 as i64, - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| h256_to_account_address(&H256::from_slice(&row.key))) - .collect() - }) - } - pub fn get_factory_deps_for_revert(&mut self, block_number: MiniblockNumber) -> Vec { async_std::task::block_on(async { sqlx::query!( diff --git a/core/lib/dal/src/storage_load_dal.rs b/core/lib/dal/src/storage_load_dal.rs index 16b382526a30..8f9a7a75a4c8 100644 --- a/core/lib/dal/src/storage_load_dal.rs +++ b/core/lib/dal/src/storage_load_dal.rs @@ -6,7 +6,6 @@ use zksync_types::{ AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, }; -use zksync_utils::h256_to_account_address; #[derive(Debug)] pub struct StorageLoadDal<'a, 'c> { @@ -70,38 +69,6 @@ impl StorageLoadDal<'_, '_> { "loading deployed contracts for l1 batch {}", current_l1_batch_number ); - sqlx::query!( - " - SELECT storage_logs.key, factory_deps.bytecode - FROM storage_logs - JOIN factory_deps ON storage_logs.value = factory_deps.bytecode_hash - WHERE - storage_logs.address = $1 AND - storage_logs.miniblock_number >= $3 AND - storage_logs.miniblock_number <= $4 AND - NOT EXISTS ( - SELECT 1 FROM storage_logs as s - WHERE - s.hashed_key = storage_logs.hashed_key AND - (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND - s.value = $2 - ) - ", - ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(), - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(), - from_miniblock_number.0 as i64, - to_miniblock_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .for_each(|row| { - result.store_contract( - h256_to_account_address(&H256::from_slice(&row.key)), - row.bytecode, - ) - }); vlog::debug!( "loading factory deps for l1 batch {}", diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index bb2c83ab5ffb..8bcf159a6494 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -1,8 +1,9 @@ use crate::StorageProcessor; use sqlx::types::chrono::Utc; +use std::collections::HashMap; use zksync_types::{ - get_code_key, Address, MiniblockNumber, StorageLog, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, - H256, + get_code_key, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, + FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, }; #[derive(Debug)] @@ -197,4 +198,153 @@ impl StorageLogsDal<'_, '_> { count > 0 }) } + + pub fn get_touched_slots_for_l1_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> HashMap { + async_std::task::block_on(async { + let storage_logs = sqlx::query!( + " + SELECT address, key, value + FROM storage_logs + WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1) + AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1) + ORDER BY miniblock_number, operation_number + ", + l1_batch_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + 
.await + .unwrap(); + + let mut touched_slots = HashMap::new(); + for storage_log in storage_logs.into_iter() { + touched_slots.insert( + StorageKey::new( + AccountTreeId::new(Address::from_slice(&storage_log.address)), + H256::from_slice(&storage_log.key), + ), + H256::from_slice(&storage_log.value), + ); + } + touched_slots + }) + } + + pub fn get_storage_logs_for_revert( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Vec<(H256, Option)> { + async_std::task::block_on(async { + let miniblock_number = match self + .storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(l1_batch_number) + { + None => return Vec::new(), + Some((_, number)) => number, + }; + + vlog::info!("fetching keys that were changed after given block number"); + let modified_keys: Vec = sqlx::query!( + "SELECT DISTINCT ON (hashed_key) hashed_key FROM + (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn", + miniblock_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| H256::from_slice(&row.hashed_key)) + .collect(); + vlog::info!("loaded {:?} keys", modified_keys.len()); + + let mut result: Vec<(H256, Option)> = vec![]; + + for key in modified_keys { + let initially_written_at: Option = sqlx::query!( + " + SELECT l1_batch_number FROM initial_writes + WHERE hashed_key = $1 + ", + key.as_bytes(), + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + match initially_written_at { + // Key isn't written to the storage - nothing to rollback. + None => continue, + // Key was initially written, it's needed to remove it. + Some(initially_written_at) if initially_written_at > l1_batch_number => { + result.push((key, None)); + } + // Key was rewritten, it's needed to restore the previous value. + Some(_) => { + let previous_value: Vec = sqlx::query!( + " + SELECT value FROM storage_logs + WHERE hashed_key = $1 AND miniblock_number <= $2 + ORDER BY miniblock_number DESC, operation_number DESC + LIMIT 1 + ", + key.as_bytes(), + miniblock_number.0 as i64 + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .value; + result.push((key, Some(H256::from_slice(&previous_value)))); + } + } + if result.len() % 1000 == 0 { + vlog::info!("processed {:?} values", result.len()); + } + } + + result + }) + } + + pub fn get_previous_storage_values( + &mut self, + hashed_keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> HashMap { + async_std::task::block_on(async { + let hashed_keys: Vec<_> = hashed_keys.into_iter().map(|key| key.0.to_vec()).collect(); + let (miniblock_number, _) = self + .storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(l1_batch_number) + .unwrap(); + sqlx::query!( + r#" + SELECT u.hashed_key as "hashed_key!", + (SELECT value FROM storage_logs + WHERE hashed_key = u.hashed_key AND miniblock_number < $2 + ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as "value?" 
+ FROM UNNEST($1::bytea[]) AS u(hashed_key) + "#, + &hashed_keys, + miniblock_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + ( + H256::from_slice(&row.hashed_key), + row.value + .map(|value| H256::from_slice(&value)) + .unwrap_or_else(H256::zero), + ) + }) + .collect() + }) + } } diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index b4f05c540f3f..d54a2e2970c2 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,6 +1,6 @@ use crate::StorageProcessor; use sqlx::types::chrono::Utc; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use vm::zk_evm::aux_structures::LogQuery; use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256}; use zksync_utils::u256_to_h256; @@ -11,65 +11,6 @@ pub struct StorageLogsDedupDal<'a, 'c> { } impl StorageLogsDedupDal<'_, '_> { - pub fn insert_storage_logs(&mut self, block_number: L1BatchNumber, logs: &[LogQuery]) { - async_std::task::block_on(async { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY storage_logs_dedup (hashed_key, address, key, value_read, value_written, operation_number, is_write, l1_batch_number, created_at) - FROM STDIN WITH (DELIMITER '|')", - ) - .await - .unwrap(); - - let mut bytes: Vec = Vec::new(); - let now = Utc::now().naive_utc().to_string(); - for (operation_number, log) in logs.iter().enumerate() { - let hashed_key_str = format!( - "\\\\x{}", - hex::encode(StorageKey::raw_hashed_key( - &log.address, - &u256_to_h256(log.key) - )) - ); - let address_str = format!("\\\\x{}", hex::encode(log.address.0)); - let key_str = format!("\\\\x{}", hex::encode(u256_to_h256(log.key).0)); - let read_value_str = - format!("\\\\x{}", hex::encode(u256_to_h256(log.read_value).0)); - let written_value_str = - format!("\\\\x{}", hex::encode(u256_to_h256(log.written_value).0)); - let row = format!( - "{}|{}|{}|{}|{}|{}|{}|{}|{}\n", - hashed_key_str, - address_str, - key_str, - read_value_str, - written_value_str, - operation_number, - log.rw_flag, - block_number, - now - ); - bytes.extend_from_slice(row.as_bytes()); - } - copy.send(bytes).await.unwrap(); - copy.finish().await.unwrap(); - }) - } - - pub fn rollback_storage_logs(&mut self, block_number: L1BatchNumber) { - async_std::task::block_on(async { - sqlx::query!( - "DELETE FROM storage_logs_dedup WHERE l1_batch_number > $1", - block_number.0 as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - pub fn insert_protective_reads( &mut self, l1_batch_number: L1BatchNumber, @@ -155,153 +96,4 @@ impl StorageLogsDedupDal<'_, '_> { .collect() }) } - - pub fn get_touched_slots_for_l1_batch( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> HashMap { - async_std::task::block_on(async { - let storage_logs = sqlx::query!( - " - SELECT address, key, value - FROM storage_logs - WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1) - AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1) - ORDER BY miniblock_number, operation_number - ", - l1_batch_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - - let mut touched_slots = HashMap::new(); - for storage_log in storage_logs.into_iter() { - touched_slots.insert( - StorageKey::new( - AccountTreeId::new(Address::from_slice(&storage_log.address)), - H256::from_slice(&storage_log.key), - ), - H256::from_slice(&storage_log.value), - ); - } 
- touched_slots - }) - } - - pub fn get_storage_logs_for_revert( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> Vec<(H256, Option)> { - async_std::task::block_on(async { - let miniblock_number = match self - .storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(l1_batch_number) - { - None => return Vec::new(), - Some((_, number)) => number, - }; - - vlog::info!("fetching keys that were changed after given block number"); - let modified_keys: Vec = sqlx::query!( - "SELECT DISTINCT ON (hashed_key) hashed_key FROM - (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn", - miniblock_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| H256::from_slice(&row.hashed_key)) - .collect(); - vlog::info!("loaded {:?} keys", modified_keys.len()); - - let mut result: Vec<(H256, Option)> = vec![]; - - for key in modified_keys { - let initially_written_at: Option = sqlx::query!( - " - SELECT l1_batch_number FROM initial_writes - WHERE hashed_key = $1 - ", - key.as_bytes(), - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - match initially_written_at { - // Key isn't written to the storage - nothing to rollback. - None => continue, - // Key was initially written, it's needed to remove it. - Some(initially_written_at) if initially_written_at > l1_batch_number => { - result.push((key, None)); - } - // Key was rewritten, it's needed to restore the previous value. - Some(_) => { - let previous_value: Vec = sqlx::query!( - " - SELECT value FROM storage_logs - WHERE hashed_key = $1 AND miniblock_number <= $2 - ORDER BY miniblock_number DESC, operation_number DESC - LIMIT 1 - ", - key.as_bytes(), - miniblock_number.0 as i64 - ) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .value; - result.push((key, Some(H256::from_slice(&previous_value)))); - } - } - if result.len() % 1000 == 0 { - vlog::info!("processed {:?} values", result.len()); - } - } - - result - }) - } - - pub fn get_previous_storage_values( - &mut self, - hashed_keys: Vec, - l1_batch_number: L1BatchNumber, - ) -> HashMap { - async_std::task::block_on(async { - let hashed_keys: Vec<_> = hashed_keys.into_iter().map(|key| key.0.to_vec()).collect(); - let (miniblock_number, _) = self - .storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(l1_batch_number) - .unwrap(); - sqlx::query!( - r#" - SELECT u.hashed_key as "hashed_key!", - (SELECT value FROM storage_logs - WHERE hashed_key = u.hashed_key AND miniblock_number < $2 - ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as "value?" 
- FROM UNNEST($1::bytea[]) AS u(hashed_key) - "#, - &hashed_keys, - miniblock_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| { - ( - H256::from_slice(&row.hashed_key), - row.value - .map(|value| H256::from_slice(&value)) - .unwrap_or_else(H256::zero), - ) - }) - .collect() - }) - } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 6f9947f82c71..08dda6980954 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -1,3 +1,4 @@ +use std::fs; use std::time::Duration; use db_test_macro::db_test; @@ -15,10 +16,12 @@ use zksync_types::{ }; use crate::blocks_dal::BlocksDal; +use crate::connection::ConnectionPool; use crate::prover_dal::{GetProverJobsParams, ProverDal}; use crate::transactions_dal::L2TxSubmissionResult; use crate::transactions_dal::TransactionsDal; use crate::transactions_web3_dal::TransactionsWeb3Dal; +use crate::witness_generator_dal::WitnessGeneratorDal; fn mock_tx_execution_metrics() -> TransactionExecutionMetrics { TransactionExecutionMetrics::default() @@ -125,8 +128,6 @@ async fn workflow_with_submit_tx_diff_hashes(connection_pool: ConnectionPool) { async fn remove_stuck_txs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; let mut transactions_dal = TransactionsDal { storage }; - let storage = &mut connection_pool.access_test_storage().await; - let mut blocks_dal = BlocksDal { storage }; // Stuck tx let mut tx = mock_l2_transaction(); @@ -152,7 +153,8 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { let txs = transactions_dal.sync_mempool(vec![], vec![], 0, 0, 1000).0; assert_eq!(txs.len(), 4); - blocks_dal.insert_miniblock(MiniblockHeader { + let storage = transactions_dal.storage; + BlocksDal { storage }.insert_miniblock(MiniblockHeader { number: MiniblockNumber(1), timestamp: 0, hash: Default::default(), @@ -163,6 +165,8 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { l2_fair_gas_price: 0, base_system_contracts_hashes: Default::default(), }); + + let mut transactions_dal = TransactionsDal { storage }; transactions_dal.mark_txs_as_executed_in_miniblock( MiniblockNumber(1), &[TransactionExecutionResult { @@ -173,6 +177,8 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { refunded_gas: 0, operator_suggested_refund: 0, compressed_bytecodes: vec![], + call_traces: vec![], + revert_reason: None, }], U256::from(1), ); @@ -190,7 +196,7 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { assert_eq!(txs.len(), 2); // We shouldn't collect executed tx - let storage = &mut connection_pool.access_test_storage().await; + let storage = transactions_dal.storage; let mut transactions_web3_dal = TransactionsWeb3Dal { storage }; transactions_web3_dal .get_transaction_receipt(executed_tx.hash()) @@ -198,6 +204,21 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { .unwrap(); } +fn create_circuits() -> Vec<(&'static str, String)> { + vec![ + ("Main VM", "1_0_Main VM_BasicCircuits.bin".to_owned()), + ("SHA256", "1_1_SHA256_BasicCircuits.bin".to_owned()), + ( + "Code decommitter", + "1_2_Code decommitter_BasicCircuits.bin".to_owned(), + ), + ( + "Log demuxer", + "1_3_Log demuxer_BasicCircuits.bin".to_owned(), + ), + ] +} + #[db_test(dal_crate)] async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; @@ -213,12 +234,7 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: 
ConnectionPool) { .insert_l1_batch(header, Default::default()); let mut prover_dal = ProverDal { storage }; - let circuits: Vec = vec![ - "Main VM".to_string(), - "SHA256".to_string(), - "Code decommitter".to_string(), - "Log demuxer".to_string(), - ]; + let circuits = create_circuits(); let l1_batch_number = L1BatchNumber(block_number); prover_dal.insert_prover_jobs( l1_batch_number, @@ -246,3 +262,226 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { let jobs = prover_dal.get_jobs(prover_jobs_params).unwrap(); assert_eq!(circuits.len(), jobs.len()); } + +#[db_test(dal_crate)] +async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) { + let storage = &mut connection_pool.access_test_storage().await; + let block_number = 1; + let header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Default::default(), + Default::default(), + ); + storage + .blocks_dal() + .insert_l1_batch(header, Default::default()); + + let mut prover_dal = ProverDal { storage }; + let circuits = create_circuits(); + let l1_batch_number = L1BatchNumber(block_number); + prover_dal.insert_prover_jobs(l1_batch_number, circuits, AggregationRound::BasicCircuits); + + // take all jobs from prover_job table + for _ in 1..=4 { + let job = prover_dal.get_next_prover_job(); + assert!(job.is_some()); + } + let job = prover_dal.get_next_prover_job(); + assert!(job.is_none()); + // re-queue jobs + let stuck_jobs = prover_dal.requeue_stuck_jobs(Duration::from_secs(0), 10); + assert_eq!(4, stuck_jobs.len()); + // re-check that all jobs can be taken again + for _ in 1..=4 { + let job = prover_dal.get_next_prover_job(); + assert!(job.is_some()); + } +} + +#[db_test(dal_crate)] +async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { + let storage = &mut connection_pool.access_test_storage().await; + let block_number = 1; + let header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Default::default(), + Default::default(), + ); + storage + .blocks_dal() + .insert_l1_batch(header, Default::default()); + + let mut prover_dal = ProverDal { storage }; + let circuits = create_circuits(); + let l1_batch_number = L1BatchNumber(block_number); + prover_dal.insert_prover_jobs( + l1_batch_number, + circuits.clone(), + AggregationRound::BasicCircuits, + ); + let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); + let jobs = prover_dal.get_jobs(prover_jobs_params); + let job_ids: Vec = jobs.unwrap().into_iter().map(|job| job.id).collect(); + + let proof = get_sample_proof(); + + // mark all basic circuit proofs as successful. 
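+    // Saving a proof for every basic-circuit job is what later lets
+    // `move_leaf_aggregation_jobs_from_waiting_to_queued` flip the leaf aggregation
+    // witness job to 'queued': its query only matches batches whose count of
+    // successful round-0 prover jobs equals `number_of_basic_circuits`.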
+ job_ids.iter().for_each(|&id| { + prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test") + }); + let mut witness_generator_dal = WitnessGeneratorDal { storage }; + + witness_generator_dal.create_aggregation_jobs( + l1_batch_number, + "basic_circuits_1.bin", + "basic_circuits_inputs_1.bin", + circuits.len(), + "scheduler_witness_1.bin", + ); + + // move the leaf aggregation job to be queued + witness_generator_dal.move_leaf_aggregation_jobs_from_waiting_to_queued(); + + // Ensure get-next job gives the leaf aggregation witness job + let job = witness_generator_dal.get_next_leaf_aggregation_witness_job( + Duration::from_secs(0), + 10, + u32::MAX, + ); + assert_eq!(l1_batch_number, job.unwrap().block_number); +} + +#[db_test(dal_crate)] +async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { + let storage = &mut connection_pool.access_test_storage().await; + let block_number = 1; + let header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Default::default(), + Default::default(), + ); + storage + .blocks_dal() + .insert_l1_batch(header, Default::default()); + + let mut prover_dal = ProverDal { storage }; + let circuits = create_circuits(); + let l1_batch_number = L1BatchNumber(block_number); + prover_dal.insert_prover_jobs( + l1_batch_number, + circuits.clone(), + AggregationRound::LeafAggregation, + ); + let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); + let jobs = prover_dal.get_jobs(prover_jobs_params); + let job_ids: Vec = jobs.unwrap().into_iter().map(|job| job.id).collect(); + + let proof = get_sample_proof(); + // mark all leaf aggregation circuit proofs as successful. + job_ids.iter().for_each(|&id| { + prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test") + }); + let mut witness_generator_dal = WitnessGeneratorDal { storage }; + + witness_generator_dal.create_aggregation_jobs( + l1_batch_number, + "basic_circuits_1.bin", + "basic_circuits_inputs_1.bin", + circuits.len(), + "scheduler_witness_1.bin", + ); + witness_generator_dal.save_leaf_aggregation_artifacts( + l1_batch_number, + circuits.len(), + "leaf_layer_subqueues_1.bin", + "aggregation_outputs_1.bin", + ); + + // move the leaf aggregation job to be queued + witness_generator_dal.move_node_aggregation_jobs_from_waiting_to_queued(); + + // Ensure get-next job gives the node aggregation witness job + let job = witness_generator_dal.get_next_node_aggregation_witness_job( + Duration::from_secs(0), + 10, + u32::MAX, + ); + assert_eq!(l1_batch_number, job.unwrap().block_number); +} + +#[db_test(dal_crate)] +async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: ConnectionPool) { + let storage = &mut connection_pool.access_test_storage().await; + let block_number = 1; + let header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Default::default(), + Default::default(), + ); + storage + .blocks_dal() + .insert_l1_batch(header, Default::default()); + + let mut prover_dal = ProverDal { storage }; + let circuits = vec![( + "Node aggregation", + "1_0_Node aggregation_NodeAggregation.bin".to_owned(), + )]; + let l1_batch_number = L1BatchNumber(block_number); + prover_dal.insert_prover_jobs( + l1_batch_number, + circuits.clone(), + AggregationRound::NodeAggregation, + ); + let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number); + let jobs = prover_dal.get_jobs(prover_jobs_params); + let job_ids: Vec = jobs.unwrap().into_iter().map(|job| job.id).collect(); + + let 
proof = get_sample_proof(); + // mark node aggregation circuit proofs as successful. + job_ids.iter().for_each(|&id| { + prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test") + }); + let mut witness_generator_dal = WitnessGeneratorDal { storage }; + + witness_generator_dal.create_aggregation_jobs( + l1_batch_number, + "basic_circuits_1.bin", + "basic_circuits_inputs_1.bin", + circuits.len(), + "scheduler_witness_1.bin", + ); + witness_generator_dal + .save_node_aggregation_artifacts(l1_batch_number, "final_node_aggregations_1.bin"); + + // move the leaf aggregation job to be queued + witness_generator_dal.move_scheduler_jobs_from_waiting_to_queued(); + + // Ensure get-next job gives the scheduler witness job + let job = + witness_generator_dal.get_next_scheduler_witness_job(Duration::from_secs(0), 10, u32::MAX); + assert_eq!(l1_batch_number, job.unwrap().block_number); +} + +fn get_default_prover_jobs_params(l1_batch_number: L1BatchNumber) -> GetProverJobsParams { + GetProverJobsParams { + statuses: None, + blocks: Some(std::ops::Range { + start: l1_batch_number, + end: l1_batch_number + 1, + }), + limit: None, + desc: false, + round: None, + } +} + +fn get_sample_proof() -> Vec { + let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); + fs::read(format!("{}/etc/prover-test-data/proof.bin", zksync_home)) + .expect("Failed reading test proof file") +} diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 8295709940c8..b9179156bae4 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -2,7 +2,7 @@ use bigdecimal::BigDecimal; use std::collections::HashMap; use std::fmt::{self, Debug}; use std::iter::FromIterator; -use std::time::Duration; +use std::time::{Duration, Instant}; use zksync_types::fee::TransactionExecutionMetrics; use itertools::Itertools; @@ -10,6 +10,7 @@ use sqlx::error; use sqlx::types::chrono::NaiveDateTime; use zksync_types::tx::tx_execution_info::TxExecutionStatus; +use zksync_types::vm_trace::Call; use zksync_types::{get_nonce_key, U256}; use zksync_types::{ l1::L1Tx, l2::L2Tx, tx::TransactionExecutionResult, vm_trace::VmExecutionTrace, Address, @@ -18,7 +19,7 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u32, u256_to_big_decimal}; -use crate::models::storage_transaction::StorageTransaction; +use crate::models::storage_transaction::{CallTrace, StorageTransaction}; use crate::time_utils::pg_interval_from_duration; use crate::StorageProcessor; @@ -102,6 +103,7 @@ impl TransactionsDal<'_, '_> { $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, now(), now() ) + ON CONFLICT (hash) DO NOTHING ", tx_hash, sender, @@ -329,6 +331,7 @@ impl TransactionsDal<'_, '_> { block_base_fee_per_gas: U256, ) { async_std::task::block_on(async { + let mut transaction = self.storage.start_transaction().await; let mut l1_hashes = Vec::with_capacity(transactions.len()); let mut l1_indices_in_block = Vec::with_capacity(transactions.len()); let mut l1_errors = Vec::with_capacity(transactions.len()); @@ -356,6 +359,8 @@ impl TransactionsDal<'_, '_> { let mut l2_gas_per_pubdata_limit = Vec::with_capacity(transactions.len()); let mut l2_refunded_gas = Vec::with_capacity(transactions.len()); + let mut call_traces_tx_hashes = Vec::with_capacity(transactions.len()); + let mut bytea_call_traces = Vec::with_capacity(transactions.len()); transactions .iter() .enumerate() @@ -378,6 +383,16 @@ impl TransactionsDal<'_, '_> { 
TxExecutionStatus::Failure => Some("Bootloader-based tx failed".to_owned()), }; + if let Some(call_trace) = tx_res.call_trace() { + let started_at = Instant::now(); + bytea_call_traces.push(bincode::serialize(&call_trace).unwrap()); + call_traces_tx_hashes.push(hash.0.to_vec()); + metrics::histogram!( + "dal.transactions.serialize_tracer", + started_at.elapsed() + ); + } + match &transaction.common_data { ExecuteTransactionCommon::L1(_) => { l1_hashes.push(hash.0.to_vec()); @@ -504,7 +519,7 @@ impl TransactionsDal<'_, '_> { &l2_paymaster_input, miniblock_number.0 as i32, ) - .execute(self.storage.conn()) + .execute(transaction.conn()) .await .unwrap(); } @@ -538,12 +553,31 @@ impl TransactionsDal<'_, '_> { &l1_indices_in_block, &l1_errors, &l1_execution_infos, - &l1_refunded_gas + &l1_refunded_gas, ) - .execute(self.storage.conn()) + .execute(transaction.conn()) + .await + .unwrap(); + } + + if !bytea_call_traces.is_empty() { + let started_at = Instant::now(); + sqlx::query!( + r#" + INSERT INTO call_traces (tx_hash, call_trace) + SELECT u.tx_hash, u.call_trace + FROM UNNEST($1::bytea[], $2::bytea[]) + AS u(tx_hash, call_trace) + "#, + &call_traces_tx_hashes, + &bytea_call_traces + ) + .execute(transaction.conn()) .await .unwrap(); + metrics::histogram!("dal.transactions.insert_call_tracer", started_at.elapsed()); } + transaction.commit().await; }) } @@ -567,12 +601,25 @@ impl TransactionsDal<'_, '_> { pub fn reset_transactions_state(&mut self, miniblock_number: MiniblockNumber) { async_std::task::block_on(async { - sqlx::query!( + let tx_hashes = sqlx::query!( "UPDATE transactions SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}' - WHERE miniblock_number > $1", + WHERE miniblock_number > $1 + RETURNING hash + ", miniblock_number.0 as i64 ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + sqlx::query!( + "DELETE FROM call_traces + WHERE tx_hash = ANY($1)", + &tx_hashes + .iter() + .map(|tx| tx.hash.clone()) + .collect::>>() + ) .execute(self.storage.conn()) .await .unwrap(); @@ -826,4 +873,21 @@ impl TransactionsDal<'_, '_> { .collect() }) } + + pub fn get_call_trace(&mut self, tx_hash: H256) -> Option { + async_std::task::block_on(async { + sqlx::query_as!( + CallTrace, + r#" + SELECT * FROM call_traces + WHERE tx_hash = $1 + "#, + tx_hash.as_bytes() + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|trace| trace.into()) + }) + } } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index d8a22bbf9c41..f2222e292b44 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -243,6 +243,7 @@ impl TransactionsWeb3Dal<'_, '_> { StorageTransactionDetails, r#" SELECT transactions.*, + miniblocks.timestamp as "miniblock_timestamp?", miniblocks.hash as "block_hash?", commit_tx.tx_hash as "eth_commit_tx_hash?", prove_tx.tx_hash as "eth_prove_tx_hash?", diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index 61b4cecce7a6..51e059baa608 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -5,12 +5,6 @@ use std::time::{Duration, Instant}; use itertools::Itertools; use sqlx::Row; -use crate::models::storage_witness_job_info::StorageWitnessJobInfo; -use zksync_object_store::gcs_utils::merkle_tree_paths_blob_url; -use zksync_object_store::gcs_utils::{ - aggregation_outputs_blob_url, basic_circuits_blob_url, 
basic_circuits_inputs_blob_url, - final_node_aggregations_blob_url, leaf_layer_subqueues_blob_url, scheduler_witness_blob_url, -}; use zksync_types::proofs::{ AggregationRound, JobCountStatistics, WitnessGeneratorJobMetadata, WitnessJobInfo, }; @@ -21,6 +15,7 @@ use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::proof::P use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; use zksync_types::L1BatchNumber; +use crate::models::storage_witness_job_info::StorageWitnessJobInfo; use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; use crate::StorageProcessor; @@ -168,7 +163,7 @@ impl WitnessGeneratorDal<'_, '_> { WitnessGeneratorJobMetadata { block_number: l1_batch_number, - proofs: basic_circuits_proofs + proofs: basic_circuits_proofs, } }) }) @@ -229,7 +224,7 @@ impl WitnessGeneratorDal<'_, '_> { ); WitnessGeneratorJobMetadata { block_number: l1_batch_number, - proofs: leaf_circuits_proofs + proofs: leaf_circuits_proofs, } }) }) @@ -288,7 +283,7 @@ impl WitnessGeneratorDal<'_, '_> { WitnessGeneratorJobMetadata { block_number: l1_batch_number, - proofs: leaf_circuits_proofs + proofs: leaf_circuits_proofs, } }) }) @@ -459,7 +454,10 @@ impl WitnessGeneratorDal<'_, '_> { pub fn create_aggregation_jobs( &mut self, block_number: L1BatchNumber, + basic_circuits_blob_url: &str, + basic_circuits_inputs_blob_url: &str, number_of_basic_circuits: usize, + scheduler_witness_blob_url: &str, ) { async_std::task::block_on(async { let started_at = Instant::now(); @@ -473,8 +471,8 @@ impl WitnessGeneratorDal<'_, '_> { block_number.0 as i64, vec![], vec![], - basic_circuits_blob_url(block_number), - basic_circuits_inputs_blob_url(block_number), + basic_circuits_blob_url, + basic_circuits_inputs_blob_url, number_of_basic_circuits as i64, ) .execute(self.storage.conn()) @@ -501,7 +499,7 @@ impl WitnessGeneratorDal<'_, '_> { ", block_number.0 as i64, vec![], - scheduler_witness_blob_url(block_number), + scheduler_witness_blob_url, ) .execute(self.storage.conn()) .await @@ -520,6 +518,8 @@ impl WitnessGeneratorDal<'_, '_> { &mut self, block_number: L1BatchNumber, number_of_leaf_circuits: usize, + leaf_layer_subqueues_blob_url: &str, + aggregation_outputs_blob_url: &str, ) { async_std::task::block_on(async { let started_at = Instant::now(); @@ -535,23 +535,30 @@ impl WitnessGeneratorDal<'_, '_> { ", number_of_leaf_circuits as i64, block_number.0 as i64, - leaf_layer_subqueues_blob_url(block_number), - aggregation_outputs_blob_url(block_number), + leaf_layer_subqueues_blob_url, + aggregation_outputs_blob_url, ) .execute(self.storage.conn()) .await .unwrap(); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_leaf_aggregation_artifacts"); + metrics::histogram!( + "dal.request", + started_at.elapsed(), + "method" => "save_leaf_aggregation_artifacts" + ); }) } - /// Saves artifacts in scheduler_artifacts_jobs` - /// and advances it to `waiting_for_proofs` status - /// it will be advanced to `queued` by the prover when all the dependency proofs are computed. - /// If the scheduler witness job was already `queued` in case of connrecunt run of same node aggregation job - /// we keep the status as is to prevent data race. - pub fn save_node_aggregation_artifacts(&mut self, block_number: L1BatchNumber) { + /// Saves artifacts in `scheduler_artifacts_jobs` and advances it to `waiting_for_proofs` status. + /// It will be advanced to `queued` by the prover when all the dependency proofs are computed. 
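+    /// (The prover does this via `move_scheduler_jobs_from_waiting_to_queued`, which only
+    /// matches batches with exactly one successful round-2 node aggregation proof.)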
+ /// If the scheduler witness job was already queued the in case of concurrent run + /// of same node aggregation job, we keep the status as is to prevent data race. + pub fn save_node_aggregation_artifacts( + &mut self, + block_number: L1BatchNumber, + node_aggregations_blob_url: &str, + ) { async_std::task::block_on(async { let started_at = Instant::now(); sqlx::query!( @@ -563,13 +570,17 @@ impl WitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND status != 'queued' ", block_number.0 as i64, - final_node_aggregations_blob_url(block_number), + node_aggregations_blob_url, ) .execute(self.storage.conn()) .await .unwrap(); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_node_aggregation_artifacts"); + metrics::histogram!( + "dal.request", + started_at.elapsed(), + "method" => "save_node_aggregation_artifacts", + ); }) } @@ -629,39 +640,6 @@ impl WitnessGeneratorDal<'_, '_> { }) } - pub fn required_proofs_count( - &mut self, - block_number: L1BatchNumber, - aggregation_round: AggregationRound, - ) -> usize { - async_std::task::block_on(async { - let table_name = Self::input_table_name_for(aggregation_round); - let circuits_number_input_name = match aggregation_round { - // Basic circuit job doesn't have any pre-requirements - AggregationRound::BasicCircuits => unreachable!(), - AggregationRound::LeafAggregation => "number_of_basic_circuits", - AggregationRound::NodeAggregation => "number_of_leaf_circuits", - // There is always just one final node circuit - AggregationRound::Scheduler => return 1, - }; - let sql = format!( - r#" - SELECT {} as "count" - FROM {} - WHERE l1_batch_number = $1 - "#, - circuits_number_input_name, table_name - ); - let mut query = sqlx::query(&sql); - query = query.bind(block_number.0 as i64); - query - .fetch_one(self.storage.conn()) - .await - .unwrap() - .get::("count") as usize - }) - } - fn input_table_name_for(aggregation_round: AggregationRound) -> &'static str { match aggregation_round { AggregationRound::BasicCircuits => "witness_inputs", @@ -736,7 +714,7 @@ impl WitnessGeneratorDal<'_, '_> { .collect()) } - pub fn save_witness_inputs(&mut self, block_number: L1BatchNumber) { + pub fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { async_std::task::block_on(async { sqlx::query!( "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, created_at, updated_at) \ @@ -744,7 +722,7 @@ impl WitnessGeneratorDal<'_, '_> { ON CONFLICT (l1_batch_number) DO NOTHING", block_number.0 as i64, vec![], - merkle_tree_paths_blob_url(block_number), + object_key, ) .fetch_optional(self.storage.conn()) .await @@ -901,6 +879,89 @@ impl WitnessGeneratorDal<'_, '_> { .unwrap(); }) } + + pub fn move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec { + async_std::task::block_on(async { + sqlx::query!( + r#" + UPDATE leaf_aggregation_witness_jobs + SET status='queued' + WHERE l1_batch_number IN + (SELECT prover_jobs.l1_batch_number + FROM prover_jobs + JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number + WHERE lawj.status = 'waiting_for_proofs' + AND prover_jobs.status = 'successful' + AND prover_jobs.aggregation_round = 0 + GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits + HAVING COUNT(*) = lawj.number_of_basic_circuits) + RETURNING l1_batch_number; + "#, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| row.l1_batch_number) + .collect() + }) + } + + pub fn 
move_node_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec { + async_std::task::block_on(async { + sqlx::query!( + r#" + UPDATE node_aggregation_witness_jobs + SET status='queued' + WHERE l1_batch_number IN + (SELECT prover_jobs.l1_batch_number + FROM prover_jobs + JOIN node_aggregation_witness_jobs nawj ON prover_jobs.l1_batch_number = nawj.l1_batch_number + WHERE nawj.status = 'waiting_for_proofs' + AND prover_jobs.status = 'successful' + AND prover_jobs.aggregation_round = 1 + GROUP BY prover_jobs.l1_batch_number, nawj.number_of_leaf_circuits + HAVING COUNT(*) = nawj.number_of_leaf_circuits) + RETURNING l1_batch_number; + "#, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| row.l1_batch_number) + .collect() + }) + } + + pub fn move_scheduler_jobs_from_waiting_to_queued(&mut self) -> Vec { + async_std::task::block_on(async { + // There is always just one final node circuit + // hence we do AND p.number_of_jobs = 1 + sqlx::query!( + r#" + UPDATE scheduler_witness_jobs + SET status='queued' + WHERE l1_batch_number IN + (SELECT prover_jobs.l1_batch_number + FROM prover_jobs + JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number + WHERE swj.status = 'waiting_for_proofs' + AND prover_jobs.status = 'successful' + AND prover_jobs.aggregation_round = 2 + GROUP BY prover_jobs.l1_batch_number + HAVING COUNT(*) = 1) + RETURNING l1_batch_number; + "#, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| row.l1_batch_number) + .collect() + }) + } } pub struct GetWitnessJobsParams { diff --git a/core/lib/db_storage_provider/Cargo.toml b/core/lib/db_storage_provider/Cargo.toml new file mode 100644 index 000000000000..85f96b64334c --- /dev/null +++ b/core/lib/db_storage_provider/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "zksync_db_storage_provider" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +zksync_types = { path = "../types", version = "1.0" } +zksync_dal = { path = "../dal", version = "1.0" } diff --git a/core/bin/zksync_core/src/db_storage_provider.rs b/core/lib/db_storage_provider/src/lib.rs similarity index 79% rename from core/bin/zksync_core/src/db_storage_provider.rs rename to core/lib/db_storage_provider/src/lib.rs index 8fe7eeb897d8..605a4f40f723 100644 --- a/core/bin/zksync_core/src/db_storage_provider.rs +++ b/core/lib/db_storage_provider/src/lib.rs @@ -1,5 +1,5 @@ use zksync_dal::StorageProcessor; -use zksync_types::{Address, MiniblockNumber, StorageKey, StorageValue, ZkSyncReadStorage, H256}; +use zksync_types::{MiniblockNumber, StorageKey, StorageValue, ZkSyncReadStorage, H256}; #[derive(Debug)] pub struct DbStorageProvider<'a> { @@ -37,13 +37,6 @@ impl<'a> ZkSyncReadStorage for DbStorageProvider<'a> { .unwrap() } - fn load_contract(&mut self, address: Address) -> Option> { - self.connection - .storage_web3_dal() - .get_contract_code_unchecked(address, self.block_number) - .unwrap() - } - fn load_factory_dep(&mut self, hash: H256) -> Option> { self.connection .storage_web3_dal() diff --git a/core/lib/db_test_macro/src/lib.rs b/core/lib/db_test_macro/src/lib.rs index 32f9dd64aaec..3aefaecf9497 100644 --- a/core/lib/db_test_macro/src/lib.rs +++ b/core/lib/db_test_macro/src/lib.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::{ parse::{Parse, 
ParseStream}, punctuated::Punctuated, - Ident, Token, + FnArg, Ident, Token, }; /// Argument that can be supplied to the `db_test` macro to be used in the `zksync_dal` crate. @@ -25,19 +25,6 @@ impl Parse for Args { } } -fn parse_connection_pool_arg_name(arg: Option<&syn::FnArg>) -> Result { - if let Some(syn::FnArg::Typed(arg)) = arg { - if let syn::Pat::Ident(ident) = arg.pat.as_ref() { - if let syn::Type::Path(path_type) = arg.ty.as_ref() { - if path_type.path.is_ident(TYPE_NAME) { - return Ok(ident.clone()); - } - } - } - } - Err(()) -} - fn parse_knobs(mut input: syn::ItemFn, inside_dal_crate: bool) -> Result { let sig = &mut input.sig; let body = &input.block; @@ -51,27 +38,24 @@ fn parse_knobs(mut input: syn::ItemFn, inside_dal_crate: bool) -> Result Result>, +} + +impl From for QueryClient { + fn from(transport: Http) -> Self { + Self { + web3: Arc::new(Web3::new(transport)), + } + } +} + +impl QueryClient { + /// Creates a new HTTP client. + pub fn new(node_url: &str) -> Result { + let transport = web3::transports::Http::new(node_url)?; + Ok(transport.into()) + } +} + +#[async_trait] +impl EthInterface for QueryClient { + async fn nonce_at_for_account( + &self, + account: Address, + block: BlockNumber, + component: &'static str, + ) -> Result { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "nonce_at_for_account"); + let start = Instant::now(); + let nonce = self + .web3 + .eth() + .transaction_count(account, Some(block)) + .await?; + metrics::histogram!("eth_client.direct.current_nonce", start.elapsed()); + Ok(nonce) + } + + async fn block_number(&self, component: &'static str) -> Result { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "block_number"); + let start = Instant::now(); + let block_number = self.web3.eth().block_number().await?; + metrics::histogram!("eth_client.direct.block_number", start.elapsed()); + Ok(block_number) + } + + async fn get_gas_price(&self, component: &'static str) -> Result { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_gas_price"); + let start = Instant::now(); + let network_gas_price = self.web3.eth().gas_price().await?; + metrics::histogram!("eth_client.direct.get_gas_price", start.elapsed()); + Ok(network_gas_price) + } + + async fn send_raw_tx(&self, tx: Vec) -> Result { + let start = Instant::now(); + let tx = self.web3.eth().send_raw_transaction(Bytes(tx)).await?; + metrics::histogram!("eth_client.direct.send_raw_tx", start.elapsed()); + Ok(tx) + } + + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + component: &'static str, + ) -> Result, Error> { + const MAX_REQUEST_CHUNK: usize = 1024; + + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "base_fee_history"); + let start = Instant::now(); + + let mut history = Vec::with_capacity(block_count); + let from_block = upto_block.saturating_sub(block_count); + + // Here we are requesting fee_history from blocks + // (from_block; upto_block] in chunks of size MAX_REQUEST_CHUNK + // starting from the oldest block. + for chunk_start in (from_block..=upto_block).step_by(MAX_REQUEST_CHUNK) { + let chunk_end = (chunk_start + MAX_REQUEST_CHUNK).min(upto_block); + let chunk_size = chunk_end - chunk_start; + let chunk = self + .web3 + .eth() + .fee_history(chunk_size.into(), chunk_end.into(), None) + .await? 
+ .base_fee_per_gas; + + history.extend(chunk); + } + + metrics::histogram!("eth_client.direct.base_fee", start.elapsed()); + Ok(history.into_iter().map(|fee| fee.as_u64()).collect()) + } + + async fn get_pending_block_base_fee_per_gas( + &self, + component: &'static str, + ) -> Result { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_pending_block_base_fee_per_gas"); + let start = Instant::now(); + let block = self + .web3 + .eth() + .block(BlockId::Number(BlockNumber::Pending)) + .await? + .expect("Pending block should always exist"); + + metrics::histogram!("eth_client.direct.base_fee", start.elapsed()); + // base_fee_per_gas always exists after London fork + Ok(block.base_fee_per_gas.unwrap()) + } + + async fn get_tx_status( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_tx_status"); + let start = Instant::now(); + + let receipt = self.tx_receipt(hash, component).await?; + let res = receipt.and_then(|receipt| match receipt.status { + Some(status) if receipt.block_number.is_some() => { + let success = status.as_u64() == 1; + + Some(ExecutedTxStatus { + tx_hash: receipt.transaction_hash, + success, + receipt, + }) + } + _ => None, + }); + + metrics::histogram!("eth_client.direct.get_tx_status", start.elapsed()); + Ok(res) + } + + async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { + let start = Instant::now(); + let transaction = self.web3.eth().transaction(tx_hash.into()).await?; + let receipt = self.web3.eth().transaction_receipt(tx_hash).await?; + + match (transaction, receipt) { + (Some(transaction), Some(receipt)) => { + let gas_limit = transaction.gas; + let gas_used = receipt.gas_used; + + let call_request = web3::types::CallRequest { + from: transaction.from, + to: transaction.to, + gas: Some(transaction.gas), + gas_price: transaction.gas_price, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + value: Some(transaction.value), + data: Some(transaction.input), + transaction_type: None, + access_list: None, + }; + + let call_error = self + .web3 + .eth() + .call(call_request, receipt.block_number.map(Into::into)) + .await + .err(); + + let failure_info = match call_error { + Some(web3::Error::Rpc(rpc_error)) => { + let revert_code = rpc_error.code.code(); + let message_len = + min("execution reverted: ".len(), rpc_error.message.len()); + let revert_reason = rpc_error.message[message_len..].to_string(); + + Ok(Some(FailureInfo { + revert_code, + revert_reason, + gas_used, + gas_limit, + })) + } + Some(err) => Err(err.into()), + None => Ok(None), + }; + + metrics::histogram!("eth_client.direct.failure_reason", start.elapsed()); + + failure_info + } + _ => Ok(None), + } + } + + async fn get_tx( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_tx"); + let tx = self + .web3 + .eth() + .transaction(TransactionId::Hash(hash)) + .await?; + Ok(tx) + } + + #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + func: &str, + params: P, + from: A, + options: Options, + block: B, + contract_address: Address, + contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send, + { + let start = Instant::now(); + let contract = Contract::new(self.web3.eth(), contract_address, 
contract_abi);
+        let res = contract.query(func, params, from, options, block).await?;
+        metrics::histogram!("eth_client.direct.call_contract_function", start.elapsed());
+        Ok(res)
+    }
+
+    async fn tx_receipt(
+        &self,
+        tx_hash: H256,
+        component: &'static str,
+    ) -> Result<Option<TransactionReceipt>, Error> {
+        metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "tx_receipt");
+        let start = Instant::now();
+        let receipt = self.web3.eth().transaction_receipt(tx_hash).await?;
+        metrics::histogram!("eth_client.direct.tx_receipt", start.elapsed());
+        Ok(receipt)
+    }
+
+    async fn eth_balance(&self, address: Address, component: &'static str) -> Result<U256, Error> {
+        metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "eth_balance");
+        let start = Instant::now();
+        let balance = self.web3.eth().balance(address, None).await?;
+        metrics::histogram!("eth_client.direct.eth_balance", start.elapsed());
+        Ok(balance)
+    }
+
+    async fn logs(&self, filter: Filter, component: &'static str) -> Result<Vec<Log>, Error> {
+        metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "logs");
+        let start = Instant::now();
+        let logs = self.web3.eth().logs(filter).await?;
+        metrics::histogram!("eth_client.direct.logs", start.elapsed());
+        Ok(logs)
+    }
+}
diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs
new file mode 100644
index 000000000000..a16f165b8c2c
--- /dev/null
+++ b/core/lib/eth_client/src/clients/http/signing.rs
@@ -0,0 +1,353 @@
+use std::sync::Arc;
+use std::{fmt, time::Instant};
+
+use async_trait::async_trait;
+
+use zksync_config::ZkSyncConfig;
+use zksync_contracts::zksync_contract;
+use zksync_eth_signer::PrivateKeySigner;
+use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner};
+use zksync_types::web3::{
+    self,
+    contract::{
+        tokens::{Detokenize, Tokenize},
+        Options,
+    },
+    ethabi,
+    transports::Http,
+    types::{
+        Address, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, H256,
+        U256, U64,
+    },
+};
+use zksync_types::{L1ChainId, PackedEthSignature, EIP_1559_TX_TYPE};
+
+// Local uses
+use super::query::QueryClient;
+use crate::{
+    types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult},
+    BoundEthInterface, EthInterface,
+};
+
+/// HTTP-based Ethereum client, backed by a private key to sign transactions.
+pub type PKSigningClient = SigningClient<PrivateKeySigner>;
+
+impl PKSigningClient {
+    pub fn from_config(config: &ZkSyncConfig) -> Self {
+        // Gather required data from the config.
+        // It's done explicitly to simplify getting rid of this function later.
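+        // The resulting client pairs a read-only `QueryClient` (built from the HTTP transport
+        // in `SigningClient::new`) with a `PrivateKeySigner` used to sign raw EIP-1559 transactions.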
+ let main_node_url = &config.eth_client.web3_url; + let operator_private_key = config.eth_sender.sender.operator_private_key; + let operator_commit_eth_addr = config.eth_sender.sender.operator_commit_eth_addr; + let diamond_proxy_addr = config.contracts.diamond_proxy_addr; + let default_priority_fee_per_gas = + config.eth_sender.gas_adjuster.default_priority_fee_per_gas; + let l1_chain_id = config.eth_client.chain_id; + + let transport = + web3::transports::Http::new(main_node_url).expect("Failed to create transport"); + let operator_address = PackedEthSignature::address_from_private_key(&operator_private_key) + .expect("Failed to get address from private key"); + + vlog::info!("Operator address: {:?}", operator_address); + + SigningClient::new( + transport, + zksync_contract(), + operator_commit_eth_addr, + PrivateKeySigner::new(operator_private_key), + diamond_proxy_addr, + default_priority_fee_per_gas.into(), + L1ChainId(l1_chain_id), + ) + } +} + +/// Gas limit value to be used in transaction if for some reason +/// gas limit was not set for it. +/// +/// This is an emergency value, which will not be used normally. +const FALLBACK_GAS_LIMIT: u64 = 3_000_000; + +/// HTTP-based client, instantiated for a certain account. +/// This client is capable of signing transactions. +#[derive(Clone)] +pub struct SigningClient { + inner: Arc>, + query_client: QueryClient, +} + +struct ETHDirectClientInner { + eth_signer: S, + sender_account: Address, + contract_addr: H160, + contract: ethabi::Contract, + chain_id: L1ChainId, + default_priority_fee_per_gas: U256, +} + +impl fmt::Debug for SigningClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // We do not want to have a private key in the debug representation. + + f.debug_struct("ETHDirectClient") + .field("sender_account", &self.inner.sender_account) + .field("contract_addr", &self.inner.contract_addr) + .field("chain_id", &self.inner.chain_id) + .finish() + } +} + +#[async_trait] +impl EthInterface for SigningClient { + async fn nonce_at_for_account( + &self, + account: Address, + block: BlockNumber, + component: &'static str, + ) -> Result { + self.query_client + .nonce_at_for_account(account, block, component) + .await + } + + async fn block_number(&self, component: &'static str) -> Result { + self.query_client.block_number(component).await + } + + async fn get_gas_price(&self, component: &'static str) -> Result { + self.query_client.get_gas_price(component).await + } + + async fn send_raw_tx(&self, tx: Vec) -> Result { + self.query_client.send_raw_tx(tx).await + } + + async fn base_fee_history( + &self, + upto_block: usize, + block_count: usize, + component: &'static str, + ) -> Result, Error> { + self.query_client + .base_fee_history(upto_block, block_count, component) + .await + } + + async fn get_pending_block_base_fee_per_gas( + &self, + component: &'static str, + ) -> Result { + self.query_client + .get_pending_block_base_fee_per_gas(component) + .await + } + + async fn get_tx_status( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + self.query_client.get_tx_status(hash, component).await + } + + async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { + self.query_client.failure_reason(tx_hash).await + } + + async fn get_tx( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + self.query_client.get_tx(hash, component).await + } + + #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + func: &str, + params: P, + from: A, + 
options: Options, + block: B, + contract_address: Address, + contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send, + { + self.query_client + .call_contract_function( + func, + params, + from, + options, + block, + contract_address, + contract_abi, + ) + .await + } + + async fn tx_receipt( + &self, + tx_hash: H256, + component: &'static str, + ) -> Result, Error> { + self.query_client.tx_receipt(tx_hash, component).await + } + + async fn eth_balance(&self, address: Address, component: &'static str) -> Result { + self.query_client.eth_balance(address, component).await + } + + async fn logs(&self, filter: Filter, component: &'static str) -> Result, Error> { + self.query_client.logs(filter, component).await + } +} + +#[async_trait] +impl BoundEthInterface for SigningClient { + fn contract(&self) -> ðabi::Contract { + &self.inner.contract + } + + fn contract_addr(&self) -> H160 { + self.inner.contract_addr + } + + fn chain_id(&self) -> L1ChainId { + self.inner.chain_id + } + + fn sender_account(&self) -> Address { + self.inner.sender_account + } + + async fn sign_prepared_tx_for_addr( + &self, + data: Vec, + contract_addr: H160, + options: Options, + component: &'static str, + ) -> Result { + let start = Instant::now(); + + // fetch current max priority fee per gas + let max_priority_fee_per_gas = match options.max_priority_fee_per_gas { + Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, + None => self.inner.default_priority_fee_per_gas, + }; + + // fetch current base fee and add max_priority_fee_per_gas + let max_fee_per_gas = match options.max_fee_per_gas { + Some(max_fee_per_gas) => max_fee_per_gas, + None => { + self.get_pending_block_base_fee_per_gas(component).await? + max_priority_fee_per_gas + } + }; + + if max_fee_per_gas < max_priority_fee_per_gas { + return Err(Error::WrongFeeProvided( + max_fee_per_gas, + max_priority_fee_per_gas, + )); + } + + let nonce = match options.nonce { + Some(nonce) => nonce, + None => self.pending_nonce(component).await?, + }; + + let gas = options.gas.unwrap_or_else(|| { + // Verbosity level is set to `error`, since we expect all the transactions to have + // a set limit, but don't want to crаsh the application if for some reason in some + // place limit was not set. 
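+            // Execution continues with `FALLBACK_GAS_LIMIT` (3,000,000 gas) rather than
+            // returning an error.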
+ vlog::error!( + "No gas limit was set for transaction, using the default limit: {}", + FALLBACK_GAS_LIMIT + ); + + U256::from(FALLBACK_GAS_LIMIT) + }); + + let tx = TransactionParameters { + nonce, + to: Some(contract_addr), + gas, + value: options.value.unwrap_or_default(), + data, + chain_id: self.inner.chain_id.0, + max_priority_fee_per_gas, + gas_price: None, + transaction_type: Some(EIP_1559_TX_TYPE.into()), + access_list: None, + max_fee_per_gas, + }; + + let signed_tx = self.inner.eth_signer.sign_transaction(tx).await?; + let hash = zksync_types::web3::signing::keccak256(&signed_tx).into(); + + metrics::histogram!( + "eth_client.direct.sign_prepared_tx_for_addr", + start.elapsed() + ); + Ok(SignedCallResult { + raw_tx: signed_tx, + max_priority_fee_per_gas, + max_fee_per_gas, + nonce, + hash, + }) + } + + async fn allowance_on_account( + &self, + token_address: Address, + address: Address, + erc20_abi: ethabi::Contract, + ) -> Result { + let start = Instant::now(); + let res = self + .call_contract_function( + "allowance", + (self.inner.sender_account, address), + None, + Options::default(), + None, + token_address, + erc20_abi, + ) + .await?; + metrics::histogram!("eth_client.direct.allowance", start.elapsed()); + Ok(res) + } +} + +impl SigningClient { + pub fn new( + transport: Http, + contract: ethabi::Contract, + operator_eth_addr: H160, + eth_signer: S, + contract_eth_addr: H160, + default_priority_fee_per_gas: U256, + chain_id: L1ChainId, + ) -> Self { + Self { + inner: Arc::new(ETHDirectClientInner { + sender_account: operator_eth_addr, + eth_signer, + contract_addr: contract_eth_addr, + chain_id, + contract, + default_priority_fee_per_gas, + }), + query_client: transport.into(), + } + } +} diff --git a/core/lib/eth_client/src/clients/http_client.rs b/core/lib/eth_client/src/clients/http_client.rs deleted file mode 100644 index 4387c9317e53..000000000000 --- a/core/lib/eth_client/src/clients/http_client.rs +++ /dev/null @@ -1,649 +0,0 @@ -// Built-in deps -use std::cmp::min; -use std::sync::Arc; -use std::{fmt, time::Instant}; - -use async_trait::async_trait; -use zksync_config::ZkSyncConfig; -use zksync_contracts::zksync_contract; -use zksync_eth_signer::PrivateKeySigner; -// External uses -use zksync_types::web3::{ - self, - contract::{ - tokens::{Detokenize, Tokenize}, - Contract, Options, - }, - ethabi, - transports::Http, - types::{ - Address, BlockId, BlockNumber, Bytes, Filter, Log, Transaction, TransactionId, - TransactionReceipt, H160, H256, U256, U64, - }, - Web3, -}; -use zksync_types::{L1ChainId, PackedEthSignature, EIP_1559_TX_TYPE}; - -// Workspace uses -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; - -pub type EthereumClient = ETHDirectClient; - -/// Gas limit value to be used in transaction if for some reason -/// gas limit was not set for it. -/// -/// This is an emergency value, which will not be used normally. 
-const FALLBACK_GAS_LIMIT: u64 = 3_000_000; - -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Request to ethereum gateway failed: {0}")] - EthereumGateway(#[from] zksync_types::web3::Error), - #[error("Call to contract failed: {0}")] - Contract(#[from] zksync_types::web3::contract::Error), - #[error("Transaction signing failed: {0}")] - Signer(#[from] zksync_eth_signer::error::SignerError), - #[error("Decoding revert reason failed: {0}")] - Decode(#[from] ethabi::Error), - #[error("Max fee {0} less than priority fee {1}")] - WrongFeeProvided(U256, U256), -} - -#[derive(Debug, Clone, PartialEq)] -pub struct SignedCallResult { - pub raw_tx: Vec, - pub max_priority_fee_per_gas: U256, - pub max_fee_per_gas: U256, - pub nonce: U256, - pub hash: H256, -} - -/// State of the executed Ethereum transaction. -#[derive(Debug, Clone)] -pub struct ExecutedTxStatus { - /// The hash of the executed L1 transaction. - pub tx_hash: H256, - /// Whether transaction was executed successfully or failed. - pub success: bool, - /// Receipt for a transaction. - pub receipt: TransactionReceipt, -} - -/// Information about transaction failure. -#[derive(Debug, Clone)] -pub struct FailureInfo { - pub revert_code: i64, - pub revert_reason: String, - pub gas_used: Option, - pub gas_limit: U256, -} - -#[async_trait] -pub trait EthInterface { - async fn nonce_at(&self, block: BlockNumber, component: &'static str) -> Result; - async fn current_nonce(&self, component: &'static str) -> Result { - self.nonce_at(BlockNumber::Latest, component).await - } - async fn pending_nonce(&self, component: &'static str) -> Result { - self.nonce_at(BlockNumber::Pending, component).await - } - async fn base_fee_history( - &self, - from_block: usize, - block_count: usize, - component: &'static str, - ) -> Result, Error>; - async fn get_gas_price(&self, component: &'static str) -> Result; - async fn block_number(&self, component: &'static str) -> Result; - async fn send_raw_tx(&self, tx: Vec) -> Result; - async fn sign_prepared_tx_for_addr( - &self, - data: Vec, - contract_addr: H160, - options: Options, - component: &'static str, - ) -> Result; - async fn get_tx_status( - &self, - hash: H256, - component: &'static str, - ) -> Result, Error>; - async fn failure_reason(&self, tx_hash: H256) -> Result, Error>; -} - -struct ETHDirectClientInner { - eth_signer: S, - sender_account: Address, - contract_addr: H160, - contract: ethabi::Contract, - chain_id: L1ChainId, - default_priority_fee_per_gas: U256, - web3: Web3, -} - -#[derive(Clone)] -pub struct ETHDirectClient { - inner: Arc>, -} - -impl fmt::Debug for ETHDirectClient { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // We do not want to have a private key in the debug representation. 
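The comment above explains why the client gets a hand-written `Debug` implementation: a derived one would print the signer, and with it the private key. A generic sketch of the pattern with hypothetical type and field names:

```rust
use std::fmt;

#[allow(dead_code)]
struct ClientWithSecret {
    private_key: [u8; 32], // must never end up in logs
    sender_account: [u8; 20],
}

impl fmt::Debug for ClientWithSecret {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately omit `private_key` from the debug output.
        f.debug_struct("ClientWithSecret")
            .field("sender_account", &self.sender_account)
            .finish()
    }
}
```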
- - f.debug_struct("ETHDirectClient") - .field("sender_account", &self.inner.sender_account) - .field("contract_addr", &self.inner.contract_addr) - .field("chain_id", &self.inner.chain_id) - .finish() - } -} - -#[async_trait] -impl EthInterface for ETHDirectClient { - async fn nonce_at(&self, block: BlockNumber, component: &'static str) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "nonce_at"); - let start = Instant::now(); - let nonce = self - .inner - .web3 - .eth() - .transaction_count(self.inner.sender_account, Some(block)) - .await?; - metrics::histogram!("eth_client.direct.current_nonce", start.elapsed()); - Ok(nonce) - } - - async fn block_number(&self, component: &'static str) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "block_number"); - let start = Instant::now(); - let block_number = self.inner.web3.eth().block_number().await?; - metrics::histogram!("eth_client.direct.block_number", start.elapsed()); - Ok(block_number) - } - - async fn get_gas_price(&self, component: &'static str) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_gas_price"); - let start = Instant::now(); - let network_gas_price = self.inner.web3.eth().gas_price().await?; - metrics::histogram!("eth_client.direct.get_gas_price", start.elapsed()); - Ok(network_gas_price) - } - - async fn sign_prepared_tx_for_addr( - &self, - data: Vec, - contract_addr: H160, - options: Options, - component: &'static str, - ) -> Result { - let start = Instant::now(); - - // fetch current max priority fee per gas - let max_priority_fee_per_gas = match options.max_priority_fee_per_gas { - Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, - None => self.inner.default_priority_fee_per_gas, - }; - - // fetch current base fee and add max_priority_fee_per_gas - let max_fee_per_gas = match options.max_fee_per_gas { - Some(max_fee_per_gas) => max_fee_per_gas, - None => { - self.get_pending_block_base_fee_per_gas(component).await? + max_priority_fee_per_gas - } - }; - - if max_fee_per_gas < max_priority_fee_per_gas { - return Err(Error::WrongFeeProvided( - max_fee_per_gas, - max_priority_fee_per_gas, - )); - } - - let nonce = match options.nonce { - Some(nonce) => nonce, - None => self.pending_nonce(component).await?, - }; - - let gas = match options.gas { - Some(gas) => gas, - None => { - // Verbosity level is set to `error`, since we expect all the transactions to have - // a set limit, but don't want to crаsh the application if for some reason in some - // place limit was not set. 
- vlog::error!( - "No gas limit was set for transaction, using the default limit: {}", - FALLBACK_GAS_LIMIT - ); - - U256::from(FALLBACK_GAS_LIMIT) - } - }; - - let tx = TransactionParameters { - nonce, - to: Some(contract_addr), - gas, - value: options.value.unwrap_or_default(), - data, - chain_id: self.inner.chain_id.0 as u64, - max_priority_fee_per_gas, - gas_price: None, - transaction_type: Some(EIP_1559_TX_TYPE.into()), - access_list: None, - max_fee_per_gas, - }; - - let signed_tx = self.inner.eth_signer.sign_transaction(tx).await?; - let hash = self - .inner - .web3 - .web3() - .sha3(Bytes(signed_tx.clone())) - .await?; - - metrics::histogram!( - "eth_client.direct.sign_prepared_tx_for_addr", - start.elapsed() - ); - Ok(SignedCallResult { - raw_tx: signed_tx, - max_priority_fee_per_gas, - max_fee_per_gas, - nonce, - hash, - }) - } - - async fn send_raw_tx(&self, tx: Vec) -> Result { - let start = Instant::now(); - let tx = self - .inner - .web3 - .eth() - .send_raw_transaction(Bytes(tx)) - .await?; - metrics::histogram!("eth_client.direct.send_raw_tx", start.elapsed()); - Ok(tx) - } - - async fn base_fee_history( - &self, - upto_block: usize, - block_count: usize, - component: &'static str, - ) -> Result, Error> { - const MAX_REQUEST_CHUNK: usize = 1024; - - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "base_fee_history"); - let start = Instant::now(); - - let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); - - // Here we are requesting fee_history from blocks - // (from_block; upto_block] in chunks of size MAX_REQUEST_CHUNK - // starting from the oldest block. - for chunk_start in (from_block..=upto_block).step_by(MAX_REQUEST_CHUNK) { - let chunk_end = (chunk_start + MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; - let chunk = self - .inner - .web3 - .eth() - .fee_history(chunk_size.into(), chunk_end.into(), None) - .await? 
- .base_fee_per_gas; - - history.extend(chunk); - } - - metrics::histogram!("eth_client.direct.base_fee", start.elapsed()); - Ok(history.into_iter().map(|fee| fee.as_u64()).collect()) - } - async fn get_tx_status( - &self, - hash: H256, - component: &'static str, - ) -> Result, Error> { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_tx_status"); - let start = Instant::now(); - - let receipt = self.tx_receipt(hash, component).await?; - let res = match receipt { - Some(receipt) => match (receipt.status, receipt.block_number) { - (Some(status), Some(_)) => { - let success = status.as_u64() == 1; - - Some(ExecutedTxStatus { - tx_hash: receipt.transaction_hash, - success, - receipt, - }) - } - _ => None, - }, - _ => None, - }; - metrics::histogram!("eth_client.direct.get_tx_status", start.elapsed()); - Ok(res) - } - - async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { - let start = Instant::now(); - let transaction = self.inner.web3.eth().transaction(tx_hash.into()).await?; - let receipt = self.inner.web3.eth().transaction_receipt(tx_hash).await?; - - match (transaction, receipt) { - (Some(transaction), Some(receipt)) => { - let gas_limit = transaction.gas; - let gas_used = receipt.gas_used; - - let call_request = web3::types::CallRequest { - from: transaction.from, - to: transaction.to, - gas: Some(transaction.gas), - gas_price: transaction.gas_price, - max_fee_per_gas: None, - max_priority_fee_per_gas: None, - value: Some(transaction.value), - data: Some(transaction.input), - transaction_type: None, - access_list: None, - }; - - let call_error = self - .inner - .web3 - .eth() - .call(call_request, receipt.block_number.map(Into::into)) - .await - .err(); - - let failure_info = match call_error { - Some(web3::Error::Rpc(rpc_error)) => { - let revert_code = rpc_error.code.code(); - let message_len = - min("execution reverted: ".len(), rpc_error.message.len()); - let revert_reason = rpc_error.message[message_len..].to_string(); - - Ok(Some(FailureInfo { - revert_code, - revert_reason, - gas_used, - gas_limit, - })) - } - Some(err) => Err(err.into()), - None => Ok(None), - }; - - metrics::histogram!("eth_client.direct.failure_reason", start.elapsed()); - - failure_info - } - _ => Ok(None), - } - } -} - -impl ETHDirectClient { - pub fn new( - transport: Http, - contract: ethabi::Contract, - operator_eth_addr: H160, - eth_signer: S, - contract_eth_addr: H160, - default_priority_fee_per_gas: U256, - chain_id: L1ChainId, - ) -> Self { - Self { - inner: Arc::new(ETHDirectClientInner { - sender_account: operator_eth_addr, - eth_signer, - contract_addr: contract_eth_addr, - chain_id, - contract, - default_priority_fee_per_gas, - web3: Web3::new(transport), - }), - } - } - - pub async fn get_pending_block_base_fee_per_gas( - &self, - component: &'static str, - ) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_pending_block_base_fee_per_gas"); - let start = Instant::now(); - let block = self - .inner - .web3 - .eth() - .block(BlockId::Number(BlockNumber::Pending)) - .await? 
- .unwrap(); // Pending block should always exist - - metrics::histogram!("eth_client.direct.base_fee", start.elapsed()); - // base_fee_per_gas always exists after London fork - Ok(block.base_fee_per_gas.unwrap()) - } - - pub fn main_contract_with_address(&self, address: Address) -> Contract { - Contract::new(self.inner.web3.eth(), address, self.inner.contract.clone()) - } - - pub fn main_contract(&self) -> Contract { - self.main_contract_with_address(self.inner.contract_addr) - } - - pub fn create_contract(&self, address: Address, contract: ethabi::Contract) -> Contract { - Contract::new(self.inner.web3.eth(), address, contract) - } - - pub async fn block(&self, id: BlockId) -> Result>, Error> { - let start = Instant::now(); - let block = self.inner.web3.eth().block(id).await?; - metrics::histogram!("eth_client.direct.block", start.elapsed()); - Ok(block) - } - - pub async fn sign_prepared_tx( - &self, - data: Vec, - options: Options, - component: &'static str, - ) -> Result { - self.sign_prepared_tx_for_addr(data, self.inner.contract_addr, options, component) - .await - } - - pub async fn tx_receipt( - &self, - tx_hash: H256, - component: &'static str, - ) -> Result, Error> { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "tx_receipt"); - let start = Instant::now(); - let receipt = self.inner.web3.eth().transaction_receipt(tx_hash).await?; - metrics::histogram!("eth_client.direct.tx_receipt", start.elapsed()); - Ok(receipt) - } - - pub async fn eth_balance( - &self, - address: Address, - component: &'static str, - ) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "eth_balance"); - let start = Instant::now(); - let balance = self.inner.web3.eth().balance(address, None).await?; - metrics::histogram!("eth_client.direct.eth_balance", start.elapsed()); - Ok(balance) - } - - pub async fn sender_eth_balance(&self, component: &'static str) -> Result { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "sender_eth_balance"); - self.eth_balance(self.inner.sender_account, component).await - } - - pub async fn allowance( - &self, - token_address: Address, - erc20_abi: ethabi::Contract, - ) -> Result { - self.allowance_on_contract(token_address, self.inner.contract_addr, erc20_abi) - .await - } - - pub async fn allowance_on_contract( - &self, - token_address: Address, - contract_address: Address, - erc20_abi: ethabi::Contract, - ) -> Result { - let start = Instant::now(); - let res = self - .call_contract_function( - "allowance", - (self.inner.sender_account, contract_address), - None, - Options::default(), - None, - token_address, - erc20_abi, - ) - .await?; - metrics::histogram!("eth_client.direct.allowance", start.elapsed()); - Ok(res) - } - - pub async fn call_main_contract_function( - &self, - func: &str, - params: P, - from: A, - options: Options, - block: B, - ) -> Result - where - R: Detokenize + Unpin, - A: Into>, - B: Into>, - P: Tokenize, - { - self.call_contract_function( - func, - params, - from, - options, - block, - self.inner.contract_addr, - self.inner.contract.clone(), - ) - .await - } - - #[allow(clippy::too_many_arguments)] - pub async fn call_contract_function( - &self, - func: &str, - params: P, - from: A, - options: Options, - block: B, - token_address: Address, - erc20_abi: ethabi::Contract, - ) -> Result - where - R: Detokenize + Unpin, - A: Into>, - B: Into>, - P: Tokenize, - { - let start = Instant::now(); - let contract = 
Contract::new(self.inner.web3.eth(), token_address, erc20_abi); - let res = contract.query(func, params, from, options, block).await?; - metrics::histogram!("eth_client.direct.call_contract_function", start.elapsed()); - Ok(res) - } - - pub async fn logs(&self, filter: Filter, component: &'static str) -> Result, Error> { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "logs"); - let start = Instant::now(); - let logs = self.inner.web3.eth().logs(filter).await?; - metrics::histogram!("eth_client.direct.logs", start.elapsed()); - Ok(logs) - } - - pub fn contract(&self) -> ðabi::Contract { - &self.inner.contract - } - - pub fn contract_addr(&self) -> H160 { - self.inner.contract_addr - } - - pub fn chain_id(&self) -> L1ChainId { - self.inner.chain_id - } - - pub fn sender_account(&self) -> Address { - self.inner.sender_account - } - - pub fn encode_tx_data(&self, func: &str, params: P) -> Vec { - let f = self - .contract() - .function(func) - .expect("failed to get function parameters"); - - f.encode_input(¶ms.into_tokens()) - .expect("failed to encode parameters") - } - - pub fn get_web3_transport(&self) -> &Http { - self.inner.web3.transport() - } - - pub async fn get_tx( - &self, - hash: H256, - component: &'static str, - ) -> Result, Error> { - metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "get_tx"); - let tx = self - .inner - .web3 - .eth() - .transaction(TransactionId::Hash(hash)) - .await?; - Ok(tx) - } -} - -impl EthereumClient { - pub fn from_config(config: &ZkSyncConfig) -> Self { - let transport = web3::transports::Http::new(&config.eth_client.web3_url).unwrap(); - - let operator_address = PackedEthSignature::address_from_private_key( - &config.eth_sender.sender.operator_private_key, - ) - .expect("Failed to get address from private key"); - - vlog::info!("Operator address: {:?}", operator_address); - - ETHDirectClient::new( - transport, - zksync_contract(), - config.eth_sender.sender.operator_commit_eth_addr, - PrivateKeySigner::new(config.eth_sender.sender.operator_private_key), - config.contracts.diamond_proxy_addr, - config - .eth_sender - .gas_adjuster - .default_priority_fee_per_gas - .into(), - L1ChainId(config.eth_client.chain_id), - ) - } -} diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 154b4ab1bf28..62467088f39b 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -4,6 +4,8 @@ use async_trait::async_trait; use jsonrpc_core::types::error::Error as RpcError; use std::collections::{BTreeMap, HashMap}; use std::sync::RwLock; +use zksync_types::web3::contract::tokens::Detokenize; +use zksync_types::web3::types::{BlockId, Filter, Log, Transaction}; use zksync_types::web3::{ contract::tokens::Tokenize, contract::Options, @@ -11,10 +13,14 @@ use zksync_types::web3::{ types::{BlockNumber, U64}, Error as Web3Error, }; +use zksync_types::{Address, L1ChainId}; use zksync_types::{web3::types::TransactionReceipt, H160, H256, U256}; -use super::http_client::{Error, EthInterface, ExecutedTxStatus, FailureInfo, SignedCallResult}; +use crate::{ + types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}, + BoundEthInterface, EthInterface, +}; #[derive(Debug, Clone, Default, Copy)] pub struct MockTx { @@ -198,38 +204,13 @@ impl EthInterface for MockEthereum { Ok(mock_tx.hash) } - async fn pending_nonce(&self, _: &'static str) -> Result { - Ok(self.pending_nonce.load(Ordering::SeqCst).into()) - 
} - - async fn current_nonce(&self, _: &'static str) -> Result { - Ok(self.current_nonce.load(Ordering::SeqCst).into()) - } - - async fn nonce_at(&self, block: BlockNumber, _: &'static str) -> Result { - if let BlockNumber::Number(block_number) = block { - Ok((*self - .nonces - .read() - .unwrap() - .range(..=block_number.as_u64()) - .next_back() - .unwrap() - .1) - .into()) - } else { - panic!("MockEthereum::nonce_at called with non-number block tag"); - } - } - - async fn sign_prepared_tx_for_addr( + async fn nonce_at_for_account( &self, - data: Vec, - _contract_addr: H160, - options: Options, + _account: Address, + _block: BlockNumber, _: &'static str, - ) -> Result { - self.sign_prepared_tx(data, options) + ) -> Result { + unimplemented!("Getting nonce for custom account is not supported") } async fn get_gas_price(&self, _: &'static str) -> Result { @@ -247,6 +228,15 @@ impl EthInterface for MockEthereum { .to_vec()) } + async fn get_pending_block_base_fee_per_gas( + &self, + _component: &'static str, + ) -> Result { + Ok(U256::from( + *self.base_fee_history.read().unwrap().last().unwrap(), + )) + } + async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { let tx_status = self.get_tx_status(tx_hash, "failure_reason").await.unwrap(); @@ -257,12 +247,129 @@ impl EthInterface for MockEthereum { gas_limit: U256::zero(), })) } + + async fn get_tx( + &self, + _hash: H256, + _component: &'static str, + ) -> Result, Error> { + unimplemented!("Not needed right now") + } + + #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + _func: &str, + _params: P, + _from: A, + _options: Options, + _block: B, + _contract_address: Address, + _contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send, + { + unimplemented!("Not needed right now") + } + + async fn tx_receipt( + &self, + _tx_hash: H256, + _component: &'static str, + ) -> Result, Error> { + unimplemented!("Not needed right now") + } + + async fn eth_balance( + &self, + _address: Address, + _component: &'static str, + ) -> Result { + unimplemented!("Not needed right now") + } + + async fn logs(&self, _filter: Filter, _component: &'static str) -> Result, Error> { + unimplemented!("Not needed right now") + } +} + +#[async_trait::async_trait] +impl BoundEthInterface for MockEthereum { + fn contract(&self) -> ðabi::Contract { + unimplemented!("Not needed right now") + } + + fn contract_addr(&self) -> H160 { + H160::repeat_byte(0x22) + } + + fn chain_id(&self) -> L1ChainId { + unimplemented!("Not needed right now") + } + + fn sender_account(&self) -> Address { + Address::repeat_byte(0x11) + } + + async fn sign_prepared_tx_for_addr( + &self, + data: Vec, + _contract_addr: H160, + options: Options, + _component: &'static str, + ) -> Result { + self.sign_prepared_tx(data, options) + } + + async fn allowance_on_account( + &self, + _token_address: Address, + _contract_address: Address, + _erc20_abi: ethabi::Contract, + ) -> Result { + unimplemented!("Not needed right now") + } + + async fn nonce_at(&self, block: BlockNumber, _component: &'static str) -> Result { + if let BlockNumber::Number(block_number) = block { + Ok((*self + .nonces + .read() + .unwrap() + .range(..=block_number.as_u64()) + .next_back() + .unwrap() + .1) + .into()) + } else { + panic!("MockEthereum::nonce_at called with non-number block tag"); + } + } + + async fn pending_nonce(&self, _: &'static str) -> Result { + Ok(self.pending_nonce.load(Ordering::SeqCst).into()) + 
} + + async fn current_nonce(&self, _: &'static str) -> Result { + Ok(self.current_nonce.load(Ordering::SeqCst).into()) + } } #[async_trait] impl + Sync> EthInterface for T { - async fn current_nonce(&self, component: &'static str) -> Result { - self.as_ref().current_nonce(component).await + async fn nonce_at_for_account( + &self, + account: Address, + block: BlockNumber, + component: &'static str, + ) -> Result { + self.as_ref() + .nonce_at_for_account(account, block, component) + .await } async fn base_fee_history( @@ -276,16 +383,17 @@ impl + Sync> EthInterface for T { .await } - async fn get_gas_price(&self, component: &'static str) -> Result { - self.as_ref().get_gas_price(component).await - } - - async fn pending_nonce(&self, component: &'static str) -> Result { - self.as_ref().pending_nonce(component).await + async fn get_pending_block_base_fee_per_gas( + &self, + component: &'static str, + ) -> Result { + self.as_ref() + .get_pending_block_base_fee_per_gas(component) + .await } - async fn nonce_at(&self, block: BlockNumber, component: &'static str) -> Result { - self.as_ref().nonce_at(block, component).await + async fn get_gas_price(&self, component: &'static str) -> Result { + self.as_ref().get_gas_price(component).await } async fn block_number(&self, component: &'static str) -> Result { @@ -296,6 +404,91 @@ impl + Sync> EthInterface for T { self.as_ref().send_raw_tx(tx).await } + async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { + self.as_ref().failure_reason(tx_hash).await + } + + async fn get_tx_status( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + self.as_ref().get_tx_status(hash, component).await + } + + async fn get_tx( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error> { + self.as_ref().get_tx(hash, component).await + } + + #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + func: &str, + params: P, + from: A, + options: Options, + block: B, + contract_address: Address, + contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send, + { + self.as_ref() + .call_contract_function( + func, + params, + from, + options, + block, + contract_address, + contract_abi, + ) + .await + } + + async fn tx_receipt( + &self, + tx_hash: H256, + component: &'static str, + ) -> Result, Error> { + self.as_ref().tx_receipt(tx_hash, component).await + } + + async fn eth_balance(&self, address: Address, component: &'static str) -> Result { + self.as_ref().eth_balance(address, component).await + } + + async fn logs(&self, filter: Filter, component: &'static str) -> Result, Error> { + self.as_ref().logs(filter, component).await + } +} + +#[async_trait::async_trait] +impl + Send + Sync> BoundEthInterface for T { + fn contract(&self) -> ðabi::Contract { + self.as_ref().contract() + } + + fn contract_addr(&self) -> H160 { + self.as_ref().contract_addr() + } + + fn chain_id(&self) -> L1ChainId { + self.as_ref().chain_id() + } + + fn sender_account(&self) -> Address { + self.as_ref().sender_account() + } + async fn sign_prepared_tx_for_addr( &self, data: Vec, @@ -308,15 +501,26 @@ impl + Sync> EthInterface for T { .await } - async fn failure_reason(&self, tx_hash: H256) -> Result, Error> { - self.as_ref().failure_reason(tx_hash).await + async fn allowance_on_account( + &self, + token_address: Address, + contract_address: Address, + erc20_abi: ethabi::Contract, + ) -> Result { + self.as_ref() + .allowance_on_account(token_address, 
contract_address, erc20_abi) + .await } - async fn get_tx_status( - &self, - hash: H256, - component: &'static str, - ) -> Result, Error> { - self.as_ref().get_tx_status(hash, component).await + async fn nonce_at(&self, block: BlockNumber, component: &'static str) -> Result { + self.as_ref().nonce_at(block, component).await + } + + async fn pending_nonce(&self, _: &'static str) -> Result { + self.as_ref().pending_nonce("").await + } + + async fn current_nonce(&self, _: &'static str) -> Result { + self.as_ref().current_nonce("").await + } } diff --git a/core/lib/eth_client/src/clients/mod.rs b/core/lib/eth_client/src/clients/mod.rs index 3a73d0fb7e33..e992fac2eaf6 100644 --- a/core/lib/eth_client/src/clients/mod.rs +++ b/core/lib/eth_client/src/clients/mod.rs @@ -1,2 +1,2 @@ -pub mod http_client; +pub mod http; pub mod mock; diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index 5c802a66ff54..cd5888ba4e41 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -1,4 +1,260 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub mod clients; -pub use clients::http_client::{ETHDirectClient, EthInterface}; +pub mod types; + +use crate::types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}; +use async_trait::async_trait; +use zksync_types::{ + web3::{ + contract::{ + tokens::{Detokenize, Tokenize}, + Options, + }, + ethabi, + types::{ + Address, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, + H256, U256, U64, + }, + }, + L1ChainId, +}; + +/// Common Web3 interface, as seen by the core applications. +/// Encapsulates the raw Web3 interaction, providing a high-level interface. +/// +/// ## Trait contents +/// +/// This trait contains methods that perform the "abstract" queries to Web3. That is, +/// there are no assumptions about the contract or account that is used to perform the queries. +/// If you want to add a method to this trait, make sure that it doesn't depend on any particular +/// contract or account address. For that, you can use the `BoundEthInterface` trait. +/// +/// ## `component` parameter +/// +/// Most of the trait methods support the `component` parameter. This parameter is used to +/// describe the caller of the method. It may be useful to find the component that makes an +/// unnecessarily high number of Web3 calls. Implementations are advised to count invocations +/// per component and expose them to Prometheus, e.g. via the `metrics` crate. +#[async_trait] +pub trait EthInterface { + /// Returns the nonce of the provided account at the specified block. + async fn nonce_at_for_account( + &self, + account: Address, + block: BlockNumber, + component: &'static str, + ) -> Result; + + /// Collects the base fee history for the specified block range. + /// + /// Returns one value for each block in the range, assuming that these blocks exist. + /// Will return an error if the `from_block + block_count` is beyond the head block. + async fn base_fee_history( + &self, + from_block: usize, + block_count: usize, + component: &'static str, + ) -> Result, Error>; + + /// Returns the `base_fee_per_gas` value for the currently pending L1 block. + async fn get_pending_block_base_fee_per_gas( + &self, + component: &'static str, + ) -> Result; + + /// Returns the current gas price. + async fn get_gas_price(&self, component: &'static str) -> Result; + + /// Returns the current block number.
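The `component` label described above is meant to end up as a metric label. A sketch of the kind of per-component accounting an implementation might do with the `metrics` facade (the metric name here is illustrative, not the one the client actually uses):

```rust
/// Illustrative helper: count every gateway call, labeled by caller and method.
fn record_call(component: &'static str, method: &'static str) {
    metrics::counter!(
        "eth_client.calls", 1,
        "component" => component,
        "method" => method
    );
}

// Inside an `EthInterface` implementation one would then call, e.g.:
// record_call(component, "block_number");
```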
+ async fn block_number(&self, component: &'static str) -> Result; + + /// Sends a transaction to the Ethereum network. + async fn send_raw_tx(&self, tx: Vec) -> Result; + + /// Fetches the transaction status for a specified transaction hash. + /// + /// Returns `Ok(None)` if the transaction is either not found or not executed yet. + /// Returns `Err` only if the request fails (e.g. due to network issues). + async fn get_tx_status( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error>; + + /// For a reverted transaction, attempts to recover information on the revert reason. + /// + /// Returns `Ok(Some)` if the transaction is reverted. + /// Returns `Ok(None)` if the transaction isn't found, wasn't executed yet, or if it was + /// executed successfully. + /// Returns `Err` only if the request fails (e.g. due to network issues). + async fn failure_reason(&self, tx_hash: H256) -> Result, Error>; + + /// Returns the transaction for the specified hash. + async fn get_tx( + &self, + hash: H256, + component: &'static str, + ) -> Result, Error>; + + /// Returns the receipt for the specified transaction hash. + async fn tx_receipt( + &self, + tx_hash: H256, + component: &'static str, + ) -> Result, Error>; + + /// Returns the ETH balance of the specified address. + async fn eth_balance(&self, address: Address, component: &'static str) -> Result; + + /// Invokes a function on a contract specified by `contract_address` / `contract_abi` using `eth_call`. + #[allow(clippy::too_many_arguments)] + async fn call_contract_function( + &self, + func: &str, + params: P, + from: A, + options: Options, + block: B, + contract_address: Address, + contract_abi: ethabi::Contract, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + B: Into> + Send, + P: Tokenize + Send; + + /// Returns the logs for the specified filter. + async fn logs(&self, filter: Filter, component: &'static str) -> Result, Error>; +} + +/// An extension of the `EthInterface` trait, which is used to perform queries that are bound to +/// a certain contract and account. +/// +/// The example use cases for this trait would be: +/// - An operator that sends transactions and interacts with the zkSync contract. +/// - A wallet implementation in the SDK that is tied to a user's account. +/// +/// When adding a method to this trait, +/// 1. Make sure that it's indeed "bound". If not, add it to the `EthInterface` trait instead. +/// 2. Consider adding the "unbound" version to the `EthInterface` trait and create a default method +/// implementation that invokes `contract` / `contract_addr` / `sender_account` methods. +#[async_trait] +pub trait BoundEthInterface: EthInterface +where + Self: Sync + Send, +{ + /// ABI of the contract that is used by the implementor. + fn contract(&self) -> &ethabi::Contract; + + /// Address of the contract that is used by the implementor. + fn contract_addr(&self) -> H160; + + /// Chain ID of the L1 network the client is *configured* to connect to. + /// + /// This value should be externally provided by the user rather than requested from the network + /// to avoid accidental network mismatch. + fn chain_id(&self) -> L1ChainId; + + /// Address of the account associated with the object implementing the trait. + fn sender_account(&self) -> Address; + + /// Returns the ERC20 token allowance for the pair (`Self::sender_account()`, `address`).
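As a usage sketch of the split described above: code that only needs the operator's account can be written against `BoundEthInterface` and then run with the HTTP-backed client or `MockEthereum` alike. This assumes the crate is imported as `zksync_eth_client`; the function and component names are hypothetical.

```rust
use zksync_eth_client::{types::Error, BoundEthInterface};
use zksync_types::U256;

/// Hypothetical consumer: relies only on "bound" methods, so any implementor works.
async fn operator_nonce_gap<C: BoundEthInterface>(client: &C) -> Result<U256, Error> {
    let current = client.current_nonce("example_component").await?;
    let pending = client.pending_nonce("example_component").await?;
    Ok(pending.checked_sub(current).unwrap_or_default())
}
```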
+ async fn allowance_on_account( + &self, + token_address: Address, + address: Address, + erc20_abi: ethabi::Contract, + ) -> Result; + + /// Signs the transaction and sends it to the Ethereum network. + /// Expected to use credentials associated with `Self::sender_account()`. + async fn sign_prepared_tx_for_addr( + &self, + data: Vec, + contract_addr: H160, + options: Options, + component: &'static str, + ) -> Result; + + /// Returns the nonce of the `Self::sender_account()` at the specified block. + async fn nonce_at(&self, block: BlockNumber, component: &'static str) -> Result { + self.nonce_at_for_account(self.sender_account(), block, component) + .await + } + + /// Returns the current nonce of the `Self::sender_account()`. + async fn current_nonce(&self, component: &'static str) -> Result { + self.nonce_at(BlockNumber::Latest, component).await + } + + /// Returns the pending nonce of the `Self::sender_account()`. + async fn pending_nonce(&self, component: &'static str) -> Result { + self.nonce_at(BlockNumber::Pending, component).await + } + + /// Similar to [`EthInterface::sign_prepared_tx_for_addr`], but is fixed over `Self::contract_addr()`. + async fn sign_prepared_tx( + &self, + data: Vec, + options: Options, + component: &'static str, + ) -> Result { + self.sign_prepared_tx_for_addr(data, self.contract_addr(), options, component) + .await + } + + /// Returns the ETH balance of `Self::sender_account()`. + async fn sender_eth_balance(&self, component: &'static str) -> Result { + self.eth_balance(self.sender_account(), component).await + } + + /// Returns the certain ERC20 token allowance for the `Self::sender_account()`. + async fn allowance( + &self, + token_address: Address, + erc20_abi: ethabi::Contract, + ) -> Result { + self.allowance_on_account(token_address, self.contract_addr(), erc20_abi) + .await + } + + /// Invokes a function on a contract specified by `Self::contract()` / `Self::contract_addr()`. + async fn call_main_contract_function( + &self, + func: &str, + params: P, + from: A, + options: Options, + block: B, + ) -> Result + where + R: Detokenize + Unpin, + A: Into> + Send, + P: Tokenize + Send, + B: Into> + Send, + { + self.call_contract_function( + func, + params, + from, + options, + block, + self.contract_addr(), + self.contract().clone(), + ) + .await + } + + /// Encodes a function using the `Self::contract()` ABI. + fn encode_tx_data(&self, func: &str, params: P) -> Vec { + let f = self + .contract() + .function(func) + .expect("failed to get function parameters"); + + f.encode_input(¶ms.into_tokens()) + .expect("failed to encode parameters") + } +} diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs new file mode 100644 index 000000000000..bef3b63b69a9 --- /dev/null +++ b/core/lib/eth_client/src/types.rs @@ -0,0 +1,71 @@ +// External uses +use zksync_types::web3::{ + ethabi, + types::{TransactionReceipt, H256, U256}, +}; + +/// Common error type exposed by the crate, +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Problem on the Ethereum client side (e.g. bad RPC call, network issues). + #[error("Request to ethereum gateway failed: {0}")] + EthereumGateway(#[from] zksync_types::web3::Error), + /// Problem with a contract call. + #[error("Call to contract failed: {0}")] + Contract(#[from] zksync_types::web3::contract::Error), + /// Problem with transaction signer. + #[error("Transaction signing failed: {0}")] + Signer(#[from] zksync_eth_signer::error::SignerError), + /// Problem with transaction decoding. 
+ #[error("Decoding revert reason failed: {0}")] + Decode(#[from] ethabi::Error), + /// Incorrect fee provided for a transaction. + #[error("Max fee {0} less than priority fee {1}")] + WrongFeeProvided(U256, U256), +} + +/// Representation of a signed transaction. +#[derive(Debug, Clone, PartialEq)] +pub struct SignedCallResult { + /// Raw transaction bytes. + pub raw_tx: Vec, + /// `max_priority_fee_per_gas` field of transaction (EIP1559). + pub max_priority_fee_per_gas: U256, + /// `max_fee_per_gas` field of transaction (EIP1559). + pub max_fee_per_gas: U256, + /// `nonce` field of transaction. + pub nonce: U256, + /// Transaction hash. + pub hash: H256, +} + +/// State of the executed Ethereum transaction. +#[derive(Debug, Clone)] +pub struct ExecutedTxStatus { + /// The hash of the executed L1 transaction. + pub tx_hash: H256, + /// Whether transaction was executed successfully or failed. + pub success: bool, + /// Receipt for a transaction. + pub receipt: TransactionReceipt, +} + +/// Information about transaction failure. +/// +/// Two common reasons for transaction failure are: +/// - Revert +/// - Out of gas. +/// +/// This structure tries to provide information about both of them. +#[derive(Debug, Clone)] +pub struct FailureInfo { + /// RPC error code. + pub revert_code: i64, + /// RPC error message (normally, for a reverted transaction it would + /// include the revert reason). + pub revert_reason: String, + /// Amount of gas used by transaction (may be `None` for `eth_call` requests). + pub gas_used: Option, + /// Gas limit of the transaction. + pub gas_limit: U256, +} diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index c9e6350ec467..66ede9580f5f 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml new file mode 100644 index 000000000000..4a7dd7e753d9 --- /dev/null +++ b/core/lib/health_check/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "zksync_health_check" +version = "0.1.0" +edition = "2021" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-2" +license = "Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs new file mode 100644 index 000000000000..0fed16544152 --- /dev/null +++ b/core/lib/health_check/src/lib.rs @@ -0,0 +1,15 @@ +/// Interface to be used for healthchecks +/// There's a list of health checks that are looped in the /healthcheck endpoint to verify status +pub trait CheckHealth: Send + Sync + 'static { + fn check_health(&self) -> CheckHealthStatus; +} + +/// Used to return health status when checked. 
+/// States: +/// Ready => move forward +/// NotReady => check fails with message String -- to be passed to /healthcheck caller +#[derive(Debug, PartialEq)] +pub enum CheckHealthStatus { + Ready, + NotReady(String), +} diff --git a/core/lib/mempool/Cargo.toml b/core/lib/mempool/Cargo.toml index 39529fe73f59..45259f85f162 100644 --- a/core/lib/mempool/Cargo.toml +++ b/core/lib/mempool/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/mempool/src/types.rs b/core/lib/mempool/src/types.rs index 6140229ef90c..130f8ad0016a 100644 --- a/core/lib/mempool/src/types.rs +++ b/core/lib/mempool/src/types.rs @@ -128,7 +128,7 @@ pub(crate) struct InsertionMetadata { /// Structure that can be used by state keeper to describe /// criteria for transaction it wants to fetch. -#[derive(Debug, Default)] +#[derive(Debug, Default, PartialEq, Eq)] pub struct L2TxFilter { /// L1 gas price. pub l1_gas_price: u64, diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 8811a660ba12..572a897f157c 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -20,20 +20,12 @@ vlog = { path = "../../lib/vlog", version = "1.0" } itertools = "0.10" rayon = "1.3.0" -anyhow = "1.0" -futures = "0.3" once_cell = "1.7" thiserror = "1.0" bincode = "1" -fnv = "1.0.3" serde = "1.0.90" -async-trait = "0.1" metrics = "0.20" byteorder = "1.3" -tokio = { version = "1", features = ["full"] } [dev-dependencies] -rand = "0.4" -serde_json = "1.0.0" -criterion = "0.3.0" tempfile = "3.0.2" diff --git a/core/lib/merkle_tree/README.md b/core/lib/merkle_tree/README.md new file mode 100644 index 000000000000..9ebe0d7db9ea --- /dev/null +++ b/core/lib/merkle_tree/README.md @@ -0,0 +1,9 @@ +# Merkle Tree + +This crate contains the basic functions to create and modify the Merkle tree. + +We're using a classic binary tree here (not a trie, not B-trees, etc.) to make circuit creation easier. Also, the +depth of the tree is fixed to 256. + +At any given moment, the storage keeps the tree only at a given block (and that block number is encoded in the +`block_number` row); it can be accessed via the `ZkSyncTree` struct. diff --git a/core/lib/merkle_tree/src/storage.rs b/core/lib/merkle_tree/src/storage.rs index 264bb4a86d56..30a1c62c04f0 100644 --- a/core/lib/merkle_tree/src/storage.rs +++ b/core/lib/merkle_tree/src/storage.rs @@ -122,11 +122,13 @@ impl Storage { tree_operation, ) { // revert of first occurrence - (_, TreeOperation::Delete) => { + (Some(_), TreeOperation::Delete) => { write_batch.delete_cf(cf, serialize_tree_leaf(leaf)); current_index -= 1; 0 } + // leaf doesn't exist + (None, TreeOperation::Delete) => 0, // existing leaf (Some(bytes), TreeOperation::Write { value, ..
}) => { let index = deserialize_leaf_index(&bytes); diff --git a/core/lib/merkle_tree/src/tests.rs b/core/lib/merkle_tree/src/tests.rs index 6fda64372a5f..f71012e38d05 100644 --- a/core/lib/merkle_tree/src/tests.rs +++ b/core/lib/merkle_tree/src/tests.rs @@ -1,6 +1,6 @@ use crate::tree_config::TreeConfig; use crate::types::{TreeKey, ZkHash, ZkHasher}; -use crate::ZkSyncTree; +use crate::{TreeMetadata, ZkSyncTree}; use std::str::FromStr; use tempfile::TempDir; use zksync_config::constants::ACCOUNT_CODE_STORAGE_ADDRESS; @@ -28,10 +28,10 @@ fn basic_workflow() { assert_eq!( expected_root_hash, - [ + H256([ 125, 25, 107, 171, 182, 155, 32, 70, 138, 108, 238, 150, 140, 205, 193, 39, 90, 92, 122, 233, 118, 238, 248, 201, 160, 55, 58, 206, 244, 216, 188, 10 - ], + ]), ); let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); @@ -57,10 +57,10 @@ fn basic_workflow_multiblock() { assert_eq!( expected_root_hash, - [ + H256([ 125, 25, 107, 171, 182, 155, 32, 70, 138, 108, 238, 150, 140, 205, 193, 39, 90, 92, 122, 233, 118, 238, 248, 201, 160, 55, 58, 206, 244, 216, 188, 10 - ], + ]), ); let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); @@ -137,7 +137,7 @@ fn revert_blocks() { tree_metadata }; - let witness_tree = TestWitnessTree::deserialize(tree_metadata[3].witness_input.clone()); + let witness_tree = TestWitnessTree::new(tree_metadata[3].clone()); assert!(witness_tree.get_leaf((4 * block_size - 1) as u64).is_some()); // revert last block @@ -176,6 +176,22 @@ fn revert_blocks() { tree.save().unwrap(); } + // revert two more blocks second time + // The result should be the same + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let logs_to_revert = mirror_logs + .iter() + .skip(2 * block_size) + .take(2 * block_size) + .map(|witness_log| (witness_log.storage_log.key.hashed_key_u256(), None)) + .collect(); + { + let mut tree = ZkSyncTree::new(storage); + tree.revert_logs(L1BatchNumber(1), logs_to_revert); + assert_eq!(tree.root_hash(), tree_metadata[1].root_hash); + tree.save().unwrap(); + } + // reapply one of the reverted logs and verify that indexing is correct let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); { @@ -183,7 +199,7 @@ fn revert_blocks() { let mut tree = ZkSyncTree::new(storage); let metadata = tree.process_block(vec![storage_log]); - let witness_tree = TestWitnessTree::deserialize(metadata.witness_input); + let witness_tree = TestWitnessTree::new(metadata); assert!(witness_tree.get_leaf((2 * block_size + 1) as u64).is_some()); tree.save().unwrap(); } @@ -201,10 +217,11 @@ fn reset_tree() { let logs = gen_storage_logs(); let mut tree = ZkSyncTree::new(storage); let config = TreeConfig::new(ZkHasher::default()); + let empty_tree_hash = H256::from_slice(&config.default_root_hash()); logs.chunks(5) .into_iter() - .fold(config.default_root_hash(), |hash, chunk| { + .fold(empty_tree_hash, |hash, chunk| { let _ = tree.process_block(chunk); tree.reset(); assert_eq!(tree.root_hash(), hash); @@ -260,13 +277,14 @@ pub struct TestWitnessTree { } impl TestWitnessTree { - pub fn deserialize(bytes: Vec) -> Self { - let storage_logs = bincode::deserialize(&bytes).expect("failed to deserialize witness"); + pub fn new(metadata: TreeMetadata) -> Self { + let witness_input = metadata.witness_input.unwrap(); + let storage_logs = witness_input.into_merkle_paths().collect(); Self { storage_logs } } pub fn root(&self) -> ZkHash { - self.storage_logs.last().unwrap().root_hash.clone() + 
self.storage_logs.last().unwrap().root_hash.to_vec() } pub fn get_leaf(&self, index: u64) -> Option { @@ -289,7 +307,7 @@ fn basic_witness_workflow() { let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new(db); let metadata = tree.process_block(first_chunk); - let witness_tree = TestWitnessTree::deserialize(metadata.witness_input); + let witness_tree = TestWitnessTree::new(metadata); assert_eq!( witness_tree.get_leaf(1), @@ -305,7 +323,7 @@ fn basic_witness_workflow() { let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new(db); let metadata = tree.process_block(second_chunk); - let witness_tree = TestWitnessTree::deserialize(metadata.witness_input); + let witness_tree = TestWitnessTree::new(metadata); assert_eq!( witness_tree.root(), [ @@ -343,7 +361,7 @@ fn read_logs() { let read_metadata = tree.process_block(convert_logs(read_logs)); assert_eq!(read_metadata.root_hash, write_metadata.root_hash); - let witness_tree = TestWitnessTree::deserialize(read_metadata.witness_input); + let witness_tree = TestWitnessTree::new(read_metadata); assert!(witness_tree.get_leaf(1).is_some()); assert!(witness_tree.get_leaf(2).is_some()); } @@ -355,10 +373,10 @@ fn root_hash_compatibility() { let mut tree = ZkSyncTree::new(db); assert_eq!( tree.root_hash(), - [ + H256([ 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106 - ], + ]), ); let storage_logs = vec![ WitnessStorageLog { @@ -445,10 +463,10 @@ fn root_hash_compatibility() { let metadata = tree.process_block(storage_logs); assert_eq!( metadata.root_hash, - [ + H256([ 35, 191, 235, 50, 17, 223, 143, 160, 240, 38, 139, 111, 221, 156, 42, 29, 72, 90, 196, 198, 72, 13, 219, 88, 59, 250, 94, 112, 221, 3, 44, 171 - ] + ]) ); } diff --git a/core/lib/merkle_tree/src/types.rs b/core/lib/merkle_tree/src/types.rs index 4490dd31fc64..0fbb43c5cb01 100644 --- a/core/lib/merkle_tree/src/types.rs +++ b/core/lib/merkle_tree/src/types.rs @@ -5,9 +5,14 @@ use serde::Serialize; use std::collections::HashMap; use zksync_crypto::hasher::blake2::Blake2Hasher; pub use zksync_types::writes::{InitialStorageWrite, RepeatedStorageWrite}; -use zksync_types::H256; +use zksync_types::{proofs::PrepareBasicCircuitsJob, H256}; use zksync_utils::impl_from_wrapper; +/// Position of a node in a tree. +/// The first argument (u16) is the depth, and the second (u256) is the index on this depth. +/// So the root, will have (0, 0) +/// Its children: (1,0), (1,1) +/// Their children: (2, 0), (2,1), (2,2), (2,3) etc. 
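The `(depth, index)` numbering documented above implies simple parent/child arithmetic, shown here with plain integers instead of `U256` (helper names are hypothetical, not part of the crate):

```rust
/// Children of the node at `(depth, index)` sit one level deeper, at `2 * index` and `2 * index + 1`.
fn children(depth: u16, index: u64) -> [(u16, u64); 2] {
    [(depth + 1, 2 * index), (depth + 1, 2 * index + 1)]
}

/// The parent of `(depth, index)` is at `(depth - 1, index / 2)`.
fn parent(depth: u16, index: u64) -> (u16, u64) {
    (depth - 1, index / 2)
}

fn main() {
    assert_eq!(children(1, 1), [(2, 2), (2, 3)]); // matches the doc comment above
    assert_eq!(parent(2, 3), (1, 1));
}
```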
#[derive(PartialEq, Eq, Hash, Clone, Debug, Serialize)] pub struct LevelIndex(pub (u16, U256)); @@ -73,9 +78,9 @@ pub type ZkHash = Bytes; /// Includes root hash, current tree location and serialized merkle paths for each storage log #[derive(Debug, Clone, Default)] pub struct TreeMetadata { - pub root_hash: ZkHash, + pub root_hash: H256, pub rollup_last_leaf_index: u64, - pub witness_input: Vec, + pub witness_input: Option, pub initial_writes: Vec, pub repeated_writes: Vec, } diff --git a/core/lib/merkle_tree/src/zksync_tree.rs b/core/lib/merkle_tree/src/zksync_tree.rs index 72aea1ab366e..fd53836c7c60 100644 --- a/core/lib/merkle_tree/src/zksync_tree.rs +++ b/core/lib/merkle_tree/src/zksync_tree.rs @@ -11,13 +11,14 @@ use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use std::borrow::Borrow; use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::convert::TryInto; use std::iter::once; use std::sync::Arc; -use tokio::time::Instant; +use std::time::Instant; use zksync_config::constants::ROOT_TREE_DEPTH; use zksync_crypto::hasher::Hasher; use zksync_storage::RocksDB; -use zksync_types::proofs::StorageLogMetadata; +use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; use zksync_types::{L1BatchNumber, StorageLogKind, WitnessStorageLog, H256}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] @@ -60,8 +61,12 @@ impl ZkSyncTree { Self::new_with_mode(db, TreeMode::Lightweight) } - pub fn root_hash(&self) -> ZkHash { - self.root_hash.clone() + pub fn mode(&self) -> TreeMode { + self.mode + } + + pub fn root_hash(&self) -> H256 { + H256::from_slice(&self.root_hash) } pub fn is_empty(&self) -> bool { @@ -188,7 +193,7 @@ impl ZkSyncTree { self.root_hash = patch_metadata .last() - .map(|metadata| metadata.root_hash.clone()) + .map(|metadata| metadata.root_hash.to_vec()) .unwrap_or_else(|| self.root_hash.clone()); patch_metadata @@ -207,14 +212,17 @@ impl ZkSyncTree { let metadata: Vec<_> = group.into_iter().map(|(metadata, _)| metadata).collect(); - let root_hash = metadata.last().unwrap().root_hash.clone(); - let witness_input = bincode::serialize(&(metadata, previous_index)) - .expect("witness serialization failed"); + let root_hash = metadata.last().unwrap().root_hash; + let mut witness_input = PrepareBasicCircuitsJob::new(previous_index); + witness_input.reserve(metadata.len()); + for merkle_path in metadata { + witness_input.push_merkle_path(merkle_path); + } TreeMetadata { - root_hash, + root_hash: H256::from_slice(&root_hash), rollup_last_leaf_index: last_index, - witness_input, + witness_input: Some(witness_input), initial_writes, repeated_writes, } @@ -232,9 +240,9 @@ impl ZkSyncTree { } = std::mem::take(&mut leaf_indices[0]); vec![TreeMetadata { - root_hash: self.root_hash.clone(), + root_hash: H256::from_slice(&self.root_hash), rollup_last_leaf_index: last_index, - witness_input: Vec::new(), + witness_input: None, initial_writes, repeated_writes, }] @@ -403,7 +411,7 @@ impl ZkSyncTree { } else { left_hash }; - merkle_paths.push(witness_hash.clone()); + merkle_paths.push(witness_hash.as_slice().try_into().unwrap()); } Self::make_node(level, key, node) })); @@ -421,7 +429,7 @@ impl ZkSyncTree { TreeOperation::Delete => H256::zero(), }; let metadata_log = StorageLogMetadata { - root_hash, + root_hash: root_hash.try_into().unwrap(), is_write, first_write, merkle_paths, @@ -449,8 +457,11 @@ impl ZkSyncTree { let empty_tree = self.config.empty_tree().to_vec(); let hasher = self.hasher().clone(); - let mut current_level = - 
vec![(self.root_hash(), (1, 0.into()).into(), (1, 1.into()).into())]; + let mut current_level = vec![( + self.root_hash.clone(), + (1, 0.into()).into(), + (1, 1.into()).into(), + )]; for node in empty_tree.iter().take(ROOT_TREE_DEPTH + 1).skip(1) { let default_hash = node.hash().to_vec(); diff --git a/core/lib/merkle_tree2/Cargo.toml b/core/lib/merkle_tree2/Cargo.toml new file mode 100644 index 000000000000..462321d29bbe --- /dev/null +++ b/core/lib/merkle_tree2/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "zksync_merkle_tree2" +version = "1.0.0" +edition = "2021" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-2" +license = "Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_crypto = { path = "../../lib/crypto", version = "1.0" } +zksync_storage = { path = "../../lib/storage", version = "1.0", default-features = false } + +leb128 = "0.2.5" +metrics = "0.20.1" +once_cell = "1.17.1" +rayon = "1.3.1" +thiserror = "1.0" + +[dev-dependencies] +zksync_config = { path = "../../lib/config", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } + +assert_matches = "1.5.0" +clap = { version = "4.2.2", features = ["derive"] } +rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_with = { version = "1", features = ["hex"] } +tempfile = "3.0.2" diff --git a/core/lib/merkle_tree2/README.md b/core/lib/merkle_tree2/README.md new file mode 100644 index 000000000000..0e18ae47903d --- /dev/null +++ b/core/lib/merkle_tree2/README.md @@ -0,0 +1,75 @@ +# Merkle Tree + +Binary Merkle tree implementation based on amortized radix-16 Merkle tree (AR16MT) described in the [Jellyfish Merkle +tree] white paper. Unlike Jellyfish Merkle tree, our construction uses vanilla binary tree hashing algorithm to make it +easier for the circuit creation. The depth of the tree is 256, and Blake2 is used as the hashing function. + +## Benchmarking + +The `loadtest` example is a CLI app allowing to measure tree performance. It allows using the in-memory or RocksDB +storage backend, and Blake2 or no-op hashing functions. For example, the following command launches a benchmark with 75 +blocks each containing 150,000 insertion operations. 
+ +```shell +cargo run --release -p zksync_merkle_tree2 --example loadtest -- \ + --chunk-size=500 75 150000 +``` + +The order of timings should be as follows (measured on MacBook Pro with 12-core Apple M2 Max CPU and 32 GB DDR5 RAM +using the command line above): + +```text +Processing block #74 +[metric] merkle_tree.load_nodes = 0.400870959 seconds +[metric] merkle_tree.extend_patch = 0.119743375 seconds +[metric] merkle_tree.extend_patch.new_leaves = 150000 +[metric] merkle_tree.extend_patch.new_internal_nodes = 57588 +[metric] merkle_tree.extend_patch.moved_leaves = 53976 +[metric] merkle_tree.extend_patch.updated_leaves = 0 +[metric] merkle_tree.extend_patch.avg_leaf_level = 26.74396987880927 +[metric] merkle_tree.extend_patch.max_leaf_level = 44 +[metric] merkle_tree.extend_patch.db_reads = 278133 +[metric] merkle_tree.extend_patch.patch_reads = 96024 +[metric] merkle_tree.finalize_patch = 0.707021 seconds +[metric] merkle_tree.leaf_count = 11250000 +[metric] merkle_tree.finalize_patch.hashed_bytes = 3205548448 bytes +Processed block #74 in 1.228553208s, root hash = 0x1ddec3794d0a1c5b44c2d9c7aa985cc61c70e988da2e6f2a810e0eb37f4322c0 +Committed block #74 in 571.588041ms +Verifying tree consistency... +Verified tree consistency in 37.478218666s +``` + +Full tree mode (with proofs) launched with the following command: + +```shell +cargo run --release -p zksync_merkle_tree2 --example loadtest -- \ + --chunk-size=500 --proofs --reads=50000 75 150000 +``` + +...has the following order of timings: + +```text +Processing block #74 +[metric] merkle_tree.load_nodes = 0.5310345 seconds +[metric] merkle_tree.extend_patch = 0.905285834 seconds +[metric] merkle_tree.extend_patch.new_leaves = 150000 +[metric] merkle_tree.extend_patch.new_internal_nodes = 57588 +[metric] merkle_tree.extend_patch.moved_leaves = 53976 +[metric] merkle_tree.extend_patch.updated_leaves = 0 +[metric] merkle_tree.extend_patch.avg_leaf_level = 26.74396987880927 +[metric] merkle_tree.extend_patch.max_leaf_level = 44 +[metric] merkle_tree.extend_patch.key_reads = 50000 +[metric] merkle_tree.extend_patch.db_reads = 400271 +[metric] merkle_tree.extend_patch.patch_reads = 96024 +[metric] merkle_tree.leaf_count = 11250000 +[metric] merkle_tree.finalize_patch = 0.302226041 seconds +[metric] merkle_tree.finalize_patch.hashed_bytes = 3439057088 bytes +Processed block #74 in 1.814916125s, root hash = 0x1ddec3794d0a1c5b44c2d9c7aa985cc61c70e988da2e6f2a810e0eb37f4322c0 +Committed block #74 in 904.560667ms +Verifying tree consistency... +Verified tree consistency in 37.935639292s +``` + +Launch the example with the `--help` flag for more details. + +[jellyfish merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf diff --git a/core/lib/merkle_tree2/examples/loadtest/main.rs b/core/lib/merkle_tree2/examples/loadtest/main.rs new file mode 100644 index 000000000000..52ef2e5695f7 --- /dev/null +++ b/core/lib/merkle_tree2/examples/loadtest/main.rs @@ -0,0 +1,145 @@ +//! Load-testing for the Merkle tree implementation. +//! +//! Should be compiled with the release profile, otherwise hashing and other ops would be +//! prohibitively slow. 
+ +use clap::Parser; +use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; +use tempfile::TempDir; + +use std::time::Instant; + +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_merkle_tree2::{ + Database, HashTree, MerkleTree, PatchSet, RocksDBWrapper, TreeInstruction, +}; +use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; + +mod recorder; + +use crate::recorder::PrintingRecorder; + +/// CLI for load-testing for the Merkle tree implementation. +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + /// Number of commits to perform. + #[arg(name = "commits")] + commit_count: u64, + /// Number of inserts / updates per commit. + #[arg(name = "ops")] + writes_per_commit: usize, + /// Generate Merkle proofs for each operation. + #[arg(name = "proofs", long)] + proofs: bool, + /// Additional number of reads of previously written keys per commit. + #[arg(name = "reads", long, default_value = "0", requires = "proofs")] + reads_per_commit: usize, + /// Additional number of updates of previously written keys per commit. + #[arg(name = "updates", long, default_value = "0")] + updates_per_commit: usize, + /// Use a no-op hashing function. + #[arg(name = "no-hash", long)] + no_hashing: bool, + /// Perform testing on in-memory DB rather than RocksDB (i.e., with focus on hashing logic). + #[arg(long = "in-memory", short = 'M')] + in_memory: bool, + /// Chunk size for RocksDB multi-get operations. + #[arg(long = "chunk-size", conflicts_with = "in_memory")] + chunk_size: Option, + /// Seed to use in the RNG for reproducibility. + #[arg(long = "rng-seed", default_value = "0")] + rng_seed: u64, +} + +impl Cli { + fn run(self) { + println!("Launched with options: {self:?}"); + PrintingRecorder::install(); + + let (mut mock_db, mut rocksdb); + let mut _temp_dir = None; + let db: &mut dyn Database = if self.in_memory { + mock_db = PatchSet::default(); + &mut mock_db + } else { + let dir = TempDir::new().expect("failed creating temp dir for RocksDB"); + println!( + "Created temp dir for RocksDB: {}", + dir.path().to_string_lossy() + ); + rocksdb = RocksDBWrapper::new(&dir); + if let Some(chunk_size) = self.chunk_size { + rocksdb.set_multi_get_chunk_size(chunk_size); + } + + _temp_dir = Some(dir); + &mut rocksdb + }; + + let hasher: &dyn HashTree = if self.no_hashing { &() } else { &Blake2Hasher }; + let mut rng = StdRng::seed_from_u64(self.rng_seed); + + let mut next_key_idx = 0_u64; + let mut next_value_idx = 0_u64; + for version in 0..self.commit_count { + let new_keys: Vec<_> = Self::generate_keys(next_key_idx..) 
+ .take(self.writes_per_commit) + .collect(); + let read_indices = (0..=next_key_idx).choose_multiple(&mut rng, self.reads_per_commit); + let updated_indices = + (0..=next_key_idx).choose_multiple(&mut rng, self.updates_per_commit); + next_key_idx += new_keys.len() as u64; + + next_value_idx += (new_keys.len() + updated_indices.len()) as u64; + let values = (next_value_idx..).map(H256::from_low_u64_be); + let updated_keys = Self::generate_keys(updated_indices.into_iter()); + let kvs = new_keys.into_iter().chain(updated_keys).zip(values); + + println!("Processing block #{version}"); + let start = Instant::now(); + let tree = MerkleTree::with_hasher(&*db, hasher); + let (root_hash, patch) = if self.proofs { + let reads = Self::generate_keys(read_indices.into_iter()) + .map(|key| (key, TreeInstruction::Read)); + let instructions = kvs + .map(|(key, hash)| (key, TreeInstruction::Write(hash))) + .chain(reads) + .collect(); + let (output, patch) = tree.extend_with_proofs(instructions); + (output.root_hash().unwrap(), patch) + } else { + let (output, patch) = tree.extend(kvs.collect()); + (output.root_hash, patch) + }; + let elapsed = start.elapsed(); + println!("Processed block #{version} in {elapsed:?}, root hash = {root_hash:?}"); + + let start = Instant::now(); + db.apply_patch(patch); + let elapsed = start.elapsed(); + println!("Committed block #{version} in {elapsed:?}"); + } + + println!("Verifying tree consistency..."); + let start = Instant::now(); + MerkleTree::with_hasher(&*db, hasher) + .verify_consistency(self.commit_count - 1) + .expect("tree consistency check failed"); + let elapsed = start.elapsed(); + println!("Verified tree consistency in {elapsed:?}"); + } + + fn generate_keys(key_indexes: impl Iterator) -> impl Iterator { + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + key_indexes.map(move |idx| { + let key = H256::from_low_u64_be(idx); + let key = StorageKey::new(AccountTreeId::new(address), key); + key.hashed_key_u256() + }) + } +} + +fn main() { + Cli::parse().run() +} diff --git a/core/lib/merkle_tree2/examples/loadtest/recorder.rs b/core/lib/merkle_tree2/examples/loadtest/recorder.rs new file mode 100644 index 000000000000..1b0dddff164e --- /dev/null +++ b/core/lib/merkle_tree2/examples/loadtest/recorder.rs @@ -0,0 +1,124 @@ +//! Simple `metrics::Recorder` implementation that prints information to stdout. + +use metrics::{ + Counter, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Recorder, SharedString, Unit, +}; + +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, Mutex, + }, +}; + +type SharedMetadata = Mutex>; + +#[derive(Debug)] +struct PrintingMetric { + key: KeyName, + value: AtomicU64, + unit: Option, +} + +impl PrintingMetric { + fn new(key: KeyName, unit: Option) -> Self { + Self { + key, + value: AtomicU64::new(0), + unit, + } + } + + fn report_value(&self) { + let value = f64::from_bits(self.value.load(Ordering::Relaxed)); + let unit = match &self.unit { + None | Some(Unit::Count) => "", + Some(other) => other.as_str(), + }; + let space = if unit.is_empty() { "" } else { " " }; + println!( + "[metric] {key} = {value}{space}{unit}", + key = self.key.as_str() + ); + } +} + +impl GaugeFn for PrintingMetric { + fn increment(&self, value: f64) { + self.value.increment(value); + self.report_value(); + // ^ These calls are non-atomic, but in practice values are updated infrequently, + // so we're OK with it. 
+ } + + fn decrement(&self, value: f64) { + self.value.decrement(value); + self.report_value(); + } + + fn set(&self, value: f64) { + self.value.set(value); + self.report_value(); + } +} + +impl HistogramFn for PrintingMetric { + fn record(&self, value: f64) { + self.set(value); + } +} + +#[derive(Debug, Default)] +struct MetricMetadata { + unit: Option, +} + +#[derive(Debug, Default)] +pub struct PrintingRecorder { + metadata: SharedMetadata, +} + +impl PrintingRecorder { + pub fn install() { + let this = Self::default(); + metrics::set_boxed_recorder(Box::new(this)) + .expect("failed setting printing metrics recorder") + } + + fn create_metric(&self, key: &Key) -> Arc { + let (key_name, _) = key.clone().into_parts(); + let mut metadata = self.metadata.lock().unwrap(); + let metadata = metadata.entry(key_name.clone()).or_default(); + let gauge = PrintingMetric::new(key_name, metadata.unit); + Arc::new(gauge) + } +} + +impl Recorder for PrintingRecorder { + fn describe_counter(&self, key: KeyName, unit: Option, _description: SharedString) { + let mut metadata = self.metadata.lock().unwrap(); + let metadata = metadata.entry(key).or_default(); + metadata.unit = unit.or(metadata.unit); + } + + fn describe_gauge(&self, key: KeyName, unit: Option, description: SharedString) { + self.describe_counter(key, unit, description); + } + + fn describe_histogram(&self, key: KeyName, unit: Option, description: SharedString) { + self.describe_counter(key, unit, description); + } + + fn register_counter(&self, _key: &Key) -> Counter { + Counter::noop() // counters are not used + } + + fn register_gauge(&self, key: &Key) -> Gauge { + Gauge::from_arc(self.create_metric(key)) + } + + fn register_histogram(&self, key: &Key) -> Histogram { + Histogram::from_arc(self.create_metric(key)) + } +} diff --git a/core/lib/merkle_tree2/src/consistency.rs b/core/lib/merkle_tree2/src/consistency.rs new file mode 100644 index 000000000000..568ec5b55e9c --- /dev/null +++ b/core/lib/merkle_tree2/src/consistency.rs @@ -0,0 +1,415 @@ +//! Consistency verification for the Merkle tree. 
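As a rough usage sketch of the check implemented in this module (mirroring the tests below, with the in-memory `PatchSet` backend): commit a version, then verify it.

```rust
use zksync_merkle_tree2::{Database, MerkleTree, PatchSet};
use zksync_types::{H256, U256};

fn main() {
    let mut db = PatchSet::default();
    let (_, patch) = MerkleTree::new(&db).extend(vec![(U256::from(42_u64), H256([1; 32]))]);
    db.apply_patch(patch);
    // Walks the whole tree at version 0, checking hashes, full keys and leaf indices.
    MerkleTree::new(&db)
        .verify_consistency(0)
        .expect("tree is inconsistent");
}
```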
+ +use rayon::prelude::*; + +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::{ + errors::DeserializeError, + types::{LeafNode, Nibbles, Node, NodeKey}, + Database, Key, MerkleTree, Root, ValueHash, +}; + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum ConsistencyError { + #[error("failed deserializing node from DB: {0}")] + Deserialize(#[from] DeserializeError), + #[error("tree version {0} does not exist")] + MissingVersion(u64), + #[error("missing root for tree version {0}")] + MissingRoot(u64), + #[error( + "missing {node_str} at {key}", + node_str = if *is_leaf { "leaf" } else { "internal node" } + )] + MissingNode { key: NodeKey, is_leaf: bool }, + #[error("internal node at terminal tree level {key}")] + TerminalInternalNode { key: NodeKey }, + #[error("tree root specifies that tree has {expected} leaves, but it actually has {actual}")] + LeafCountMismatch { expected: u64, actual: u64 }, + #[error( + "internal node at {key} specifies that child hash at `{nibble:x}` \ + is {expected}, but it actually is {actual}" + )] + HashMismatch { + key: NodeKey, + nibble: u8, + expected: ValueHash, + actual: ValueHash, + }, + #[error( + "leaf at {key} specifies its full key as {full_key}, which doesn't start with the node key" + )] + FullKeyMismatch { key: NodeKey, full_key: Key }, + #[error("leaf with key {full_key} has zero index, while leaf indices must start with 1")] + ZeroIndex { full_key: Key }, + #[error( + "leaf with key {full_key} has index {index}, which is greater than \ + leaf count {leaf_count} specified at tree root" + )] + LeafIndexOverflow { + index: u64, + leaf_count: u64, + full_key: Key, + }, + #[error("leaf with key {full_key} has same index {index} as another key")] + DuplicateLeafIndex { index: u64, full_key: Key }, +} + +impl MerkleTree<'_, DB> +where + DB: Database + ?Sized, +{ + /// Verifies the internal tree consistency as stored in the database. + /// + /// # Errors + /// + /// Returns an error (the first encountered one if there are multiple). + pub fn verify_consistency(&self, version: u64) -> Result<(), ConsistencyError> { + let manifest = self.db.try_manifest()?; + let manifest = manifest.ok_or(ConsistencyError::MissingVersion(version))?; + if version >= manifest.version_count { + return Err(ConsistencyError::MissingVersion(version)); + } + + let root = self + .db + .try_root(version)? + .ok_or(ConsistencyError::MissingRoot(version))?; + let (leaf_count, root_node) = match root { + Root::Empty => return Ok(()), + Root::Filled { leaf_count, node } => (leaf_count.get(), node), + }; + + // We want to perform a depth-first walk of the tree in order to not keep + // much in memory. + let root_key = Nibbles::EMPTY.with_version(version); + let leaf_data = LeafConsistencyData::new(leaf_count); + self.validate_node(&root_node, root_key, &leaf_data)?; + leaf_data.validate_count() + } + + fn validate_node( + &self, + node: &Node, + key: NodeKey, + leaf_data: &LeafConsistencyData, + ) -> Result { + match node { + Node::Leaf(leaf) => { + let full_key_nibbles = Nibbles::new(&leaf.full_key, key.nibbles.nibble_count()); + if full_key_nibbles != key.nibbles { + return Err(ConsistencyError::FullKeyMismatch { + key, + full_key: leaf.full_key, + }); + } + leaf_data.insert_leaf(leaf)?; + } + + Node::Internal(node) => { + // `.into_par_iter()` below is the only place where `rayon`-based parallelism + // is used in tree verification. 
+ let children: Vec<_> = node.children().collect(); + children + .into_par_iter() + .try_for_each(|(nibble, child_ref)| { + let child_key = key + .nibbles + .push(nibble) + .ok_or(ConsistencyError::TerminalInternalNode { key })?; + let child_key = child_key.with_version(child_ref.version); + let child = self + .db + .try_tree_node(&child_key, child_ref.is_leaf)? + .ok_or(ConsistencyError::MissingNode { + key: child_key, + is_leaf: child_ref.is_leaf, + })?; + + // Recursion here is OK; the tree isn't that deep (~8 nibbles for a tree with + // ~1B entries). + let child_hash = self.validate_node(&child, child_key, leaf_data)?; + if child_hash == child_ref.hash { + Ok(()) + } else { + Err(ConsistencyError::HashMismatch { + key, + nibble, + expected: child_ref.hash, + actual: child_hash, + }) + } + })?; + } + } + + let level = key.nibbles.nibble_count() * 4; + Ok(node.hash(&mut self.hasher.into(), level)) + } +} + +#[derive(Debug)] +struct LeafConsistencyData { + expected_leaf_count: u64, + actual_leaf_count: AtomicU64, + leaf_indices_set: AtomicBitSet, +} + +#[allow(clippy::cast_possible_truncation)] // expected leaf count is quite small +impl LeafConsistencyData { + fn new(expected_leaf_count: u64) -> Self { + Self { + expected_leaf_count, + actual_leaf_count: AtomicU64::new(0), + leaf_indices_set: AtomicBitSet::new(expected_leaf_count as usize), + } + } + + fn insert_leaf(&self, leaf: &LeafNode) -> Result<(), ConsistencyError> { + if leaf.leaf_index == 0 { + return Err(ConsistencyError::ZeroIndex { + full_key: leaf.full_key, + }); + } + if leaf.leaf_index > self.expected_leaf_count { + return Err(ConsistencyError::LeafIndexOverflow { + index: leaf.leaf_index, + leaf_count: self.expected_leaf_count, + full_key: leaf.full_key, + }); + } + + let index = (leaf.leaf_index - 1) as usize; + if self.leaf_indices_set.set(index) { + return Err(ConsistencyError::DuplicateLeafIndex { + index: leaf.leaf_index, + full_key: leaf.full_key, + }); + } + self.actual_leaf_count.fetch_add(1, Ordering::Relaxed); + Ok(()) + } + + fn validate_count(mut self) -> Result<(), ConsistencyError> { + let actual_leaf_count = *self.actual_leaf_count.get_mut(); + if actual_leaf_count == self.expected_leaf_count { + Ok(()) + } else { + Err(ConsistencyError::LeafCountMismatch { + expected: self.expected_leaf_count, + actual: actual_leaf_count, + }) + } + } +} + +/// Primitive atomic bit set implementation that only supports setting bits. +#[derive(Debug)] +struct AtomicBitSet { + bits: Vec, +} + +impl AtomicBitSet { + const BITS_PER_ATOMIC: usize = 8; + + fn new(len: usize) -> Self { + let atomic_count = (len + Self::BITS_PER_ATOMIC - 1) / Self::BITS_PER_ATOMIC; + let mut bits = Vec::with_capacity(atomic_count); + bits.resize_with(atomic_count, AtomicU64::default); + Self { bits } + } + + /// Returns the previous bit value. 
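+ /// Implemented as a single `fetch_or` on the containing `AtomicU64`; the masked bit of the + /// previous value indicates whether the bit was already set.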
+ fn set(&self, bit_index: usize) -> bool { + let atomic_index = bit_index / Self::BITS_PER_ATOMIC; + let shift_in_atomic = bit_index % Self::BITS_PER_ATOMIC; + let atomic = &self.bits[atomic_index]; + let mask = 1 << (shift_in_atomic as u64); + let prev_value = atomic.fetch_or(mask, Ordering::SeqCst); + prev_value & mask != 0 + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use std::num::NonZeroU64; + + use super::*; + use crate::PatchSet; + use zksync_types::{H256, U256}; + + const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); + const SECOND_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0100_0000]); + + fn prepare_database() -> PatchSet { + let mut db = PatchSet::default(); + let tree = MerkleTree::new(&db); + let (_, patch) = tree.extend(vec![ + (FIRST_KEY, H256([1; 32])), + (SECOND_KEY, H256([2; 32])), + ]); + db.apply_patch(patch); + db + } + + #[test] + fn atomic_bit_set_basics() { + let bit_set = AtomicBitSet::new(10); + assert!(!bit_set.set(3)); + assert!(!bit_set.set(7)); + assert!(!bit_set.set(6)); + assert!(!bit_set.set(9)); + assert!(bit_set.set(3)); + assert!(bit_set.set(7)); + assert!(!bit_set.set(0)); + } + + #[test] + fn basic_consistency_checks() { + let db = prepare_database(); + MerkleTree::new(&db).verify_consistency(0).unwrap(); + } + + #[test] + fn missing_version_error() { + let mut db = prepare_database(); + + db.manifest_mut().version_count = 0; + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!(err, ConsistencyError::MissingVersion(0)); + } + + #[test] + fn missing_root_error() { + let mut db = prepare_database(); + + db.roots_mut().remove(&0); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!(err, ConsistencyError::MissingRoot(0)); + } + + #[test] + fn missing_node_error() { + let mut db = prepare_database(); + + let leaf_key = db + .nodes_mut() + .find_map(|(key, node)| matches!(node, Node::Leaf(_)).then(|| *key)); + let leaf_key = leaf_key.unwrap(); + db.remove_node(&leaf_key); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::MissingNode { key, is_leaf: true } if key == leaf_key + ); + } + + #[test] + fn leaf_count_mismatch_error() { + let mut db = prepare_database(); + + let root = db.roots_mut().get_mut(&0).unwrap(); + let Root::Filled { leaf_count, .. } = root else { + panic!("unexpected root: {root:?}"); + }; + *leaf_count = NonZeroU64::new(42).unwrap(); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::LeafCountMismatch { + expected: 42, + actual: 2 + } + ); + } + + #[test] + fn hash_mismatch_error() { + let mut db = prepare_database(); + + let root = db.roots_mut().get_mut(&0).unwrap(); + let Root::Filled { node: Node::Internal(node), .. } = root else { + panic!("unexpected root: {root:?}"); + }; + let child_ref = node.child_ref_mut(0xd).unwrap(); + child_ref.hash = ValueHash::zero(); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::HashMismatch { + key, + nibble: 0xd, + expected, + .. 
+ } if key == NodeKey::empty(0) && expected == ValueHash::zero() + ); + } + + #[test] + fn full_key_mismatch_error() { + let mut db = prepare_database(); + + let leaf_key = db.nodes_mut().find_map(|(key, node)| { + if let Node::Leaf(leaf) = node { + leaf.full_key = U256::zero(); + return Some(*key); + } + None + }); + let leaf_key = leaf_key.unwrap(); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::FullKeyMismatch { key, full_key } + if key == leaf_key && full_key == U256::zero() + ); + } + + #[test] + fn leaf_index_overflow_error() { + let mut db = prepare_database(); + + let leaf_key = db.nodes_mut().find_map(|(key, node)| { + if let Node::Leaf(leaf) = node { + leaf.leaf_index = 42; + return Some(*key); + } + None + }); + leaf_key.unwrap(); + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::LeafIndexOverflow { + index: 42, + leaf_count: 2, + .. + } + ); + } + + #[test] + fn duplicate_leaf_index_error() { + let mut db = prepare_database(); + + for (_, node) in db.nodes_mut() { + if let Node::Leaf(leaf) = node { + leaf.leaf_index = 1; + } + } + + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + assert_matches!(err, ConsistencyError::DuplicateLeafIndex { index: 1, .. }); + } +} diff --git a/core/lib/merkle_tree2/src/domain.rs b/core/lib/merkle_tree2/src/domain.rs new file mode 100644 index 000000000000..71d5c6ea7694 --- /dev/null +++ b/core/lib/merkle_tree2/src/domain.rs @@ -0,0 +1,310 @@ +//! Tying the Merkle tree implementation to the problem domain. + +use rayon::{ThreadPool, ThreadPoolBuilder}; + +use std::{borrow::Borrow, num::NonZeroU32}; + +use crate::{ + types::TREE_DEPTH, Database, HashTree, Key, MerkleTree, Patched, RocksDBWrapper, Root, + TreeInstruction, TreeLogEntry, ValueHash, +}; +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_storage::RocksDB; +use zksync_types::{ + proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + writes::{InitialStorageWrite, RepeatedStorageWrite}, + L1BatchNumber, StorageLogKind, WitnessStorageLog, +}; + +/// Metadata for the current tree state. +#[derive(Debug, Clone)] +pub struct TreeMetadata { + /// Current root hash of the tree. + pub root_hash: ValueHash, + /// 1-based index of the next leaf to be inserted in the tree. + pub rollup_last_leaf_index: u64, + /// Initial writes performed in the processed block. + pub initial_writes: Vec, + /// Repeated writes performed in the processed block. + pub repeated_writes: Vec, + /// Witness information. + pub witness: Option, +} + +#[derive(Debug, PartialEq, Eq)] +enum TreeMode { + Lightweight, + Full, +} + +/// Domain-specific wrapper of the Merkle tree. +/// +/// This wrapper will accumulate changes introduced by [`Self::process_block()`], +/// [`Self::process_blocks()`] and [`Self::revert_logs()`] in RAM without saving them +/// to RocksDB. The accumulated changes can be saved to RocksDB via [`Self::save()`] +/// or discarded via [`Self::reset()`]. +#[derive(Debug)] +pub struct ZkSyncTree { + database: Patched, + thread_pool: Option, + mode: TreeMode, +} + +impl ZkSyncTree { + // A reasonable chunk size for RocksDB multi-get operations. Obtained as a result + // of local benchmarking. 
+ const MULTI_GET_CHUNK_SIZE: usize = 500; + + fn create_thread_pool(thread_count: usize) -> ThreadPool { + ThreadPoolBuilder::new() + .thread_name(|idx| format!("new-merkle-tree-{idx}")) + .num_threads(thread_count) + .build() + .expect("failed initializing `rayon` thread pool") + } + + /// Creates a tree with the full processing mode. + pub fn new(db: RocksDB) -> Self { + Self::new_with_mode(db, TreeMode::Full) + } + + /// Creates a tree with the lightweight processing mode. + pub fn new_lightweight(db: RocksDB) -> Self { + Self::new_with_mode(db, TreeMode::Lightweight) + } + + fn new_with_mode(db: RocksDB, mode: TreeMode) -> Self { + let mut wrapper = RocksDBWrapper::from(db); + wrapper.set_multi_get_chunk_size(Self::MULTI_GET_CHUNK_SIZE); + Self { + database: Patched::new(wrapper), + thread_pool: None, + mode, + } + } + + /// Signals that the tree should use a dedicated `rayon` thread pool for parallel operations + /// (for now, hash computations). + /// + /// If `thread_count` is 0, the default number of threads will be used; see `rayon` docs + /// for details. + pub fn use_dedicated_thread_pool(&mut self, thread_count: usize) { + self.thread_pool = Some(Self::create_thread_pool(thread_count)); + } + + /// Returns the current root hash of this tree. + pub fn root_hash(&self) -> ValueHash { + let tree = MerkleTree::new(&self.database); + tree.latest_root_hash() + } + + /// Checks whether this tree is empty. + pub fn is_empty(&self) -> bool { + let tree = MerkleTree::new(&self.database); + let Some(version) = tree.latest_version() else { + return true; + }; + tree.root(version) + .map_or(true, |root| matches!(root, Root::Empty)) + } + + /// Returns the current block number. + pub fn block_number(&self) -> u32 { + let tree = MerkleTree::new(&self.database); + tree.latest_version().map_or(0, |version| { + u32::try_from(version + 1).expect("integer overflow for block number") + }) + } + + /// Verifies tree consistency. `block_number`, if provided, specifies the version of the tree + /// to be checked, expressed as the number of blocks applied to the tree. By default, + /// the latest tree version is checked. + /// + /// # Panics + /// + /// Panics if an inconsistency is detected. + pub fn verify_consistency(&self, block_number: NonZeroU32) { + let tree = MerkleTree::new(&self.database); + let version = u64::from(block_number.get() - 1); + tree.verify_consistency(version).unwrap_or_else(|err| { + panic!("Tree at version {version} is inconsistent: {err}"); + }); + } + + /// Processes an iterator of block logs comprising a single block. 
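+ /// Depending on the tree mode, this either computes Merkle proofs for each log (full mode) + /// or only updates the tree (lightweight mode).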
+ pub fn process_block(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata { + match self.mode { + TreeMode::Full => self.process_block_full(storage_logs), + TreeMode::Lightweight => self.process_block_lightweight(storage_logs), + } + } + + fn process_block_full(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata { + let instructions = Self::transform_logs(storage_logs); + let tree = MerkleTree::new(&self.database); + let starting_leaf_count = tree.latest_root().leaf_count(); + + let (output, patch) = if let Some(thread_pool) = &self.thread_pool { + thread_pool.install(|| tree.extend_with_proofs(instructions.clone())) + } else { + tree.extend_with_proofs(instructions.clone()) + }; + self.database.apply_patch(patch); + + let mut witness = PrepareBasicCircuitsJob::new(starting_leaf_count + 1); + witness.reserve(output.logs.len()); + for (log, (key, instruction)) in output.logs.iter().zip(&instructions) { + let empty_levels_end = TREE_DEPTH - log.merkle_path.len(); + let empty_subtree_hashes = + (0..empty_levels_end).map(|i| Blake2Hasher.empty_subtree_hash(i)); + let merkle_paths = log.merkle_path.iter().copied(); + let merkle_paths = empty_subtree_hashes + .chain(merkle_paths) + .map(|hash| hash.0) + .collect(); + + let log = StorageLogMetadata { + root_hash: log.root_hash.0, + is_write: !log.base.is_read(), + first_write: matches!(log.base, TreeLogEntry::Inserted { .. }), + merkle_paths, + leaf_hashed_key: *key, + leaf_enumeration_index: match log.base { + TreeLogEntry::Updated { leaf_index, .. } + | TreeLogEntry::Inserted { leaf_index } + | TreeLogEntry::Read { leaf_index, .. } => leaf_index, + TreeLogEntry::ReadMissingKey => 0, + }, + value_written: match instruction { + TreeInstruction::Write(value) => value.0, + TreeInstruction::Read => [0_u8; 32], + }, + value_read: match log.base { + TreeLogEntry::Updated { previous_value, .. } => previous_value.0, + TreeLogEntry::Read { value, .. } => value.0, + TreeLogEntry::Inserted { .. } | TreeLogEntry::ReadMissingKey => [0_u8; 32], + }, + }; + witness.push_merkle_path(log); + } + + let root_hash = output.root_hash().unwrap(); + let logs = output + .logs + .into_iter() + .filter_map(|log| (!log.base.is_read()).then_some(log.base)); + let kvs = instructions.into_iter().filter_map(|(key, instruction)| { + let TreeInstruction::Write(value) = instruction else { + return None; + }; + Some((key, value)) + }); + let (initial_writes, repeated_writes) = Self::extract_writes(logs, kvs); + + TreeMetadata { + root_hash, + rollup_last_leaf_index: output.leaf_count + 1, + initial_writes, + repeated_writes, + witness: Some(witness), + } + } + + fn transform_logs(storage_logs: &[WitnessStorageLog]) -> Vec<(Key, TreeInstruction)> { + let instructions = storage_logs.iter().map(|log| { + let log = &log.storage_log; + let key = log.key.hashed_key_u256(); + let instruction = match log.kind { + StorageLogKind::Write => TreeInstruction::Write(log.value), + StorageLogKind::Read => TreeInstruction::Read, + }; + (key, instruction) + }); + instructions.collect() + } + + fn extract_writes( + logs: impl Iterator, + kvs: impl Iterator, + ) -> (Vec, Vec) { + let mut initial_writes = vec![]; + let mut repeated_writes = vec![]; + for (log_entry, (key, value)) in logs.zip(kvs) { + match log_entry { + TreeLogEntry::Inserted { .. } => { + initial_writes.push(InitialStorageWrite { key, value }); + } + TreeLogEntry::Updated { leaf_index, .. } => { + repeated_writes.push(RepeatedStorageWrite { + index: leaf_index, + value, + }); + } + TreeLogEntry::Read { .. 
} | TreeLogEntry::ReadMissingKey => {} + } + } + (initial_writes, repeated_writes) + } + + fn process_block_lightweight(&mut self, storage_logs: &[WitnessStorageLog]) -> TreeMetadata { + let kvs = Self::filter_write_logs(storage_logs); + let tree = MerkleTree::new(&self.database); + let (output, patch) = if let Some(thread_pool) = &self.thread_pool { + thread_pool.install(|| tree.extend(kvs.clone())) + } else { + tree.extend(kvs.clone()) + }; + self.database.apply_patch(patch); + let (initial_writes, repeated_writes) = + Self::extract_writes(output.logs.into_iter(), kvs.into_iter()); + + TreeMetadata { + root_hash: output.root_hash, + rollup_last_leaf_index: output.leaf_count + 1, + initial_writes, + repeated_writes, + witness: None, + } + } + + fn filter_write_logs(storage_logs: &[WitnessStorageLog]) -> Vec<(Key, ValueHash)> { + let kvs = storage_logs.iter().filter_map(|log| { + let log = &log.borrow().storage_log; + match log.kind { + StorageLogKind::Write => { + let key = log.key.hashed_key_u256(); + Some((key, log.value)) + } + StorageLogKind::Read => None, + } + }); + kvs.collect() + } + + /// Reverts the tree to a previous state. + /// + /// Just like [`Self::process_block()`], this method will overwrite all unsaved changes + /// in the tree. + pub fn revert_logs(&mut self, block_number: L1BatchNumber) { + self.database.reset(); + + let block_number = u64::from(block_number.0 + 1); + let tree = MerkleTree::new(&self.database); + if let Some(patch) = tree.truncate_versions(block_number) { + self.database.apply_patch(patch); + } + } + + /// Saves the accumulated changes in the tree to RocksDB. This method or [`Self::reset()`] + /// should be called after each `process_block()` / `process_blocks()` / `revert_logs()` + /// call; otherwise, the changes produced by the processing method call will be lost + /// on subsequent calls. + pub fn save(&mut self) { + self.database.flush(); + } + + /// Resets the tree to the latest database state. + pub fn reset(&mut self) { + self.database.reset(); + } +} diff --git a/core/lib/merkle_tree2/src/errors.rs b/core/lib/merkle_tree2/src/errors.rs new file mode 100644 index 000000000000..92e6fba45931 --- /dev/null +++ b/core/lib/merkle_tree2/src/errors.rs @@ -0,0 +1,175 @@ +//! Error types. + +use std::{error, fmt, str::Utf8Error}; + +use crate::types::NodeKey; + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum DeserializeErrorKind { + /// Unexpected end-of-input was encountered. + #[error("unexpected end of input")] + UnexpectedEof, + /// Error reading a LEB128-encoded value. + #[error("failed reading LEB128-encoded value: {0}")] + Leb128(#[source] leb128::read::Error), + /// Error reading a UTF-8 string. + #[error("failed reading UTF-8 string: {0}")] + Utf8(#[source] Utf8Error), + /// An internal node is empty (has no children). + #[error("empty internal node")] + EmptyInternalNode, + /// Bit mask specifying a child kind in an internal tree node is invalid. + #[error("invalid bit mask specifying a child kind in an internal tree node")] + InvalidChildKind, + + /// Missing required tag in the tree manifest. + #[error("missing required tag `{0}` in tree manifest")] + MissingTag(&'static str), + /// Unknown tag in the tree manifest. + #[error("unknown tag `{0}` in tree manifest")] + UnknownTag(String), + /// Malformed tag in the tree manifest. + #[error("malformed tag `{name}` in tree manifest: {err}")] + MalformedTag { + /// Tag name. + name: &'static str, + /// Error that has occurred parsing the tag. 
+ #[source] + err: Box, + }, +} + +impl DeserializeErrorKind { + /// Appends a context to this error. + pub fn with_context(self, context: ErrorContext) -> DeserializeError { + DeserializeError { + kind: self, + contexts: vec![context], + } + } +} + +/// Context in which a [`DeserializeError`] can occur. +#[derive(Debug)] +#[non_exhaustive] +pub enum ErrorContext { + /// Tree manifest. + Manifest, + /// Root with the specified version. + Root(u64), + /// Leaf node with the specified storage key. + Leaf(NodeKey), + /// Internal node with the specified storage key. + InternalNode(NodeKey), + /// Hash value of a child reference in an internal tree node. + ChildRefHash, + /// Mask in an internal node specifying children existence and type. + ChildrenMask, + + /// Number of leaf nodes in a tree root. + LeafCount, + /// Leaf index in a leaf node. + LeafIndex, + /// Version of a child in an internal node. + Version, +} + +impl fmt::Display for ErrorContext { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Manifest => formatter.write_str("tree manifest"), + Self::Root(version) => write!(formatter, "root at version {version}"), + Self::Leaf(key) => write!(formatter, "leaf at `{key}`"), + Self::InternalNode(key) => write!(formatter, "internal node at `{key}`"), + Self::ChildRefHash => formatter.write_str("hash value of a child reference"), + Self::ChildrenMask => formatter.write_str("children mask"), + Self::LeafCount => formatter.write_str("number of leaf nodes"), + Self::LeafIndex => formatter.write_str("leaf index"), + Self::Version => formatter.write_str("version of a child"), + } + } +} + +/// Error deserializing data from RocksDB. +#[derive(Debug)] +pub struct DeserializeError { + kind: DeserializeErrorKind, + contexts: Vec, +} + +impl From for DeserializeError { + fn from(kind: DeserializeErrorKind) -> Self { + Self { + kind, + contexts: Vec::new(), + } + } +} + +impl DeserializeError { + /// Appends a context to this error. 
+ pub fn with_context(mut self, context: ErrorContext) -> Self { + self.contexts.push(context); + self + } +} + +impl fmt::Display for DeserializeError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + // `self.contexts` are ordered from the most specific one to the most general one + if !self.contexts.is_empty() { + write!(formatter, "[in ")?; + for (i, context) in self.contexts.iter().enumerate() { + write!(formatter, "{context}")?; + if i + 1 < self.contexts.len() { + write!(formatter, ", ")?; + } + } + write!(formatter, "] ")?; + } + write!(formatter, "{}", self.kind) + } +} + +impl error::Error for DeserializeError {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{types::Nibbles, Key}; + use zksync_types::U256; + + const TEST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); + + #[test] + fn displaying_deserialize_error_for_leaf() { + let err = leb128::read::Error::Overflow; + let key = Nibbles::new(&TEST_KEY, 5).with_version(5); + let err = DeserializeErrorKind::Leb128(err) + .with_context(ErrorContext::LeafIndex) + .with_context(ErrorContext::Leaf(key)); + let err = err.to_string(); + + assert!( + err.starts_with("[in leaf index, leaf at `5:deadb`]"), + "{err}" + ); + assert!(err.contains("failed reading LEB128-encoded value"), "{err}"); + } + + #[test] + fn displaying_deserialize_error_for_internal_node() { + let key = Nibbles::new(&TEST_KEY, 4).with_version(7); + let err = DeserializeErrorKind::UnexpectedEof + .with_context(ErrorContext::ChildrenMask) + .with_context(ErrorContext::InternalNode(key)); + let err = err.to_string(); + + assert!( + err.starts_with("[in children mask, internal node at `7:dead`]"), + "{err}" + ); + assert!(err.ends_with("unexpected end of input"), "{err}"); + } +} diff --git a/core/lib/merkle_tree2/src/hasher.rs b/core/lib/merkle_tree2/src/hasher.rs new file mode 100644 index 000000000000..bbf2eb5fc95a --- /dev/null +++ b/core/lib/merkle_tree2/src/hasher.rs @@ -0,0 +1,708 @@ +//! Hashing operations on the Merkle tree. + +use metrics::Unit; +use once_cell::sync::Lazy; + +use std::{ + fmt, iter, slice, + sync::atomic::{AtomicU64, Ordering}, +}; + +use crate::types::{ + BlockOutputWithProofs, ChildRef, InternalNode, Key, LeafNode, Node, TreeInstruction, + TreeLogEntry, ValueHash, TREE_DEPTH, +}; +use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + +/// Tree hashing functionality. +pub trait HashTree: Send + Sync { + /// Returns the unique name of the hasher. This is used in Merkle tree tags to ensure + /// that the tree remains consistent. + fn name(&self) -> &'static str; + + /// Hashes a leaf node. + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash; + /// Compresses hashes in an intermediate node of a binary Merkle tree. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash; + + /// Returns the hash of an empty subtree with the given depth. Implementations + /// are encouraged to cache the returned values. 
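+ /// A subtree of depth 0 is a single vacant leaf; a subtree of depth `TREE_DEPTH` (256) is + /// the entire empty tree.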
+ fn empty_subtree_hash(&self, depth: usize) -> ValueHash; +} + +impl dyn HashTree + '_ { + pub(crate) fn empty_tree_hash(&self) -> ValueHash { + self.empty_subtree_hash(TREE_DEPTH) + } + + fn fold_merkle_path( + &self, + path: &[ValueHash], + key: Key, + value_hash: ValueHash, + leaf_index: u64, + ) -> ValueHash { + let mut hash = self.hash_leaf(&value_hash, leaf_index); + let empty_hash_count = TREE_DEPTH - path.len(); + let empty_hashes = (0..empty_hash_count).map(|depth| self.empty_subtree_hash(depth)); + let full_path = empty_hashes.chain(path.iter().copied()); + for (depth, adjacent_hash) in full_path.enumerate() { + hash = if key.bit(depth) { + self.hash_branch(&adjacent_hash, &hash) + } else { + self.hash_branch(&hash, &adjacent_hash) + }; + } + hash + } + + pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { + HasherWithStats { + shared_stats: Some(stats), + ..HasherWithStats::from(self) + } + } +} + +impl fmt::Debug for dyn HashTree + '_ { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("HashTree").finish_non_exhaustive() + } +} + +/// No-op hasher that returns `H256::zero()` for all operations. +impl HashTree for () { + fn name(&self) -> &'static str { + "no_op256" + } + + fn hash_leaf(&self, _value_hash: &ValueHash, _leaf_index: u64) -> ValueHash { + ValueHash::zero() + } + + fn hash_branch(&self, _lhs: &ValueHash, _rhs: &ValueHash) -> ValueHash { + ValueHash::zero() + } + + fn empty_subtree_hash(&self, _depth: usize) -> ValueHash { + ValueHash::zero() + } +} + +impl HashTree for Blake2Hasher { + fn name(&self) -> &'static str { + "blake2s256" + } + + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + let mut bytes = [0_u8; 40]; + bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); + bytes[8..].copy_from_slice(value_hash.as_ref()); + self.hash_bytes(bytes) + } + + /// Compresses the hashes of 2 children in a branch node. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + self.compress(lhs, rhs) + } + + /// Returns the hash of an empty subtree with the given depth. + fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + static EMPTY_TREE_HASHES: Lazy> = Lazy::new(compute_empty_tree_hashes); + EMPTY_TREE_HASHES[depth] + } +} + +fn compute_empty_tree_hashes() -> Vec { + let empty_leaf_hash = Blake2Hasher.hash_bytes([0_u8; 40]); + iter::successors(Some(empty_leaf_hash), |hash| { + Some(Blake2Hasher.hash_branch(hash, hash)) + }) + .take(TREE_DEPTH + 1) + .collect() +} + +/// Hashing-related statistics reported as metrics for each block of operations. +#[derive(Debug, Default)] +#[must_use = "Stats should be `report()`ed"] +pub(crate) struct HashingStats { + hashed_bytes: AtomicU64, +} + +impl HashingStats { + pub fn describe() { + metrics::describe_gauge!( + "merkle_tree.finalize_patch.hashed_bytes", + Unit::Bytes, + "Total amount of hashing input performed while processing a single block" + ); + } + + #[allow(clippy::cast_precision_loss)] + pub fn report(self) { + let hashed_bytes = self.hashed_bytes.into_inner(); + metrics::gauge!( + "merkle_tree.finalize_patch.hashed_bytes", + hashed_bytes as f64 + ); + } +} + +/// Hasher that keeps track of hashing metrics. +/// +/// On drop, the metrics are merged into `shared_stats` (if present). Such roundabout handling +/// is motivated by efficiency; if atomics were to be used to track metrics (e.g., +/// via a wrapping `HashTree` implementation), this would tank performance because of contention. 
+#[derive(Debug)] +pub(crate) struct HasherWithStats<'a> { + inner: &'a dyn HashTree, + shared_stats: Option<&'a HashingStats>, + local_hashed_bytes: u64, +} + +impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { + fn from(inner: &'a dyn HashTree) -> Self { + Self { + inner, + shared_stats: None, + local_hashed_bytes: 0, + } + } +} + +impl<'a> AsRef for HasherWithStats<'a> { + fn as_ref(&self) -> &(dyn HashTree + 'a) { + self.inner + } +} + +impl Drop for HasherWithStats<'_> { + fn drop(&mut self) { + if let Some(shared_stats) = self.shared_stats { + shared_stats + .hashed_bytes + .fetch_add(self.local_hashed_bytes, Ordering::Relaxed); + } + } +} + +impl HasherWithStats<'_> { + fn hash_leaf(&mut self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + const HASHED_BYTES: u64 = 8 + ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_leaf(value_hash, leaf_index) + } + + fn hash_branch(&mut self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + const HASHED_BYTES: u64 = 2 * ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_branch(lhs, rhs) + } + + fn hash_optional_branch( + &mut self, + subtree_depth: usize, + lhs: Option, + rhs: Option, + ) -> Option { + match (lhs, rhs) { + (None, None) => None, + (Some(lhs), None) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&lhs, &empty_hash)) + } + (None, Some(rhs)) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&empty_hash, &rhs)) + } + (Some(lhs), Some(rhs)) => Some(self.hash_branch(&lhs, &rhs)), + } + } + + pub fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + self.inner.empty_subtree_hash(depth) + } +} + +impl LeafNode { + pub(crate) fn hash(&self, hasher: &mut HasherWithStats<'_>, level: usize) -> ValueHash { + let hashing_iterations = TREE_DEPTH - level; + let mut hash = hasher.hash_leaf(&self.value_hash, self.leaf_index); + for depth in 0..hashing_iterations { + let empty_tree_hash = hasher.empty_subtree_hash(depth); + hash = if self.full_key.bit(depth) { + hasher.hash_branch(&empty_tree_hash, &hash) + } else { + hasher.hash_branch(&hash, &empty_tree_hash) + }; + } + hash + } +} + +#[derive(Debug)] +pub(crate) struct MerklePath { + current_level: usize, + hashes: Vec, +} + +impl MerklePath { + pub fn new(level: usize) -> Self { + Self { + current_level: level, + hashes: Vec::new(), + } + } + + pub(crate) fn push(&mut self, hasher: &mut HasherWithStats<'_>, maybe_hash: Option) { + if let Some(hash) = maybe_hash { + self.hashes.push(hash); + } else if !self.hashes.is_empty() { + let depth = TREE_DEPTH - self.current_level; + let empty_subtree_hash = hasher.empty_subtree_hash(depth); + self.hashes.push(empty_subtree_hash); + } + self.current_level -= 1; + } + + pub fn into_inner(self) -> Vec { + debug_assert_eq!(self.current_level, 0); + self.hashes + } +} + +/// Cache of internal node hashes in an [`InternalNode`]. This cache is only used +/// in the full tree operation mode, when Merkle proofs are obtained for each operation. +#[derive(Debug, Default, Clone, Copy)] +pub(crate) struct InternalNodeCache { + // `None` corresponds to the hash of an empty subtree at the corresponding level. + // This allows reducing the number of hash operations at the cost of additional + // memory consumption. 
+ level1: [Option; 2], + level2: [Option; 4], + level3: [Option; 8], +} + +impl InternalNodeCache { + #[cfg(test)] + fn level(&self, level_in_tree: usize) -> &[Option] { + match level_in_tree { + 1 => &self.level1, + 2 => &self.level2, + 3 => &self.level3, + _ => unreachable!(), + } + } + + fn set_level(&mut self, level_in_tree: usize, source: &[Option]) { + match level_in_tree { + 0 => { /* do nothing */ } + 1 => self.level1.copy_from_slice(&source[..2]), + 2 => self.level2.copy_from_slice(&source[..4]), + 3 => self.level3.copy_from_slice(&source[..8]), + _ => unreachable!("Level in tree must be in 0..=3"), + } + } + + fn update_nibble( + &mut self, + level_hashes: &[Option], + hasher: &mut HasherWithStats<'_>, + level: usize, + nibble: u8, + ) -> ValueHash { + let mut idx = usize::from(nibble); + let mut node_hash = None; + let levels = [ + self.level3.as_mut_slice(), + self.level2.as_mut_slice(), + self.level1.as_mut_slice(), + slice::from_mut(&mut node_hash), + ]; + let mut level_hashes = level_hashes; + + for (level_in_tree, next_level_hashes) in (1..=4).rev().zip(levels) { + let overall_level = level + level_in_tree; + // Depth of a potential empty subtree rooted at the current level. + let subtree_depth = TREE_DEPTH - overall_level; + + let left_idx = idx - idx % 2; + let right_idx = left_idx + 1; + let branch_hash = hasher.hash_optional_branch( + subtree_depth, + level_hashes[left_idx], + level_hashes[right_idx], + ); + + idx /= 2; + next_level_hashes[idx] = branch_hash; + level_hashes = next_level_hashes; + } + node_hash.unwrap() // `unwrap()` is safe since we must have at least 1 child + } + + fn extend_merkle_path( + &self, + hasher: &mut HasherWithStats<'_>, + merkle_path: &mut MerklePath, + nibble: u8, + ) { + let mut idx = usize::from(nibble) / 2; + merkle_path.push(hasher, self.level3[idx ^ 1]); + idx /= 2; + merkle_path.push(hasher, self.level2[idx ^ 1]); + idx /= 2; + merkle_path.push(hasher, self.level1[idx ^ 1]); + } +} + +impl InternalNode { + /// Hashes this tree given the 0-based level of its tip. + fn hash_inner( + mut level_hashes: [Option; Self::CHILD_COUNT as usize], + hasher: &mut HasherWithStats<'_>, + level: usize, + mut cache: Option<&mut InternalNodeCache>, + ) -> ValueHash { + let mut next_level_len = level_hashes.len() / 2; + for level_in_tree in (1..=4).rev() { + let overall_level = level + level_in_tree; + // Depth of a potential empty subtree rooted at the current level. + let subtree_depth = TREE_DEPTH - overall_level; + + for i in 0..next_level_len { + level_hashes[i] = hasher.hash_optional_branch( + subtree_depth, + level_hashes[2 * i], + level_hashes[2 * i + 1], + ); + } + next_level_len /= 2; + + if let Some(cache) = cache.as_deref_mut() { + cache.set_level(level_in_tree - 1, &level_hashes); + } + } + level_hashes[0].unwrap_or_else(|| hasher.empty_subtree_hash(TREE_DEPTH - level)) + } + + pub(crate) fn hash(&self, hasher: &mut HasherWithStats<'_>, level: usize) -> ValueHash { + Self::hash_inner(self.child_hashes(), hasher, level, None) + } + + pub(crate) fn updater<'s, 'h>( + &'s mut self, + hasher: &'s mut HasherWithStats<'h>, + level: usize, + nibble: u8, + ) -> InternalNodeUpdater<'s, 'h> { + InternalNodeUpdater { + node: self, + hasher, + level, + nibble, + } + } +} + +#[derive(Debug)] +pub(crate) struct InternalNodeUpdater<'a, 'h> { + node: &'a mut InternalNode, + hasher: &'a mut HasherWithStats<'h>, + level: usize, + nibble: u8, +} + +impl InternalNodeUpdater<'_, '_> { + /// Ensures that the child reference for the affected nibble exists. 
Creates a new reference + /// with if necessary. + pub fn ensure_child_ref(&mut self, version: u64, is_leaf: bool) { + if let Some(child_ref) = self.node.child_ref_mut(self.nibble) { + child_ref.version = version; + child_ref.is_leaf = is_leaf; + } else { + let child_ref = if is_leaf { + ChildRef::leaf(version) + } else { + ChildRef::internal(version) + }; + self.node.insert_child_ref(self.nibble, child_ref); + } + } + + pub fn update_child_hash(&mut self, child_hash: ValueHash) -> ValueHash { + let child_ref = self.node.child_ref_mut(self.nibble).unwrap(); + child_ref.hash = child_hash; + let child_hashes = self.node.child_hashes(); + + if let Some(cache) = self.node.cache_mut() { + cache.update_nibble(&child_hashes, self.hasher, self.level, self.nibble) + } else { + let mut cache = Box::default(); + let node_hash = + InternalNode::hash_inner(child_hashes, self.hasher, self.level, Some(&mut cache)); + self.node.set_cache(cache); + node_hash + } + } + + pub fn extend_merkle_path(self, merkle_path: &mut MerklePath) { + merkle_path.hashes.reserve(4); + let adjacent_ref = self.node.child_ref(self.nibble ^ 1); + let adjacent_hash = adjacent_ref.map(|child| child.hash); + merkle_path.push(self.hasher, adjacent_hash); + + let cache = if let Some(cache) = self.node.cache_mut() { + cache + } else { + let child_hashes = self.node.child_hashes(); + let mut cache = Box::default(); + InternalNode::hash_inner(child_hashes, self.hasher, self.level, Some(&mut cache)); + self.node.set_cache(cache) + }; + cache.extend_merkle_path(self.hasher, merkle_path, self.nibble); + } +} + +impl Node { + pub(crate) fn hash(&self, hasher: &mut HasherWithStats<'_>, level: usize) -> ValueHash { + match self { + Self::Internal(node) => node.hash(hasher, level), + Self::Leaf(leaf) => leaf.hash(hasher, level), + } + } +} + +impl BlockOutputWithProofs { + /// Verifies this output against the trusted old root hash of the tree and + /// the applied instructions. + /// + /// # Panics + /// + /// Panics if the proof doesn't verify. 
+ pub fn verify_proofs( + &self, + hasher: &dyn HashTree, + old_root_hash: ValueHash, + instructions: &[(Key, TreeInstruction)], + ) { + assert_eq!(instructions.len(), self.logs.len()); + + let mut root_hash = old_root_hash; + for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { + assert!(op.merkle_path.len() <= TREE_DEPTH); + if matches!(instruction, TreeInstruction::Read) { + assert_eq!(op.root_hash, root_hash); + assert!(op.base.is_read()); + } else { + assert!(!op.base.is_read()); + } + + let (prev_leaf_index, leaf_index, prev_value) = match op.base { + TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), + TreeLogEntry::Updated { + leaf_index, + previous_value, + } => (leaf_index, leaf_index, previous_value), + + TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), + TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), + }; + + let prev_hash = + hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); + assert_eq!(prev_hash, root_hash); + if let TreeInstruction::Write(value) = instruction { + let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); + assert_eq!(next_hash, op.root_hash); + } + root_hash = op.root_hash; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::ChildRef; + use zksync_types::{AccountTreeId, Address, StorageKey, H256}; + + #[test] + fn empty_tree_hash_is_as_expected() { + const EXPECTED_HASH: ValueHash = H256([ + 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, + 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106, + ]); + + let hasher: &dyn HashTree = &Blake2Hasher; + assert_eq!(hasher.empty_tree_hash(), EXPECTED_HASH); + } + + #[test] + fn leaf_is_hashed_as_expected() { + // Reference value taken from the previous implementation. 
+ const EXPECTED_HASH: ValueHash = H256([ + 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, + 105, 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, + ]); + + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let stats = HashingStats::default(); + let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); + let leaf_hash = leaf.hash(&mut hasher, 0); + assert_eq!(leaf_hash, EXPECTED_HASH); + + drop(hasher); + assert!(stats.hashed_bytes.into_inner() > 100); + + let hasher: &dyn HashTree = &Blake2Hasher; + let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); + assert_eq!(folded_hash, EXPECTED_HASH); + } + + #[test] + fn folding_merkle_path() { + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let leaf_hash = leaf.hash(&mut hasher, 2); + assert!(key.bit(254) && !key.bit(255)); + let merkle_path = [H256([2; 32]), H256([3; 32])]; + let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); + let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); + + let folded_hash = hasher + .inner + .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); + assert_eq!(folded_hash, expected_hash); + } + + fn test_internal_node_hashing(child_indexes: &[u8]) { + println!("Testing indices: {child_indexes:?}"); + + let mut internal_node = InternalNode::default(); + for &nibble in child_indexes { + internal_node.insert_child_ref(nibble, ChildRef::leaf(1)); + internal_node.child_ref_mut(nibble).unwrap().hash = H256([nibble; 32]); + } + + let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let node_hash = + InternalNode::hash_inner(internal_node.child_hashes(), &mut hasher, 252, None); + + // Compute the expected hash manually. 
+ let mut level = [hasher.empty_subtree_hash(0); 16]; + for &nibble in child_indexes { + level[nibble as usize] = H256([nibble; 32]); + } + for half_len in [8, 4, 2, 1] { + for i in 0..half_len { + level[i] = Blake2Hasher.compress(&level[2 * i], &level[2 * i + 1]); + } + } + + assert_eq!(node_hash, level[0]); + } + + #[test] + fn hashing_internal_node() { + for idx in 0..16 { + test_internal_node_hashing(&[idx]); + } + for idx in 0..15 { + for other_idx in (idx + 1)..16 { + test_internal_node_hashing(&[idx, other_idx]); + } + } + + test_internal_node_hashing(&[5, 7, 8]); + test_internal_node_hashing(&[8, 13, 15]); + test_internal_node_hashing(&[0, 1, 2, 3, 5]); + test_internal_node_hashing(&[1, 2, 3, 4, 5, 6, 7]); + test_internal_node_hashing(&[0, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + test_internal_node_hashing(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + } + + fn test_updating_child_hash_in_internal_node(child_indexes: &[u8]) { + let mut internal_node = InternalNode::default(); + let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + + for (child_idx, &nibble) in child_indexes.iter().enumerate() { + internal_node.insert_child_ref(nibble, ChildRef::leaf(1)); + + let mut updater = internal_node.updater(&mut hasher, 252, nibble); + let node_hash = updater.update_child_hash(H256([nibble; 32])); + let mut merkle_path = MerklePath::new(TREE_DEPTH); + updater.extend_merkle_path(&mut merkle_path); + let merkle_path = merkle_path.hashes; + assert!(merkle_path.len() <= 4); + + // Compute the expected hashes in the cache manually. + let cache = *internal_node.cache_mut().unwrap(); + let mut level = [hasher.empty_subtree_hash(0); 16]; + for &nibble in &child_indexes[..=child_idx] { + level[nibble as usize] = H256([nibble; 32]); + } + + for (half_len, level_in_tree) in [(8, 3), (4, 2), (2, 1), (1, 0)] { + let idx_in_merkle_path = merkle_path.len().checked_sub(level_in_tree + 1); + let hash_from_merkle_path = idx_in_merkle_path.map(|idx| merkle_path[idx]); + let nibble_idx = usize::from(nibble) >> (3 - level_in_tree); + let adjacent_hash = level[nibble_idx ^ 1]; + if let Some(hash) = hash_from_merkle_path { + assert_eq!(hash, adjacent_hash); + } else { + assert_eq!(adjacent_hash, hasher.empty_subtree_hash(3 - level_in_tree)); + } + + for i in 0..half_len { + level[i] = Blake2Hasher.compress(&level[2 * i], &level[2 * i + 1]); + } + + if level_in_tree == 0 { + assert_eq!(node_hash, level[0]); + } else { + let cache_level = cache.level(level_in_tree); + assert_eq!(cache_level.len(), half_len); + for (cached, computed) in cache_level.iter().zip(&level[..half_len]) { + if let Some(cached) = cached { + assert_eq!(cached, computed); + } else { + assert_eq!(*computed, hasher.empty_subtree_hash(4 - level_in_tree)); + } + } + } + } + } + } + + #[test] + fn updating_internal_node_cache() { + for idx in 0..16 { + test_updating_child_hash_in_internal_node(&[idx]); + } + for idx in 0..15 { + for other_idx in (idx + 1)..16 { + test_updating_child_hash_in_internal_node(&[idx, other_idx]); + } + } + + test_updating_child_hash_in_internal_node(&[5, 7, 8]); + test_updating_child_hash_in_internal_node(&[8, 13, 15]); + test_updating_child_hash_in_internal_node(&[0, 1, 2, 3, 5]); + test_updating_child_hash_in_internal_node(&[1, 2, 3, 4, 5, 6, 7]); + test_updating_child_hash_in_internal_node(&[0, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + test_updating_child_hash_in_internal_node(&[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + ]); + } +} diff --git a/core/lib/merkle_tree2/src/lib.rs 
b/core/lib/merkle_tree2/src/lib.rs new file mode 100644 index 000000000000..6951289e91df --- /dev/null +++ b/core/lib/merkle_tree2/src/lib.rs @@ -0,0 +1,271 @@ +//! Sparse Merkle tree implementation based on Diem [Jellyfish Merkle tree]. +//! +//! # Overview +//! +//! The crate provides two major abstractions: domain-independent [`MerkleTree`] and +//! domain-specific [`ZkSyncTree`](domain::ZkSyncTree); the latter wraps `MerkleTree`. +//! +//! The database backend is abstracted via the [`Database`] trait (a key-value storage), which has +//! the following implementations: +//! +//! - [`RocksDBWrapper`] is a wrapper around RocksDB +//! - [`PatchSet`] is an in-memory implementation useful for testing / benchmarking +//! - [`Patched`] is a wrapper combining the persistent backend and a [`PatchSet`]. It's used +//! in `ZkSyncTree` to accumulate changes before flushing them to RocksDB. +//! +//! The hashing backend is abstracted via the [`HashTree`] trait, which has the following +//! implementations: +//! +//! - [`Blake2Hasher`] is the main implementation based on Blake2s-256 +//! - `()` provides a no-op implementation useful for benchmarking. +//! +//! # Tree hashing specification +//! +//! A tree is hashed as if it was a full binary Merkle tree with `2^256` leaves: +//! +//! - Hash of a vacant leaf is `hash([0_u8; 40])`, where `hash` is the hash function used +//! (Blake2s-256). +//! - Hash of an occupied leaf is `hash(u64::to_be_bytes(leaf_index) ++ value_hash)`, +//! where `leaf_index` is the 1-based index of the leaf key in the order of insertion, +//! `++` is byte concatenation. +//! - Hash of an internal node is `hash(left_child_hash ++ right_child_hash)`. +//! +//! [Jellyfish Merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf + +// Linter settings. +#![warn(missing_debug_implementations, missing_docs, bare_trait_objects)] +#![warn(clippy::all, clippy::pedantic)] +#![allow( + clippy::must_use_candidate, + clippy::module_name_repetitions, + clippy::doc_markdown // frequent false positive: RocksDB +)] + +mod consistency; +pub mod domain; +mod errors; +mod hasher; +mod storage; +mod types; +mod utils; + +pub use crate::{ + hasher::HashTree, + storage::{Database, PatchSet, Patched, RocksDBWrapper}, + types::{ + BlockOutput, BlockOutputWithProofs, Key, Root, TreeInstruction, TreeLogEntry, + TreeLogEntryWithProof, ValueHash, + }, +}; + +use crate::storage::Storage; +use zksync_crypto::hasher::blake2::Blake2Hasher; + +/// Binary Merkle tree implemented using AR16MT from Diem [Jellyfish Merkle tree] white paper. +/// +/// A tree is persistent and is backed by a key-value store (the `DB` type param). It is versioned, +/// meaning that the store retains *all* versions of the tree since its inception. A version +/// corresponds to a block number in the domain model; it is a `u64` counter incremented each time +/// a block of changes is committed into the tree via [`Self::extend()`]. It is possible to reset +/// the tree to a previous version via [`Self::truncate_versions()`]. +/// +/// # Tree structure +/// +/// The tree store principally contains the following information: +/// +/// - The tree *manifest* specifying version-independent information (right now, this is just +/// the number of versions). +/// - For each of the stored versions: tree *root* containing the number of leaves +/// and the root node of the tree. +/// - *Nodes* of the particular version of the tree keyed by version + the path from the root +/// of the tree to the node. 
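To make the hashing specification above concrete, here is a small illustrative sketch of the leaf and branch hashing rules, assuming the `HashTree` trait exported by this crate and the `Blake2Hasher` from `zksync_crypto`:

```rust
use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher};
use zksync_merkle_tree2::HashTree;
use zksync_types::H256;

fn main() {
    // Hash of a vacant leaf: hash([0_u8; 40]).
    let vacant_leaf = Blake2Hasher.hash_bytes([0_u8; 40]);
    assert_eq!(vacant_leaf, Blake2Hasher.empty_subtree_hash(0));

    // Hash of an occupied leaf: hash(u64::to_be_bytes(leaf_index) ++ value_hash).
    let occupied_leaf = Blake2Hasher.hash_leaf(&H256([1; 32]), 1);

    // Hash of an internal node: hash(left_child_hash ++ right_child_hash).
    let branch = Blake2Hasher.hash_branch(&occupied_leaf, &vacant_leaf);
    println!("{branch:?}");
}
```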
+/// +/// To be more I/O-efficient (at the cost of some additional hashing operations), the tree +/// is stored in the radix-16 format. That is, each internal node may have up to 16 children. +/// From the storage perspective, an internal node contains *child references*. A reference +/// consists of the following data: +/// +/// - Version of the tree the child first appeared in +/// - Node type (internal node or leaf; used for deserialization) +/// - Node hash +/// +/// Tree nodes are immutable; that's why addressing a child by version works, and a hash +/// mentioned in a child reference cannot become outdated. Immutability and structuring storage +/// keys for tree nodes so that nodes of the same version are grouped together makes +/// DB read / write patterns optimal for RocksDB. +/// +/// Another optimization is that paths of internal nodes that do not fork (i.e., lead to +/// a single child) are removed. In other words, a leaf node may be placed at any tree level, +/// not just the lowest possible one. Correspondingly, a leaf node besides a value hash +/// stores the full key, since it cannot be restored from other information. +/// +/// The I/O optimizations do not influence tree hashing. +/// +/// [Jellyfish Merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf +#[derive(Debug)] +pub struct MerkleTree<'a, DB: ?Sized> { + db: &'a DB, + hasher: &'a dyn HashTree, +} + +impl<'a, DB: Database + ?Sized> MerkleTree<'a, DB> { + /// Loads a tree with the default Blake2 hasher. + /// + /// # Panics + /// + /// Panics in the same situations as [`Self::with_hasher()`]. + pub fn new(db: &'a DB) -> Self { + Self::with_hasher(db, &Blake2Hasher) + } +} + +impl<'a, DB> MerkleTree<'a, DB> +where + DB: Database + ?Sized, +{ + /// Loads a tree with the specified hasher. + /// + /// # Panics + /// + /// Panics if the hasher or basic tree parameters (e.g., the tree depth) + /// do not match those of the tree loaded from the database. + pub fn with_hasher(db: &'a DB, hasher: &'a dyn HashTree) -> Self { + let tags = db.manifest().and_then(|manifest| manifest.tags); + if let Some(tags) = tags { + tags.assert_consistency(hasher); + } + // If there are currently no tags in the tree, we consider that it fits + // for backward compatibility. The tags will be added the next time the tree is saved. + + Self { db, hasher } + } + + /// Returns the root hash of a tree at the specified `version`, or `None` if the version + /// was not written yet. + pub fn root_hash(&self, version: u64) -> Option<ValueHash> { + let root = self.root(version)?; + let Root::Filled { node, .. } = root else { + return Some(self.hasher.empty_tree_hash()); + }; + Some(node.hash(&mut self.hasher.into(), 0)) + } + + pub(crate) fn root(&self, version: u64) -> Option<Root> { + self.db.root(version) + } + + /// Returns the latest version of the tree present in the database, or `None` if + /// no versions are present yet. + pub fn latest_version(&self) -> Option<u64> { + self.db.manifest()?.version_count.checked_sub(1) + } + + /// Returns the root hash for the latest version of the tree. + pub fn latest_root_hash(&self) -> ValueHash { + let root_hash = self + .latest_version() + .and_then(|version| self.root_hash(version)); + root_hash.unwrap_or_else(|| self.hasher.empty_tree_hash()) + } + + /// Returns the latest-versioned root node.
+ pub(crate) fn latest_root(&self) -> Root { + let root = self.latest_version().and_then(|version| self.root(version)); + root.unwrap_or(Root::Empty) + } + + /// Removes the most recent versions from the database and returns the patch set + /// that should be applied to the database in order for the changes to take effect. + /// + /// The current implementation does not actually remove node data for the removed versions + /// since it's likely to be reused in the future (especially upper-level internal nodes). + pub fn truncate_versions(self, retained_version_count: u64) -> Option<PatchSet> { + let mut manifest = self.db.manifest().unwrap_or_default(); + if manifest.version_count <= retained_version_count { + None + } else { + manifest.version_count = retained_version_count; + Some(PatchSet::from_manifest(manifest)) + } + } + + /// Extends this tree by creating its new version. + /// + /// # Return value + /// + /// Returns a pair consisting of: + /// + /// - Information about the update such as the final tree hash. + /// - [`PatchSet`] with the changes to tree nodes. The patch must be applied to the database + /// using [`Database::apply_patch()`] before the next `version` of changes is processed. + pub fn extend(self, key_value_pairs: Vec<(Key, ValueHash)>) -> (BlockOutput, PatchSet) { + let next_version = self.db.manifest().unwrap_or_default().version_count; + let storage = Storage::new(self.db, next_version); + storage.extend(self.hasher, key_value_pairs) + } + + /// Extends this tree by creating its new version, computing an authenticity Merkle proof + /// for each provided instruction. + /// + /// # Return value + /// + /// Returns a pair consisting of: + /// + /// - Information about the update such as the final tree hash and proofs for each input + /// instruction. + /// - [`PatchSet`] with the changes to tree nodes. The patch must be applied to the database + /// using [`Database::apply_patch()`] before the next `version` of changes is processed.
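+ ///
+ /// # Example
+ ///
+ /// A rough usage sketch (an editorial illustration rather than part of the original change;
+ /// it assumes the crate's public types are in scope, and the specific keys and values are
+ /// arbitrary):
+ ///
+ /// ```ignore
+ /// let mut db = PatchSet::default();
+ /// let instructions = vec![
+ ///     (Key::from(1_u64), TreeInstruction::Write(ValueHash::zero())),
+ ///     (Key::from(2_u64), TreeInstruction::Read),
+ /// ];
+ /// let (_output, patch) = MerkleTree::new(&db).extend_with_proofs(instructions);
+ /// // The patch must be persisted before the next block of changes is processed.
+ /// db.apply_patch(patch);
+ /// ```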
+ pub fn extend_with_proofs( + self, + instructions: Vec<(Key, TreeInstruction)>, + ) -> (BlockOutputWithProofs, PatchSet) { + let next_version = self.db.manifest().unwrap_or_default().version_count; + let storage = Storage::new(self.db, next_version); + storage.extend_with_proofs(self.hasher, instructions) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::TreeTags; + + #[test] + #[should_panic(expected = "Unsupported tree architecture `AR64MT`, expected `AR16MT`")] + fn tree_architecture_mismatch() { + let mut db = PatchSet::default(); + db.manifest_mut().tags = Some(TreeTags { + architecture: "AR64MT".to_owned(), + depth: 256, + hasher: "blake2s256".to_string(), + }); + + MerkleTree::new(&db); + } + + #[test] + #[should_panic(expected = "Unexpected tree depth: expected 256, got 128")] + fn tree_depth_mismatch() { + let mut db = PatchSet::default(); + db.manifest_mut().tags = Some(TreeTags { + architecture: "AR16MT".to_owned(), + depth: 128, + hasher: "blake2s256".to_string(), + }); + + MerkleTree::new(&db); + } + + #[test] + #[should_panic(expected = "Mismatch between the provided tree hasher `blake2s256`")] + fn hasher_mismatch() { + let mut db = PatchSet::default(); + db.manifest_mut().tags = Some(TreeTags { + architecture: "AR16MT".to_owned(), + depth: 256, + hasher: "sha256".to_string(), + }); + + MerkleTree::new(&db); + } +} diff --git a/core/lib/merkle_tree2/src/storage/database.rs b/core/lib/merkle_tree2/src/storage/database.rs new file mode 100644 index 000000000000..b4ecf1852a35 --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/database.rs @@ -0,0 +1,565 @@ +//! `Database` trait and its implementations. + +use rayon::prelude::*; + +use std::path::Path; + +use crate::{ + errors::{DeserializeError, ErrorContext}, + storage::patch::PatchSet, + types::{InternalNode, LeafNode, Manifest, Node, NodeKey, Root}, +}; +use zksync_storage::{ + db::{self, MerkleTreeColumnFamily}, + rocksdb::WriteBatch, + RocksDB, +}; + +/// Slice of node keys together with an indicator whether a node at the requested key is a leaf. +/// Used in [`Database::tree_nodes()`]. +pub type NodeKeys = [(NodeKey, bool)]; + +/// Generic database functionality. Its main implementation is [`RocksDB`]. +pub trait Database: Send + Sync { + /// Tries to read the tree [`Manifest`]. + /// + /// # Errors + /// + /// Returns a deserialization error if any. + fn try_manifest(&self) -> Result<Option<Manifest>, DeserializeError>; + /// Returns the tree manifest. + /// + /// # Panics + /// + /// Panics on deserialization errors. + fn manifest(&self) -> Option<Manifest> { + self.try_manifest().unwrap_or_else(|err| panic!("{err}")) + } + + /// Tries to obtain a root from this storage. + /// + /// # Errors + /// + /// Returns a deserialization error if any. + fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError>; + /// Obtains a root from the tree storage. + /// + /// # Panics + /// + /// Panics on deserialization errors. + fn root(&self, version: u64) -> Option<Root> { + self.try_root(version).unwrap_or_else(|err| panic!("{err}")) + } + + /// Obtains a node with the specified key from the tree storage. Root nodes are obtained + /// using [`Self::root()`], never this method. + /// + /// # Errors + /// + /// Returns a deserialization error if any. + fn try_tree_node(&self, key: &NodeKey, is_leaf: bool) + -> Result<Option<Node>, DeserializeError>; + /// Obtains a node with the specified key from the tree storage. + /// + /// # Panics + /// + /// Panics on deserialization errors.
+ fn tree_node(&self, key: &NodeKey, is_leaf: bool) -> Option<Node> { + self.try_tree_node(key, is_leaf) + .unwrap_or_else(|err| panic!("{err}")) + } + + /// Obtains nodes with the specified keys from the tree storage. The nodes + /// are returned in a `Vec` in the same order as requested. + /// + /// # Panics + /// + /// Panics on deserialization errors. + fn tree_nodes(&self, keys: &NodeKeys) -> Vec<Option<Node>> { + let nodes = keys + .iter() + .map(|(key, is_leaf)| self.try_tree_node(key, *is_leaf)); + nodes + .collect::<Result<_, _>>() + .unwrap_or_else(|err| panic!("{err}")) + } + + /// Applies changes in the `patch` to this database. This operation should be atomic. + fn apply_patch(&mut self, patch: PatchSet); +} + +/// Main [`Database`] implementation wrapping a [`RocksDB`] reference. +#[derive(Debug)] +pub struct RocksDBWrapper { + db: RocksDB, + multi_get_chunk_size: usize, +} + +impl RocksDBWrapper { + /// Key to store the tree [`Manifest`]. + // This key must not overlap with keys for nodes; easy to see that it's true, + // since the minimum node key is [0, 0, 0, 0, 0, 0, 0, 0]. + const MANIFEST_KEY: &'static [u8] = &[0]; + + /// Creates a new wrapper, initializing RocksDB at the specified directory. + pub fn new(path: impl AsRef<Path>) -> Self { + let db = RocksDB::new(db::Database::MerkleTree, path, true); + Self::from(db) + } + + /// Sets the chunk size for multi-get operations. The requested keys will be split + /// into chunks of this size and requested in parallel using `rayon`. Setting chunk size + /// to a large value (e.g., `usize::MAX`) will effectively disable parallelism. + /// + /// [RocksDB docs] claim that multi-get operations may be parallelized internally, + /// but this seems to be dependent on the env; it may be the case that (single-threaded) + /// I/O parallelization is only achieved using `liburing`, which requires enabling + /// the `io-uring` feature of `rocksdb` crate and is only available on Linux. + /// Thus, setting this value to around `100..1_000` can still lead to a substantial + /// performance boost (on the order of 2x) in some environments. + /// + /// [RocksDB docs]: https://github.com/facebook/rocksdb/wiki/MultiGet-Performance + pub fn set_multi_get_chunk_size(&mut self, chunk_size: usize) { + self.multi_get_chunk_size = chunk_size; + } + + fn raw_node(&self, key: &[u8]) -> Option<Vec<u8>> { + let tree_cf = self.db.cf_merkle_tree_handle(MerkleTreeColumnFamily::Tree); + self.db + .get_cf(tree_cf, key) + .expect("Failed reading from RocksDB") + } + + fn raw_nodes(&self, keys: &NodeKeys) -> Vec<Option<Vec<u8>>> { + // `par_chunks()` below uses `rayon` to speed up multi-get I/O; + // see `Self::set_multi_get_chunk_size()` docs for an explanation why this makes sense. + keys.par_chunks(self.multi_get_chunk_size) + .map(|chunk| { + let tree_cf = self.db.cf_merkle_tree_handle(MerkleTreeColumnFamily::Tree); + let keys = chunk.iter().map(|(key, _)| (tree_cf, key.to_db_key())); + + let results = self.db.multi_get_cf(keys); + results + .into_iter() + .map(|result| result.expect("Failed reading from RocksDB")) + }) + .flatten_iter() + .collect() + } + + fn deserialize_node( + raw_node: &[u8], + key: &NodeKey, + is_leaf: bool, + ) -> Result<Node, DeserializeError> { + // If we didn't succeed with the patch set, or the key version is old, + // access the underlying storage.
+ let node = if is_leaf { + LeafNode::deserialize(raw_node).map(Node::Leaf) + } else { + InternalNode::deserialize(raw_node).map(Node::Internal) + }; + node.map_err(|err| { + err.with_context(if is_leaf { + ErrorContext::Leaf(*key) + } else { + ErrorContext::InternalNode(*key) + }) + }) + } + + /// Returns the wrapped RocksDB instance. + pub fn into_inner(self) -> RocksDB { + self.db + } +} + +impl From<RocksDB> for RocksDBWrapper { + fn from(db: RocksDB) -> Self { + Self { + db, + multi_get_chunk_size: usize::MAX, + } + } +} + +impl Database for RocksDBWrapper { + fn try_manifest(&self) -> Result<Option<Manifest>, DeserializeError> { + let Some(raw_manifest) = self.raw_node(Self::MANIFEST_KEY) else { + return Ok(None); + }; + Manifest::deserialize(&raw_manifest) + .map(Some) + .map_err(|err| err.with_context(ErrorContext::Manifest)) + } + + fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError> { + let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else { + return Ok(None); + }; + Root::deserialize(&raw_root) + .map(Some) + .map_err(|err| err.with_context(ErrorContext::Root(version))) + } + + fn try_tree_node( + &self, + key: &NodeKey, + is_leaf: bool, + ) -> Result<Option<Node>, DeserializeError> { + let Some(raw_node) = self.raw_node(&key.to_db_key()) else { + return Ok(None); + }; + Self::deserialize_node(&raw_node, key, is_leaf).map(Some) + } + + fn tree_nodes(&self, keys: &NodeKeys) -> Vec<Option<Node>> { + let raw_nodes = self.raw_nodes(keys).into_iter().zip(keys); + + let nodes = raw_nodes.map(|(maybe_node, (key, is_leaf))| { + maybe_node + .map(|raw_node| Self::deserialize_node(&raw_node, key, *is_leaf)) + .transpose() + }); + nodes + .collect::<Result<_, _>>() + .unwrap_or_else(|err| panic!("{err}")) + } + + fn apply_patch(&mut self, patch: PatchSet) { + let tree_cf = self.db.cf_merkle_tree_handle(MerkleTreeColumnFamily::Tree); + let mut write_batch = WriteBatch::default(); + let mut node_bytes = Vec::with_capacity(128); + // ^ 128 looks somewhat reasonable as node capacity + + patch.manifest.serialize(&mut node_bytes); + write_batch.put_cf(tree_cf, Self::MANIFEST_KEY, &node_bytes); + + for (root_version, root) in patch.roots { + node_bytes.clear(); + let root_key = NodeKey::empty(root_version); + // Delete the key range corresponding to the entire new version. This removes + // potential garbage left after reverting the tree to a previous version.
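+ // (The DB key of a node starts with its version, so the half-open range below covers
+ // exactly the nodes stored for `root_version`.)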
+ let next_root_key = NodeKey::empty(root_version + 1); + write_batch.delete_range_cf(tree_cf, root_key.to_db_key(), next_root_key.to_db_key()); + + root.serialize(&mut node_bytes); + write_batch.put_cf(tree_cf, root_key.to_db_key(), &node_bytes); + } + + let all_nodes = patch.nodes_by_version.into_values().flatten(); + for (node_key, node) in all_nodes { + node_bytes.clear(); + node.serialize(&mut node_bytes); + write_batch.put_cf(tree_cf, node_key.to_db_key(), &node_bytes); + } + + self.db + .write(write_batch) + .expect("Failed writing a batch to RocksDB"); + } +} + +impl Database for PatchSet { + fn try_manifest(&self) -> Result<Option<Manifest>, DeserializeError> { + Ok(Some(self.manifest.clone())) + } + + fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError> { + Ok(self.roots.get(&version).cloned()) + } + + fn try_tree_node( + &self, + key: &NodeKey, + is_leaf: bool, + ) -> Result<Option<Node>, DeserializeError> { + let node = self + .nodes_by_version + .get(&key.version) + .and_then(|nodes| nodes.get(key)); + let Some(node) = node.cloned() else { + return Ok(None); + }; + debug_assert_eq!( + matches!(node, Node::Leaf(_)), + is_leaf, + "Internal check failed: node at {key:?} is requested as {ty}, \ + but has the opposite type", + ty = if is_leaf { "leaf" } else { "internal node" } + ); + Ok(Some(node)) + } + + fn apply_patch(&mut self, other: PatchSet) { + let new_version_count = other.manifest.version_count; + if new_version_count < self.manifest.version_count { + // Remove obsolete roots and nodes from the patch. + self.roots.retain(|&version, _| version < new_version_count); + self.nodes_by_version + .retain(|&version, _| version < new_version_count); + } + self.manifest = other.manifest; + self.roots.extend(other.roots); + self.nodes_by_version.extend(other.nodes_by_version); + } +} + +/// Wrapper for a [`Database`] that also contains in-memory [`PatchSet`] on top of it. +// We need to be careful to not over-delegate to the wrapped DB when the `PatchSet` contains +// an instruction to truncate tree versions. In order to do this, we use the +// `is_responsible_for_version()` in `PatchSet`, which is based not only on the contained +// tree roots, but on the manifest as well. +#[derive(Debug)] +pub struct Patched<DB> { + inner: DB, + patch: Option<PatchSet>, +} + +impl<DB: Database> Patched<DB> { + /// Wraps the provided database. + pub fn new(inner: DB) -> Self { + Self { inner, patch: None } + } + + /// Flushes changes from RAM to the wrapped database. + pub fn flush(&mut self) { + if let Some(patch) = self.patch.take() { + self.inner.apply_patch(patch); + } + } + + /// Forgets about changes held in RAM. + pub fn reset(&mut self) { + self.patch = None; + } + + /// Returns the wrapped database. + /// + /// # Panics + /// + /// Panics if the database contains uncommitted changes. Call [`Self::flush()`] + /// or [`Self::reset()`] beforehand to avoid this panic.
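+ ///
+ /// # Example
+ ///
+ /// A rough usage sketch (an editorial illustration, not part of the original change):
+ ///
+ /// ```ignore
+ /// let mut patched = Patched::new(PatchSet::default());
+ /// // ...accumulate changes via `Database::apply_patch()`...
+ /// patched.flush(); // persist the accumulated changes to the wrapped database
+ /// let db = patched.into_inner(); // would panic if changes were still pending
+ /// ```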
+ pub fn into_inner(self) -> DB { + assert!( + self.patch.is_none(), + "The `Patched` database contains uncommitted changes" + ); + self.inner + } +} + +impl<DB: Database> Database for Patched<DB> { + fn try_manifest(&self) -> Result<Option<Manifest>, DeserializeError> { + if let Some(patch) = &self.patch { + Ok(Some(patch.manifest.clone())) + } else { + self.inner.try_manifest() + } + } + + fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError> { + if let Some(patch) = &self.patch { + if patch.is_responsible_for_version(version) { + return Ok(patch.roots.get(&version).cloned()); + } + } + self.inner.try_root(version) + } + + fn try_tree_node( + &self, + key: &NodeKey, + is_leaf: bool, + ) -> Result<Option<Node>, DeserializeError> { + let Some(patch) = &self.patch else { + return self.inner.try_tree_node(key, is_leaf); + }; + + if patch.is_responsible_for_version(key.version) { + patch.try_tree_node(key, is_leaf) // to make use of debug assertions + } else { + self.inner.try_tree_node(key, is_leaf) + } + } + + fn tree_nodes(&self, keys: &NodeKeys) -> Vec<Option<Node>> { + let Some(patch) = &self.patch else { + return self.inner.tree_nodes(keys); + }; + + let mut is_in_patch = Vec::with_capacity(keys.len()); + let (patch_keys, db_keys): (Vec<_>, Vec<_>) = keys.iter().copied().partition(|(key, _)| { + let flag = patch.is_responsible_for_version(key.version); + is_in_patch.push(flag); + flag + }); + + let mut patch_values = patch.tree_nodes(&patch_keys).into_iter(); + let mut db_values = self.inner.tree_nodes(&db_keys).into_iter(); + + let values = is_in_patch.into_iter().map(|is_in_patch| { + if is_in_patch { + patch_values.next().unwrap() + } else { + db_values.next().unwrap() + } + }); + values.collect() + } + + fn apply_patch(&mut self, patch: PatchSet) { + if let Some(existing_patch) = &mut self.patch { + existing_patch.apply_patch(patch); + } else { + self.patch = Some(patch); + } + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use tempfile::TempDir; + + use std::collections::{HashMap, HashSet}; + + use super::*; + use crate::types::Nibbles; + use zksync_types::{H256, U256}; + + const TEST_KEY: U256 = U256([0, 0, 0, 0x_dead_beef_0000_0000]); + + fn generate_nodes(version: u64, nibble_counts: &[usize]) -> HashMap<NodeKey, Node> { + let nodes = nibble_counts.iter().map(|&count| { + assert_ne!(count, 0); + let key = Nibbles::new(&TEST_KEY, count).with_version(version); + let node = LeafNode::new(TEST_KEY, H256::zero(), count as u64); + (key, node.into()) + }); + nodes.collect() + } + + #[test] + fn requesting_nodes_in_patched_db() { + let root = Root::new(2, Node::Internal(InternalNode::default())); + let old_nodes = generate_nodes(0, &[1, 2]); + // ^ These `nodes` and `root` do not comprise a valid tree, but this is fine + // for the purposes of this test.
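+ // `&()` below is the no-op hasher; hashes are irrelevant for these storage-level tests.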
+ let db = PatchSet::new(0, &(), root, old_nodes.clone()); + let mut patched = Patched::new(db); + + let new_root = Root::new(3, Node::Internal(InternalNode::default())); + let new_nodes = generate_nodes(1, &[3, 4, 5]); + let patch = PatchSet::new(1, &(), new_root, new_nodes.clone()); + patched.apply_patch(patch); + + let (&old_key, expected_node) = old_nodes.iter().next().unwrap(); + let node = patched.tree_node(&old_key, true).unwrap(); + assert_eq!(node, *expected_node); + let (&new_key, expected_new_node) = new_nodes.iter().next().unwrap(); + let node = patched.tree_node(&new_key, true).unwrap(); + assert_eq!(node, *expected_new_node); + + let missing_keys = [ + Nibbles::new(&TEST_KEY, 3).with_version(0), + Nibbles::new(&TEST_KEY, 1).with_version(1), + Nibbles::new(&TEST_KEY, 7).with_version(1), + Nibbles::new(&TEST_KEY, 2).with_version(2), + ]; + for missing_key in missing_keys { + assert!(patched.tree_node(&missing_key, true).is_none()); + } + + let requested_keys = [(old_key, true), (new_key, true)]; + let nodes = patched.tree_nodes(&requested_keys); + assert_matches!( + nodes.as_slice(), + [Some(a), Some(b)] if a == expected_node && b == expected_new_node + ); + + let requested_keys = [(new_key, true), (missing_keys[0], false), (old_key, true)]; + let nodes = patched.tree_nodes(&requested_keys); + assert_matches!( + nodes.as_slice(), + [Some(a), None, Some(b)] if a == expected_new_node && b == expected_node + ); + + let requested_keys = missing_keys.map(|key| (key, true)); + let nodes = patched.tree_nodes(&requested_keys); + assert_eq!(nodes.as_slice(), [None, None, None, None]); + + let requested_keys: Vec<_> = old_nodes + .keys() + .chain(&missing_keys) + .chain(new_nodes.keys()) + .map(|&key| (key, true)) + .collect(); + let nodes = patched.tree_nodes(&requested_keys); + + #[rustfmt::skip] // formatting array with one item per line looks uglier + assert_matches!( + nodes.as_slice(), + [Some(_), Some(_), None, None, None, None, Some(_), Some(_), Some(_)] + ); + } + + #[test] + fn garbage_is_removed_on_db_reverts() { + let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); + let mut db = RocksDBWrapper::new(&dir); + + // Insert some data to the database. + let mut expected_keys = HashSet::new(); + let root = Root::new(2, Node::Internal(InternalNode::default())); + expected_keys.insert(NodeKey::empty(0)); + let nodes = generate_nodes(0, &[1, 2]); + expected_keys.extend(nodes.keys().copied()); + let patch = PatchSet::new(0, &(), root, nodes); + db.apply_patch(patch); + + assert_contains_exactly_keys(&db, &expected_keys); + + // Overwrite data by inserting a root / nodes with the same version. + let mut expected_keys = HashSet::new(); + let root = Root::new(3, Node::Internal(InternalNode::default())); + expected_keys.insert(NodeKey::empty(0)); + let nodes = generate_nodes(0, &[3, 4, 5]); + expected_keys.extend(nodes.keys().copied()); + let mut patch = PatchSet::new(0, &(), root, nodes); + + // Insert a new version of the tree as well. + let root = Root::new(4, Node::Internal(InternalNode::default())); + expected_keys.insert(NodeKey::empty(1)); + let nodes = generate_nodes(1, &[6]); + expected_keys.extend(nodes.keys().copied()); + patch.apply_patch(PatchSet::new(1, &(), root, nodes)); + db.apply_patch(patch); + + assert_contains_exactly_keys(&db, &expected_keys); + + // Overwrite both versions of the tree again. 
+ let patch = PatchSet::new(0, &(), Root::Empty, HashMap::new()); + db.apply_patch(patch); + let patch = PatchSet::new(1, &(), Root::Empty, HashMap::new()); + db.apply_patch(patch); + + let expected_keys = HashSet::from_iter([NodeKey::empty(0), NodeKey::empty(1)]); + assert_contains_exactly_keys(&db, &expected_keys); + } + + fn assert_contains_exactly_keys(db: &RocksDBWrapper, expected_keys: &HashSet<NodeKey>) { + let cf = db.db.cf_merkle_tree_handle(MerkleTreeColumnFamily::Tree); + let actual_keys: HashSet<_> = db + .db + .prefix_iterator_cf(cf, [0; 8]) + .map(|(key, _)| key) + .collect(); + + let expected_raw_keys: HashSet<_> = expected_keys + .iter() + .map(|key| key.to_db_key().into_boxed_slice()) + .collect(); + assert_eq!(actual_keys, expected_raw_keys); + } +} diff --git a/core/lib/merkle_tree2/src/storage/mod.rs b/core/lib/merkle_tree2/src/storage/mod.rs new file mode 100644 index 000000000000..3ee877abe8a5 --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/mod.rs @@ -0,0 +1,608 @@ +//! Storage-related logic. + +use metrics::Unit; + +use std::{mem, ops, sync::Once, time::Instant}; + +mod database; +mod patch; +mod proofs; +mod serialization; +#[cfg(test)] +mod tests; + +pub use self::{ + database::{Database, NodeKeys, Patched, RocksDBWrapper}, + patch::PatchSet, +}; + +use self::patch::WorkingPatchSet; +use crate::{ + hasher::{HashTree, HashingStats}, + types::{ + BlockOutput, ChildRef, InternalNode, Key, LeafNode, Nibbles, Node, Root, TreeLogEntry, + ValueHash, + }, + utils::increment_counter, +}; + +#[derive(Debug, Clone, Copy, Default)] +struct StorageMetrics { + // Metrics related to the AR16MT tree architecture + new_leaves: u64, + new_internal_nodes: u64, + moved_leaves: u64, + updated_leaves: u64, + leaf_level_sum: u64, + max_leaf_level: u64, + // Metrics related to input instructions + key_reads: u64, + missing_key_reads: u64, + db_reads: u64, + patch_reads: u64, +} + +impl StorageMetrics { + fn describe() { + metrics::describe_gauge!( + "merkle_tree.extend_patch.new_leaves", + Unit::Count, + "Number of new leaves inserted during tree traversal while processing a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.new_internal_nodes", + Unit::Count, + "Number of new internal nodes inserted during tree traversal while processing \ + a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.moved_leaves", + Unit::Count, + "Number of existing leaves moved to a new location while processing \ + a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.updated_leaves", + Unit::Count, + "Number of existing leaves updated while processing a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.avg_leaf_level", + Unit::Count, + "Average level of leaves moved or created while processing a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.max_leaf_level", + Unit::Count, + "Maximum level of leaves moved or created while processing a single block" + ); + + metrics::describe_gauge!( + "merkle_tree.extend_patch.key_reads", + Unit::Count, + "Number of keys read while processing a single block (only applicable \ + to the full operation mode)" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.missing_key_reads", + Unit::Count, + "Number of missing keys read while processing a single block (only applicable \ + to the full operation mode)" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.db_reads", + Unit::Count, + "Number of nodes of previous versions read from the DB
while processing \ + a single block" + ); + metrics::describe_gauge!( + "merkle_tree.extend_patch.patch_reads", + Unit::Count, + "Number of nodes of the current version re-read from the patch set while processing \ + a single block" + ); + } + + fn update_leaf_levels(&mut self, nibble_count: usize) { + let leaf_level = nibble_count as u64 * 4; + self.leaf_level_sum += leaf_level; + self.max_leaf_level = self.max_leaf_level.max(leaf_level); + } + + #[allow(clippy::cast_precision_loss)] // unlikely to happen given magnitudes of values + fn report(self) { + metrics::gauge!( + "merkle_tree.extend_patch.new_leaves", + self.new_leaves as f64 + ); + metrics::gauge!( + "merkle_tree.extend_patch.new_internal_nodes", + self.new_internal_nodes as f64 + ); + metrics::gauge!( + "merkle_tree.extend_patch.moved_leaves", + self.moved_leaves as f64 + ); + metrics::gauge!( + "merkle_tree.extend_patch.updated_leaves", + self.updated_leaves as f64 + ); + + let touched_leaves = self.new_leaves + self.moved_leaves; + let avg_leaf_level = if touched_leaves > 0 { + self.leaf_level_sum as f64 / touched_leaves as f64 + } else { + 0.0 + }; + metrics::gauge!("merkle_tree.extend_patch.avg_leaf_level", avg_leaf_level); + metrics::gauge!( + "merkle_tree.extend_patch.max_leaf_level", + self.max_leaf_level as f64 + ); + + if self.key_reads > 0 { + metrics::gauge!("merkle_tree.extend_patch.key_reads", self.key_reads as f64); + } + if self.missing_key_reads > 0 { + metrics::gauge!( + "merkle_tree.extend_patch.missing_key_reads", + self.missing_key_reads as f64 + ); + } + metrics::gauge!("merkle_tree.extend_patch.db_reads", self.db_reads as f64); + metrics::gauge!( + "merkle_tree.extend_patch.patch_reads", + self.patch_reads as f64 + ); + } +} + +impl ops::AddAssign for StorageMetrics { + fn add_assign(&mut self, rhs: Self) { + self.new_leaves += rhs.new_leaves; + self.new_internal_nodes += rhs.new_internal_nodes; + self.moved_leaves += rhs.moved_leaves; + self.updated_leaves += rhs.updated_leaves; + self.leaf_level_sum += rhs.leaf_level_sum; + self.max_leaf_level = self.max_leaf_level.max(rhs.max_leaf_level); + + self.key_reads += rhs.key_reads; + self.missing_key_reads += rhs.missing_key_reads; + self.db_reads += rhs.db_reads; + self.patch_reads += rhs.patch_reads; + } +} + +/// Mutable storage encapsulating AR16MT update logic. 
+#[derive(Debug)] +struct TreeUpdater { + metrics: StorageMetrics, + patch_set: WorkingPatchSet, +} + +impl TreeUpdater { + fn describe_metrics() { + metrics::describe_histogram!( + "merkle_tree.load_nodes", + Unit::Seconds, + "Time spent loading tree nodes from DB per block" + ); + metrics::describe_histogram!( + "merkle_tree.extend_patch", + Unit::Seconds, + "Time spent traversing the tree and creating new nodes per block" + ); + metrics::describe_histogram!( + "merkle_tree.finalize_patch", + Unit::Seconds, + "Time spent finalizing the block (mainly hash computations)" + ); + metrics::describe_gauge!( + "merkle_tree.leaf_count", + Unit::Count, + "Current number of leaves in the tree" + ); + StorageMetrics::describe(); + HashingStats::describe(); + } + + fn new(version: u64, root: Root) -> Self { + static METRICS_INITIALIZER: Once = Once::new(); + + METRICS_INITIALIZER.call_once(Self::describe_metrics); + + Self { + metrics: StorageMetrics::default(), + patch_set: WorkingPatchSet::new(version, root), + } + } + + fn root_node_mut(&mut self) -> Option<&mut Node> { + self.patch_set.get_mut(&Nibbles::EMPTY) + } + + fn set_root_node(&mut self, node: Node) { + self.patch_set.insert(Nibbles::EMPTY, node); + } + + /// Gets a node to be mutated. + fn get_mut(&mut self, nibbles: &Nibbles) -> Option<&mut Node> { + self.metrics.patch_reads += 1; + self.patch_set.get_mut(nibbles) + } + + fn insert_node(&mut self, nibbles: Nibbles, node: impl Into<Node>, is_new: bool) { + let node = node.into(); + match (&node, is_new) { + (Node::Leaf(_), false) => { + self.metrics.update_leaf_levels(nibbles.nibble_count()); + self.metrics.moved_leaves += 1; + } + (Node::Leaf(_), true) => { + self.metrics.update_leaf_levels(nibbles.nibble_count()); + self.metrics.new_leaves += 1; + } + (Node::Internal(_), _) => { + debug_assert!(is_new); // internal nodes are never moved + self.metrics.new_internal_nodes += 1; + } + } + self.patch_set.insert(nibbles, node); + } + + /// Loads ancestor nodes for all keys in `key_value_pairs`. Returns the longest prefix + /// present in the tree currently for each inserted / updated key. + /// + /// # Implementation notes + /// + /// This method works by traversing the tree level by level. It uses [`Database::tree_nodes()`] + /// (translating to multi-get in RocksDB) for each level to expedite node loading. + /// + /// It may seem that the loaded leaf nodes may just increase the patch size. However, + /// each leaf node will actually be modified by [`Self::insert()`], either by changing + /// its `value_hash` (on full key match), or by moving the leaf node down the tree + /// (in which case the node in the patch will be overwritten by an `InternalNode`). + fn load_ancestors<DB: Database + ?Sized>( + &mut self, + sorted_keys: &SortedKeys, + db: &DB, + ) -> Vec<Nibbles> { + let Some(Node::Internal(_)) = self.root_node_mut() else { + return vec![Nibbles::EMPTY; sorted_keys.0.len()]; + }; + let patch_set = &mut self.patch_set; + let version = patch_set.version(); + + // Longest prefix for each key in `key_value_pairs` (i.e., what we'll return from + // this method). `None` indicates that the longest prefix for a key is not determined yet. + let mut longest_prefixes = vec![None; sorted_keys.0.len()]; + // Nibbles previously encountered while iterating over `sorted_keys` below. + let mut prev_nibbles = None; + for nibble_count in 1.. { + // Extract `nibble_count` nibbles from each key for which we haven't found the parent + // yet. Note that nibbles in `requested_keys` are sorted.
+ let requested_keys = sorted_keys.0.iter().filter_map(|(idx, key)| { + if longest_prefixes[*idx].is_some() { + return None; + } + let nibbles = Nibbles::new(key, nibble_count); + let (this_parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); + // ^ `unwrap()` is safe by construction; `nibble_count` is positive + let this_ref = patch_set.child_ref_mut(&this_parent_nibbles, last_nibble); + let Some(this_ref) = this_ref else { + longest_prefixes[*idx] = Some(this_parent_nibbles); + return None; + }; + + // Deduplicate by `nibbles`. We do it at the end to properly + // assign `parent_nibbles` for all keys, and before the version is updated + // for `ChildRef`s, in order to update it only once. + if prev_nibbles == Some(nibbles) { + return None; + } + prev_nibbles = Some(nibbles); + + // Update `ChildRef.version` for all nodes that we traverse. + let ref_version = mem::replace(&mut this_ref.version, version); + debug_assert!(ref_version < version); + Some((nibbles.with_version(ref_version), this_ref.is_leaf)) + }); + let requested_keys: Vec<_> = requested_keys.collect(); + + if requested_keys.is_empty() { + break; + } + let new_nodes = db.tree_nodes(&requested_keys); + self.metrics.db_reads += new_nodes.len() as u64; + + // Since we load nodes level by level, we can update `patch_set` more efficiently + // by pushing entire `HashMap`s into `changes_by_nibble_count`. + let level = requested_keys + .iter() + .zip(new_nodes) + .map(|((key, _), node)| { + (*key.nibbles.bytes(), node.unwrap()) + // ^ `unwrap()` is safe: all requested nodes are referenced by their parents + }); + patch_set.push_level(level.collect()); + } + + // All parents must be set at this point. + longest_prefixes.into_iter().map(Option::unwrap).collect() + } + + fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome { + for nibble_idx in parent_nibbles.nibble_count().. { + let nibbles = Nibbles::new(&key, nibble_idx); + match self.patch_set.get(&nibbles) { + Some(Node::Internal(_)) => { /* continue descent */ } + Some(Node::Leaf(leaf)) if leaf.full_key == key => { + return TraverseOutcome::LeafMatch(nibbles, *leaf); + } + Some(Node::Leaf(leaf)) => { + return TraverseOutcome::LeafMismatch(nibbles, *leaf); + } + None => return TraverseOutcome::MissingChild(nibbles), + } + } + unreachable!("We must have encountered a leaf or missing node when traversing"); + } + + /// Inserts or updates a value hash for the specified `key`. This implementation + /// is almost verbatim the algorithm described in the Jellyfish Merkle tree white paper. + /// The algorithm from the paper is as follows: + /// + /// 1. Walk from the root of the tree along the inserted `key` while we can. + /// 2. If the node we've stopped at is an internal node, it means it doesn't have + /// a child at the corresponding nibble from `key`. Create a new leaf node with `key` and + /// `value_hash` and insert it as a new child of the found internal node. + /// 3. Else the node we've stopped at is a leaf. If the full key stored in this leaf is `key`, + /// we just need to update `value_hash` stored in the leaf. + /// 4. Else (the node we've stopped at is a leaf with `other_key != key`) we need to create + /// one or more internal nodes that would contain the common prefix between `key` + /// and `other_key` and a "fork" where these keys differ. + /// + /// We change step 1 by starting not from the root, but rather from the node ancestor + /// we've found in [`Self::load_ancestors()`] for a (moderate) performance boost.
Note that + /// due to previous `insert`ions, we may still need to perform more than 1 traversal iteration. + /// + /// We don't update node hashes; this would lead to a significant compute overhead (internal + /// nodes on upper levels are updated multiple times in a block). Instead, we recompute + /// hashes for all updated nodes in [`Self::finalize()`]. + fn insert( + &mut self, + key: Key, + value_hash: ValueHash, + parent_nibbles: &Nibbles, + leaf_index_fn: impl FnOnce() -> u64, + ) -> (TreeLogEntry, NewLeafData) { + let version = self.patch_set.version(); + let traverse_outcome = self.traverse(key, parent_nibbles); + let (log, leaf_data) = match traverse_outcome { + TraverseOutcome::LeafMatch(nibbles, mut leaf) => { + let log = TreeLogEntry::update(leaf.value_hash, leaf.leaf_index); + leaf.value_hash = value_hash; + self.patch_set.insert(nibbles, leaf.into()); + self.metrics.updated_leaves += 1; + (log, NewLeafData::new(nibbles, leaf)) + } + + TraverseOutcome::LeafMismatch(nibbles, leaf) => { + if let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { + self.patch_set + .child_ref_mut(&parent_nibbles, last_nibble) + .unwrap() + .is_leaf = false; + } + + let mut nibble_idx = nibbles.nibble_count(); + loop { + let moved_leaf_nibble = Nibbles::nibble(&leaf.full_key, nibble_idx); + let new_leaf_nibble = Nibbles::nibble(&key, nibble_idx); + let mut node = InternalNode::default(); + if moved_leaf_nibble == new_leaf_nibble { + // Insert a path of internal nodes with a single child. + node.insert_child_ref(new_leaf_nibble, ChildRef::internal(version)); + } else { + // Insert a diverging internal node with 2 children for the existing + // and the new leaf. + node.insert_child_ref(new_leaf_nibble, ChildRef::leaf(version)); + node.insert_child_ref(moved_leaf_nibble, ChildRef::leaf(version)); + } + let node_nibbles = Nibbles::new(&key, nibble_idx); + self.insert_node(node_nibbles, node, true); + if moved_leaf_nibble != new_leaf_nibble { + break; + } + nibble_idx += 1; + } + + let leaf_index = leaf_index_fn(); + let new_leaf = LeafNode::new(key, value_hash, leaf_index); + let new_leaf_nibbles = Nibbles::new(&key, nibble_idx + 1); + let leaf_data = NewLeafData::new(new_leaf_nibbles, new_leaf); + let moved_leaf_nibbles = Nibbles::new(&leaf.full_key, nibble_idx + 1); + let leaf_data = leaf_data.with_adjacent_leaf(moved_leaf_nibbles, leaf); + (TreeLogEntry::insert(leaf_index), leaf_data) + } + + TraverseOutcome::MissingChild(nibbles) if nibbles.nibble_count() == 0 => { + // The root is currently empty; we replace it with a leaf. + let leaf_index = leaf_index_fn(); + debug_assert_eq!(leaf_index, 1); + let root_leaf = LeafNode::new(key, value_hash, leaf_index); + self.set_root_node(root_leaf.into()); + let leaf_data = NewLeafData::new(Nibbles::EMPTY, root_leaf); + (TreeLogEntry::insert(1), leaf_data) + } + + TraverseOutcome::MissingChild(nibbles) => { + let (parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); + let Some(Node::Internal(parent)) = self.get_mut(&parent_nibbles) else { + unreachable!("Node parent must be an internal node"); + }; + parent.insert_child_ref(last_nibble, ChildRef::leaf(version)); + let leaf_index = leaf_index_fn(); + let new_leaf = LeafNode::new(key, value_hash, leaf_index); + let leaf_data = NewLeafData::new(nibbles, new_leaf); + (TreeLogEntry::insert(leaf_index), leaf_data) + } + }; + + if matches!(log, TreeLogEntry::Inserted { .. 
}) { + self.insert_node(leaf_data.nibbles, leaf_data.leaf, true); + } + if let Some((nibbles, leaf)) = leaf_data.adjacent_leaf { + self.insert_node(nibbles, leaf, false); + } + + // Traverse nodes up to the `parent_nibbles` level and update `ChildRef.version`. + // (For nodes before the `parent_nibbles` level, the version is updated when the nodes + // are loaded.) + let mut cursor = traverse_outcome.position(); + let stop_count = parent_nibbles.nibble_count(); + while let Some((parent_nibbles, last_nibble)) = cursor.split_last() { + if parent_nibbles.nibble_count() < stop_count { + break; + } + self.patch_set + .child_ref_mut(&parent_nibbles, last_nibble) + .unwrap() + .version = version; + cursor = parent_nibbles; + } + + (log, leaf_data) + } +} + +/// [`TreeUpdater`] together with a link to the database. +#[derive(Debug)] +pub(crate) struct Storage<'a, DB: ?Sized> { + db: &'a DB, + leaf_count: u64, + updater: TreeUpdater, +} + +impl<'a, DB: Database + ?Sized> Storage<'a, DB> { + /// Creates storage for a new version of the tree. + pub fn new(db: &'a DB, version: u64) -> Self { + let root = if version == 0 { + Root::Empty + } else { + db.root(version - 1).expect("no previous root") + }; + + Self { + db, + leaf_count: root.leaf_count(), + updater: TreeUpdater::new(version, root), + } + } + + /// Extends the Merkle tree in the lightweight operation mode, without intermediate hash + /// computations. + pub fn extend( + mut self, + hasher: &dyn HashTree, + key_value_pairs: Vec<(Key, ValueHash)>, + ) -> (BlockOutput, PatchSet) { + let start = Instant::now(); + let sorted_keys = SortedKeys::new(key_value_pairs.iter().map(|(key, _)| *key)); + let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); + metrics::histogram!("merkle_tree.load_nodes", start.elapsed()); + + let start = Instant::now(); + let mut logs = Vec::with_capacity(key_value_pairs.len()); + for ((key, value_hash), parent_nibbles) in key_value_pairs.into_iter().zip(parent_nibbles) { + let (log, _) = self.updater.insert(key, value_hash, &parent_nibbles, || { + increment_counter(&mut self.leaf_count) + }); + logs.push(log); + } + metrics::histogram!("merkle_tree.extend_patch", start.elapsed()); + + let leaf_count = self.leaf_count; + let (root_hash, patch) = self.finalize(hasher); + let output = BlockOutput { + root_hash, + leaf_count, + logs, + }; + (output, patch) + } + + #[allow(clippy::cast_precision_loss)] // unlikely to happen given practical leaf counts + fn finalize(self, hasher: &dyn HashTree) -> (ValueHash, PatchSet) { + self.updater.metrics.report(); + + let start = Instant::now(); + let (root_hash, patch, stats) = self.updater.patch_set.finalize(self.leaf_count, hasher); + metrics::histogram!("merkle_tree.finalize_patch", start.elapsed()); + metrics::gauge!("merkle_tree.leaf_count", self.leaf_count as f64); + stats.report(); + + (root_hash, patch) + } +} + +/// Sorted [`Key`]s together with their indices in the block. +#[derive(Debug)] +struct SortedKeys(Vec<(usize, Key)>); + +impl SortedKeys { + fn new(keys: impl Iterator<Item = Key>) -> Self { + let mut keys: Vec<_> = keys.enumerate().collect(); + keys.sort_unstable_by_key(|(_, key)| *key); + Self(keys) + } +} + +/// Outcome of traversing a tree for a specific key. +#[derive(Debug)] +enum TraverseOutcome { + /// The matching leaf is present in the tree. + LeafMatch(Nibbles, LeafNode), + /// The traversal ends in a leaf with a mismatched full key.
+ LeafMismatch(Nibbles, LeafNode), + /// The traversal cannot proceed because of a missing child ref in an internal node. + MissingChild(Nibbles), +} + +impl TraverseOutcome { + /// Returns the final position during the traversal. + fn position(&self) -> Nibbles { + match self { + Self::LeafMatch(nibbles, _) + | Self::LeafMismatch(nibbles, _) + | Self::MissingChild(nibbles) => *nibbles, + } + } +} + +/// Information about the newly inserted / updated leaf. Can also include information about +/// an adjacent leaf moved down the tree. +#[derive(Debug)] +struct NewLeafData { + /// Nibbles for the new leaf node. + nibbles: Nibbles, + /// The new leaf node. + leaf: LeafNode, + /// Nibbles and node for the adjacent leaf moved down the tree. + adjacent_leaf: Option<(Nibbles, LeafNode)>, +} + +impl NewLeafData { + fn new(nibbles: Nibbles, leaf: LeafNode) -> Self { + Self { + nibbles, + leaf, + adjacent_leaf: None, + } + } + + fn with_adjacent_leaf(mut self, nibbles: Nibbles, leaf: LeafNode) -> Self { + self.adjacent_leaf = Some((nibbles, leaf)); + self + } +} diff --git a/core/lib/merkle_tree2/src/storage/patch.rs b/core/lib/merkle_tree2/src/storage/patch.rs new file mode 100644 index 000000000000..2c4336d7768d --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/patch.rs @@ -0,0 +1,349 @@ +//! Types related to DB patches: `PatchSet` and `WorkingPatchSet`. + +use rayon::prelude::*; + +use std::collections::HashMap; + +use crate::{ + hasher::{HashTree, HashingStats}, + storage::proofs::SUBTREE_COUNT, + types::{ + ChildRef, InternalNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, Root, ValueHash, + }, +}; + +/// Raw set of database changes. +#[derive(Debug, Default)] +#[cfg_attr(test, derive(Clone))] // Used in tree consistency tests +pub struct PatchSet { + pub(super) manifest: Manifest, + pub(super) roots: HashMap<u64, Root>, + pub(super) nodes_by_version: HashMap<u64, HashMap<NodeKey, Node>>, +} + +impl PatchSet { + pub(crate) fn from_manifest(manifest: Manifest) -> Self { + Self { + manifest, + roots: HashMap::new(), + nodes_by_version: HashMap::new(), + } + } + + pub(super) fn for_empty_root(version: u64, hasher: &dyn HashTree) -> Self { + Self::new(version, hasher, Root::Empty, HashMap::new()) + } + + pub(super) fn new( + version: u64, + hasher: &dyn HashTree, + root: Root, + mut nodes: HashMap<NodeKey, Node>, + ) -> Self { + nodes.shrink_to_fit(); // We never insert into `nodes` later + Self { + manifest: Manifest::new(version + 1, hasher), + roots: HashMap::from_iter([(version, root)]), + nodes_by_version: HashMap::from_iter([(version, nodes)]), + } + } + + pub(super) fn is_responsible_for_version(&self, version: u64) -> bool { + version >= self.manifest.version_count // this patch truncates `version` + || self.roots.contains_key(&version) + } +} + +#[cfg(test)] // extensions to test tree consistency +impl PatchSet { + pub(crate) fn manifest_mut(&mut self) -> &mut Manifest { + &mut self.manifest + } + + pub(crate) fn roots_mut(&mut self) -> &mut HashMap<u64, Root> { + &mut self.roots + } + + pub(crate) fn nodes_mut(&mut self) -> impl Iterator<Item = (&NodeKey, &mut Node)> + '_ { + self.nodes_by_version.values_mut().flatten() + } + + pub(crate) fn remove_node(&mut self, key: &NodeKey) { + let nodes = self.nodes_by_version.get_mut(&key.version).unwrap(); + nodes.remove(key); + } +} + +/// Mutable version of [`PatchSet`] where we insert all changed nodes when updating +/// a Merkle tree.
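+///
+/// A minimal sketch of the intended usage (an editorial illustration, not part of the original
+/// change):
+///
+/// ```ignore
+/// let mut patch = WorkingPatchSet::new(0, Root::Empty);
+/// patch.ensure_internal_root_node(); // the empty root is replaced with an internal node
+/// let key = Key::from_little_endian(&[1; 32]);
+/// let nibbles = Nibbles::new(&key, 2); // 2 nibbles, i.e., tree level 8
+/// patch.insert(nibbles, LeafNode::new(key, ValueHash::zero(), 1).into());
+/// assert!(patch.get(&nibbles).is_some());
+/// ```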
+#[derive(Debug)] +pub(super) struct WorkingPatchSet { + version: u64, + // Group changes by `nibble_count` (which is linearly tied to the tree depth: + // `depth == nibble_count * 4`) so that we can compute hashes for all changed nodes + // in a single traversal in `Self::finalize()`. + changes_by_nibble_count: Vec<HashMap<NibblesBytes, Node>>, +} + +impl WorkingPatchSet { + pub fn new(version: u64, root: Root) -> Self { + let changes_by_nibble_count = match root { + Root::Filled { node, .. } => { + let root_level = [(*Nibbles::EMPTY.bytes(), node)]; + vec![HashMap::from_iter(root_level)] + } + Root::Empty => Vec::new(), + }; + Self { + version, + changes_by_nibble_count, + } + } + + pub fn version(&self) -> u64 { + self.version + } + + pub fn get(&self, nibbles: &Nibbles) -> Option<&Node> { + self.changes_by_nibble_count + .get(nibbles.nibble_count())? + .get(nibbles.bytes()) + } + + pub fn insert(&mut self, key: Nibbles, node: Node) -> &mut Node { + if key.nibble_count() >= self.changes_by_nibble_count.len() { + self.changes_by_nibble_count + .resize_with(key.nibble_count() + 1, HashMap::new); + } + + let level = &mut self.changes_by_nibble_count[key.nibble_count()]; + level.insert(*key.bytes(), node); + level.get_mut(key.bytes()).unwrap() + } + + pub fn get_mut(&mut self, key: &Nibbles) -> Option<&mut Node> { + let level = self.changes_by_nibble_count.get_mut(key.nibble_count())?; + level.get_mut(key.bytes()) + } + + pub fn child_ref_mut(&mut self, key: &Nibbles, child_nibble: u8) -> Option<&mut ChildRef> { + let Node::Internal(parent) = self.get_mut(key)? else { + return None; + }; + parent.child_ref_mut(child_nibble) + } + + pub fn push_level(&mut self, level: HashMap<NibblesBytes, Node>) { + self.changes_by_nibble_count.push(level); + } + + /// Ensures that the root node in the patch set, if it exists, is an internal node. Returns + /// a copy of the root node. + pub fn ensure_internal_root_node(&mut self) -> InternalNode { + match self.get(&Nibbles::EMPTY) { + Some(Node::Internal(node)) => node.clone(), + Some(Node::Leaf(leaf)) => { + let leaf = *leaf; + let first_nibble = Nibbles::nibble(&leaf.full_key, 0); + let mut internal_node = InternalNode::default(); + internal_node.insert_child_ref(first_nibble, ChildRef::leaf(self.version)); + self.insert(Nibbles::EMPTY, internal_node.clone().into()); + self.insert(Nibbles::new(&leaf.full_key, 1), leaf.into()); + internal_node + } + None => { + let internal_node = InternalNode::default(); + self.insert(Nibbles::EMPTY, internal_node.clone().into()); + internal_node + } + } + } + + /// Splits this patch set by the first nibble of the contained keys. + pub fn split(self) -> [Self; SUBTREE_COUNT] { + let mut parts = [(); SUBTREE_COUNT].map(|_| Self { + version: self.version, + changes_by_nibble_count: vec![HashMap::new(); self.changes_by_nibble_count.len()], + }); + + let levels = self.changes_by_nibble_count.into_iter().enumerate(); + for (nibble_count, level) in levels { + if nibble_count == 0 { + // Copy the root node to all parts.
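+                // (The zero-nibble level holds at most the single root entry, so this clone is cheap.)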
+ for part in &mut parts { + part.changes_by_nibble_count[0] = level.clone(); + } + } else { + for (nibbles, node) in level { + let first_nibble = nibbles[0] >> 4; + let part = &mut parts[first_nibble as usize]; + part.changes_by_nibble_count[nibble_count].insert(nibbles, node); + } + } + } + parts + } + + pub fn merge(&mut self, other: Self) { + debug_assert_eq!(self.version, other.version); + + let other_len = other.changes_by_nibble_count.len(); + if self.changes_by_nibble_count.len() < other_len { + self.changes_by_nibble_count + .resize_with(other_len, HashMap::new); + } + + let it = self + .changes_by_nibble_count + .iter_mut() + .zip(other.changes_by_nibble_count) + .skip(1); + // ^ Do not overwrite the root node; it needs to be dealt with separately anyway + for (target_level, src_level) in it { + let expected_new_len = target_level.len() + src_level.len(); + target_level.extend(src_level); + debug_assert_eq!( + target_level.len(), + expected_new_len, + "Cannot merge `WorkingPatchSet`s with intersecting changes" + ); + } + } + + /// Computes hashes and serializes this changeset. + pub fn finalize( + self, + leaf_count: u64, + hasher: &dyn HashTree, + ) -> (ValueHash, PatchSet, HashingStats) { + let stats = HashingStats::default(); + + let mut changes_by_nibble_count = self.changes_by_nibble_count; + if changes_by_nibble_count.is_empty() { + // The tree is empty and there is no root present. + let patch = PatchSet::for_empty_root(self.version, hasher); + return (hasher.empty_tree_hash(), patch, stats); + } + let len = changes_by_nibble_count.iter().map(HashMap::len).sum(); + let mut patched_nodes = HashMap::with_capacity(len); + + // Compute hashes for the changed nodes with decreasing nibble count (i.e., topologically + // sorted) and store the computed hash in the parent nodes. + while let Some(level_changes) = changes_by_nibble_count.pop() { + let nibble_count = changes_by_nibble_count.len(); + let tree_level = nibble_count * 4; + // `into_par_iter()` below uses `rayon` to parallelize hash computations. + let hashed_nodes: Vec<_> = level_changes + .into_par_iter() + .map_init( + || hasher.with_stats(&stats), + |hasher, (nibbles, node)| { + let nibbles = Nibbles::from_parts(nibbles, nibble_count); + (nibbles, node.hash(hasher, tree_level), node) + }, + ) + .collect(); + + for (nibbles, node_hash, node) in hashed_nodes { + if let Some(upper_level_changes) = changes_by_nibble_count.last_mut() { + let (parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); + let parent = upper_level_changes.get_mut(parent_nibbles.bytes()); + let Some(Node::Internal(parent)) = parent else { + unreachable!("Node parent must be an internal node"); + }; + // ^ `unwrap()`s are safe by construction: the parent of any changed node + // is an `InternalNode` that must be in the change set as well. + let self_ref = parent.child_ref_mut(last_nibble).unwrap(); + // ^ `unwrap()` is safe by construction: the parent node must reference + // the currently considered child. + self_ref.hash = node_hash; + } else { + // We're at the root node level. 
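+                    // The root node is not stored in `patched_nodes`; it is persisted as part of
+                    // the `Root` entry together with the leaf count.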
+ let root = Root::new(leaf_count, node); + let patch = PatchSet::new(self.version, hasher, root, patched_nodes); + return (node_hash, patch, stats); + } + + patched_nodes.insert(nibbles.with_version(self.version), node); + } + } + unreachable!("We should have returned when the root node was encountered above"); + } + + pub fn take_root(&mut self) -> Option<Node> { + let root_level = self.changes_by_nibble_count.get_mut(0)?; + root_level.remove(Nibbles::EMPTY.bytes()) + } + + pub fn finalize_without_hashing(mut self, leaf_count: u64, hasher: &dyn HashTree) -> PatchSet { + let Some(root) = self.take_root() else { + return PatchSet::for_empty_root(self.version, hasher); + }; + let root = Root::new(leaf_count, root); + + let levels = self.changes_by_nibble_count.drain(1..); + let nodes = levels.enumerate().flat_map(|(i, level)| { + let nibble_count = i + 1; + level.into_iter().map(move |(nibbles, node)| { + let nibbles = Nibbles::from_parts(nibbles, nibble_count); + (nibbles.with_version(self.version), node) + }) + }); + PatchSet::new(self.version, hasher, root, nodes.collect()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::{Key, LeafNode}; + + fn patch_len(patch: &WorkingPatchSet) -> usize { + patch.changes_by_nibble_count.iter().map(HashMap::len).sum() + } + + #[test] + fn splitting_patch_set() { + let mut patch = WorkingPatchSet::new(0, Root::Empty); + let node = patch.ensure_internal_root_node(); + assert_eq!(node.child_count(), 0); + + let all_nibbles = (1_u8..=255).map(|i| { + let key = Key::from_little_endian(&[i; 32]); + let nibbles = Nibbles::new(&key, 2 + usize::from(i) % 4); + // ^ We need nibble count at least 2 for all `nibbles` to be distinct. + let leaf = LeafNode::new(key, ValueHash::zero(), i.into()); + patch.insert(nibbles, leaf.into()); + nibbles + }); + let all_nibbles: Vec<_> = all_nibbles.collect(); + assert_eq!(patch_len(&patch), all_nibbles.len() + 1); // + root node + let parts = patch.split(); + + for (i, part) in parts.iter().enumerate() { + let part_len = patch_len(part); + assert!( + (15..=17).contains(&part_len), + "unexpected {i}th part length: {part_len}" + ); + + let first_nibble = u8::try_from(i).unwrap(); + let levels = part.changes_by_nibble_count.iter().skip(1); + for nibbles in levels.flat_map(HashMap::keys) { + assert_eq!(nibbles[0] >> 4, first_nibble); + } + } + + let merged = parts + .into_iter() + .reduce(|mut this, other| { + this.merge(other); + this + }) + .unwrap(); + for nibbles in &all_nibbles { + assert!(merged.get(nibbles).is_some()); + } + assert_eq!(patch_len(&merged), all_nibbles.len() + 1); + } +} diff --git a/core/lib/merkle_tree2/src/storage/proofs.rs b/core/lib/merkle_tree2/src/storage/proofs.rs new file mode 100644 index 000000000000..011cbc563d0a --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/proofs.rs @@ -0,0 +1,620 @@ +//! Logic specific to the full tree operation mode, in which it produces Merkle proofs +//! for each operation. +//! +//! # How it works +//! +//! As with the proof-less [`Self::extend()`], we start by loading all relevant nodes +//! from the database and determining the parent node position for each key in `instructions`. +//! +//! A naive implementation would then apply `instructions` one by one, creating a proof +//! for each of the instructions. This, however, is quite slow (determined mainly by the hash +//! operations that need to be performed to create Merkle proofs). So, we parallelize +//! the process by splitting the instructions by the first key nibble (i.e., into 16 key groups) +//!
and working on each group in parallel. Each group of instructions is *mostly* independent, +//! since it's mostly applied to a separate subtree of the original Merkle tree +//! with root at level 4 (= 1 nibble). Thus, the patch sets and Merkle proofs +//! produced by each group are mostly disjoint; they intersect only at the root node level. +//! +//! ## Computing leaf indices +//! +//! We need to determine leaf indices for all write instructions. Indices potentially depend +//! on the entire list of `instructions`, so we should determine leaf indices before +//! parallelization. Otherwise, we'd need to sync between parallelized tasks, which defeats +//! the purpose of parallelization. +//! +//! We precompute indices as a separate step using the following observations: +//! +//! - If a leaf is present in the tree *before* `instructions` are applied, its index +//! can be obtained from the node ancestors loaded on the first step of the process. +//! - Otherwise, a leaf may have been added by a previous instruction for the same key. +//! Since we already need [`SortedKeys`] to efficiently load ancestors, it's easy +//! to determine such pairs of instructions. +//! - Otherwise, we have a first write, and the leaf index is defined as the current leaf +//! count. +//! +//! In summary, we can determine leaf indices for all write `instructions` in linear time +//! and without synchronization required during the parallel steps of the process. +//! +//! ## Merging Merkle proofs +//! +//! The proofs produced by different groups only intersect at levels 0..4. This can be dealt with +//! as follows: +//! +//! - Produce partial Merkle proofs for levels 4.. (rather than full proofs for levels 0..) +//! when working in groups. The root hash for each of the proofs will actually be the +//! *subtree* root hash, and Merkle proofs would have at most 252 `ValueHash`es. +//! - Recombine the proofs in the original `instructions` order. For each write instruction, +//! update the corresponding child reference hash in the root node to equal +//! the (subtree) root hash from the proof, and recompute the root hash of the root node. +//! Then, extend the Merkle proof with upper 4 `ValueHash`es based on the root node. +//! +//! This approach only works if the root is an [`InternalNode`]. Fortunately, we can always +//! transform the root to an `InternalNode` and then transform it back if necessary. +//! +//! ## Merging patch sets +//! +//! `WorkingPatchSet`s produced by different groups are disjoint except for the root node. +//! We ignore the root node in these sets anyway; the final root node is produced by applying +//! logs with proofs as described above. Thus, we can merge patch sets just by merging +//! their nibbles–node entries. + +use rayon::prelude::*; + +use std::time::Instant; + +use crate::{ + hasher::{HashTree, HasherWithStats, HashingStats, MerklePath}, + storage::{ + Database, NewLeafData, PatchSet, SortedKeys, Storage, StorageMetrics, TraverseOutcome, + TreeUpdater, + }, + types::{ + BlockOutputWithProofs, InternalNode, Key, Nibbles, Node, TreeInstruction, TreeLogEntry, + TreeLogEntryWithProof, ValueHash, + }, + utils::{find_diverging_bit, increment_counter, merge_by_index}, +}; + +/// Number of subtrees used for parallel computations. +pub(super) const SUBTREE_COUNT: usize = 16; +/// 0-based tree level at which subtree roots are located. 
+const SUBTREE_ROOT_LEVEL: usize = 4; + +impl TreeUpdater { + fn extend_precomputed( + &mut self, + hasher: &mut HasherWithStats<'_>, + first_nibble: u8, + instructions: Vec, + ) -> Vec<(usize, TreeLogEntryWithProof)> { + let mut logs = Vec::with_capacity(instructions.len()); + let root_nibbles = Nibbles::single(first_nibble); + let mut root_hash = match self.patch_set.get(&root_nibbles) { + Some(node) => node.hash(hasher, SUBTREE_ROOT_LEVEL), + None => hasher.empty_subtree_hash(SUBTREE_ROOT_LEVEL), + }; + + for instruction in instructions { + let InstructionWithPrecomputes { + index, + key, + instruction, + parent_nibbles, + leaf_index, + } = instruction; + + let log = match instruction { + TreeInstruction::Write(value_hash) => { + let (log, leaf_data) = + self.insert(key, value_hash, &parent_nibbles, || leaf_index); + let (new_root_hash, merkle_path) = self.update_node_hashes(hasher, &leaf_data); + root_hash = new_root_hash; + TreeLogEntryWithProof { + base: log, + merkle_path, + root_hash, + } + } + TreeInstruction::Read => { + let (log, merkle_path) = self.prove(hasher, key, &parent_nibbles); + TreeLogEntryWithProof { + base: log, + merkle_path, + root_hash, + } + } + }; + logs.push((index, log)); + } + logs + } + + /// Updates hashes for the leaves inserted or updated in the tree together with all ancestor + /// internal nodes. Returns the new root hash of the tree and the Merkle path + /// for the inserted / updated key. + fn update_node_hashes( + &mut self, + hasher: &mut HasherWithStats<'_>, + leaf_data: &NewLeafData, + ) -> (ValueHash, MerklePath) { + if let Some((nibbles, leaf)) = leaf_data.adjacent_leaf { + let (parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); + let leaf_level = nibbles.nibble_count() * 4; + debug_assert!(leaf_level >= SUBTREE_ROOT_LEVEL); + // ^ Because we've ensured an internal root node, all inserted leaves have at least + // 1 nibble. + let node_hash = leaf.hash(hasher, leaf_level); + self.patch_set + .child_ref_mut(&parent_nibbles, last_nibble) + .unwrap() + .hash = node_hash; + // ^ This only works because the parent node has been just created; in the general case, + // mutating `ChildRef.hash` invalidates `InternalNodeCache`. + } + + let mut nibbles = leaf_data.nibbles; + let leaf_level = nibbles.nibble_count() * 4; + debug_assert!(leaf_level >= SUBTREE_ROOT_LEVEL); + let mut node_hash = leaf_data.leaf.hash(hasher, leaf_level); + let mut merkle_path = MerklePath::new(leaf_level); + while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { + if parent_nibbles.nibble_count() == 0 { + break; + } + + let parent = self.patch_set.get_mut(&parent_nibbles); + let Some(Node::Internal(parent)) = parent else { unreachable!() }; + let parent_level = parent_nibbles.nibble_count() * 4; + let mut updater = parent.updater(hasher, parent_level, last_nibble); + node_hash = updater.update_child_hash(node_hash); + updater.extend_merkle_path(&mut merkle_path); + nibbles = parent_nibbles; + } + + (node_hash, merkle_path) + } + + /// Proves the existence or absence of a key in the tree. 
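+    ///
+    /// Returns the read log entry together with a *partial* Merkle path covering levels
+    /// `SUBTREE_ROOT_LEVEL..`; the upper hashes are appended later, when the logs are
+    /// recombined at the root node in `finalize_logs()` (see the module docs).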
+ pub(super) fn prove( + &mut self, + hasher: &mut HasherWithStats<'_>, + key: Key, + parent_nibbles: &Nibbles, + ) -> (TreeLogEntry, MerklePath) { + let traverse_outcome = self.traverse(key, parent_nibbles); + let (operation, merkle_path) = match traverse_outcome { + TraverseOutcome::MissingChild(_) => (TreeLogEntry::ReadMissingKey, None), + TraverseOutcome::LeafMatch(_, leaf) => { + let log = TreeLogEntry::read(leaf.value_hash, leaf.leaf_index); + (log, None) + } + TraverseOutcome::LeafMismatch(nibbles, leaf) => { + // Find the level at which `leaf.full_key` and `key` diverge. + // Note the addition of 1; e.g., if the keys differ at 0th bit, they + // differ at level 1 of the tree. + let diverging_level = find_diverging_bit(key, leaf.full_key) + 1; + let nibble_count = nibbles.nibble_count(); + debug_assert!(diverging_level > 4 * nibble_count); + let mut path = MerklePath::new(diverging_level); + // Find the hash of the existing `leaf` at the level, and include it + // as the first hash on the Merkle path. + let adjacent_hash = leaf.hash(hasher, diverging_level); + path.push(hasher, Some(adjacent_hash)); + // Fill the path with empty hashes until we've reached the leaf level. + for _ in (4 * nibble_count + 1)..diverging_level { + path.push(hasher, None); + } + (TreeLogEntry::ReadMissingKey, Some(path)) + } + }; + + if matches!(operation, TreeLogEntry::ReadMissingKey) { + self.metrics.missing_key_reads += 1; + } else { + self.metrics.key_reads += 1; + } + + let mut nibbles = traverse_outcome.position(); + let leaf_level = nibbles.nibble_count() * 4; + debug_assert!(leaf_level >= SUBTREE_ROOT_LEVEL); + // ^ Because we've ensured an internal root node, all found positions have at least + // 1 nibble. + + let mut merkle_path = merkle_path.unwrap_or_else(|| MerklePath::new(leaf_level)); + while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { + if parent_nibbles.nibble_count() == 0 { + break; + } + + let parent = self.patch_set.get_mut(&parent_nibbles); + let Some(Node::Internal(parent)) = parent else { unreachable!() }; + let parent_level = parent_nibbles.nibble_count() * 4; + parent + .updater(hasher, parent_level, last_nibble) + .extend_merkle_path(&mut merkle_path); + nibbles = parent_nibbles; + } + (operation, merkle_path) + } + + fn split(self) -> [Self; SUBTREE_COUNT] { + self.patch_set.split().map(|patch_set| Self { + metrics: StorageMetrics::default(), + patch_set, + }) + } + + fn merge(mut self, other: Self) -> Self { + self.patch_set.merge(other.patch_set); + self.metrics += other.metrics; + self + } + + /// Sequentially applies `logs` produced by parallelized tree traversal updating the root node + /// using log data. Finalizes Merkle paths in each log. + fn finalize_logs( + &mut self, + hasher: &mut HasherWithStats<'_>, + mut root: InternalNode, + logs: Vec<(usize, TreeLogEntryWithProof)>, + ) -> Vec { + let version = self.patch_set.version(); + let mut root_hash = root.hash(hasher, 0); + + // Check the kind of each of subtrees. This is used later to ensure the correct + // `ChildRef.is_leaf` values in the root node. 
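+        // (After the parallel stage a subtree may consist of a single leaf, in which case
+        // the corresponding child ref in the root must be flagged as a leaf rather than
+        // as an internal node.)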
+ let mut is_leaf_by_subtree = [false; SUBTREE_COUNT]; + for (subtree_idx, is_leaf) in is_leaf_by_subtree.iter_mut().enumerate() { + let nibble = u8::try_from(subtree_idx).unwrap(); + let child = self.patch_set.get(&Nibbles::single(nibble)); + *is_leaf = matches!(child, Some(Node::Leaf(_))); + } + + let logs = logs.into_iter().map(|(subtree_idx, mut log)| { + let nibble = u8::try_from(subtree_idx).unwrap(); + let mut updater = root.updater(hasher, 0, nibble); + if !log.base.is_read() { + updater.ensure_child_ref(version, is_leaf_by_subtree[subtree_idx]); + root_hash = updater.update_child_hash(log.root_hash); + } + updater.extend_merkle_path(&mut log.merkle_path); + + TreeLogEntryWithProof { + base: log.base, + merkle_path: log.merkle_path.into_inner(), + root_hash, + } + }); + let logs = logs.collect(); + + if root.child_count() == 0 { + // We cannot save the empty internal root node because it'll fail deserialization + // checks later. By construction, the patch set is guaranteed to be valid (namely empty) + // after removal. + self.patch_set.take_root(); + } else { + self.set_root_node(root.into()); + } + logs + } +} + +impl<'a, DB: Database + ?Sized> Storage<'a, DB> { + pub fn extend_with_proofs( + mut self, + hasher: &dyn HashTree, + instructions: Vec<(Key, TreeInstruction)>, + ) -> (BlockOutputWithProofs, PatchSet) { + let start = Instant::now(); + let sorted_keys = SortedKeys::new(instructions.iter().map(|(key, _)| *key)); + let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); + metrics::histogram!("merkle_tree.load_nodes", start.elapsed()); + + let leaf_indices = self.compute_leaf_indices(&instructions, sorted_keys, &parent_nibbles); + let instruction_parts = + InstructionWithPrecomputes::split(instructions, parent_nibbles, leaf_indices); + let initial_root = self.updater.patch_set.ensure_internal_root_node(); + let initial_metrics = self.updater.metrics; + let storage_parts = self.updater.split(); + + let hashing_stats = HashingStats::default(); + + let start = Instant::now(); + // `into_par_iter()` below uses `rayon` to parallelize tree traversal and proof generation. + let (storage_parts, logs): (Vec<_>, Vec<_>) = storage_parts + .into_par_iter() + .zip_eq(instruction_parts) + .enumerate() + .map_init( + || hasher.with_stats(&hashing_stats), + |hasher, (i, (mut storage, instructions))| { + let first_nibble = u8::try_from(i).unwrap(); + let logs = storage.extend_precomputed(hasher, first_nibble, instructions); + (storage, logs) + }, + ) + .unzip(); + metrics::histogram!("merkle_tree.extend_patch", start.elapsed()); + + let start = Instant::now(); + self.updater = storage_parts + .into_iter() + .reduce(TreeUpdater::merge) + .unwrap(); + // ^ `unwrap()` is safe: `storage_parts` is non-empty + self.updater.metrics += initial_metrics; + + let logs = merge_by_index(logs); + let mut hasher = hasher.with_stats(&hashing_stats); + let output_with_proofs = self.finalize_with_proofs(&mut hasher, initial_root, logs); + metrics::histogram!("merkle_tree.finalize_patch", start.elapsed()); + drop(hasher); + hashing_stats.report(); + + output_with_proofs + } + + /// Computes leaf indices for all writes in `instructions`. Leaf indices are not used for reads; + /// thus, the corresponding entries are always 0. 
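+    ///
+    /// For example (worked by hand, assuming an initially empty tree): the instruction
+    /// sequence `[Write(A), Read(B), Write(A), Write(C)]` yields leaf indices
+    /// `[1, 0, 1, 2]`; the repeated write to `A` reuses the index assigned by its first
+    /// write, and the entries for reads stay 0.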
+ fn compute_leaf_indices( + &mut self, + instructions: &[(Key, TreeInstruction)], + mut sorted_keys: SortedKeys, + parent_nibbles: &[Nibbles], + ) -> Vec { + sorted_keys.remove_read_instructions(instructions); + let key_mentions = sorted_keys.key_mentions(instructions.len()); + let patch_set = &self.updater.patch_set; + + let mut leaf_indices = Vec::with_capacity(instructions.len()); + let it = instructions.iter().zip(parent_nibbles).enumerate(); + for (idx, ((key, instruction), nibbles)) in it { + let leaf_index = match (instruction, key_mentions[idx]) { + (TreeInstruction::Read, _) => 0, + // ^ Leaf indices are not used for read instructions. + (TreeInstruction::Write(_), KeyMention::First) => { + let leaf_index = match patch_set.get(nibbles) { + Some(Node::Leaf(leaf)) if leaf.full_key == *key => Some(leaf.leaf_index), + _ => None, + }; + leaf_index.unwrap_or_else(|| increment_counter(&mut self.leaf_count)) + } + (TreeInstruction::Write(_), KeyMention::SameAs(prev_idx)) => leaf_indices[prev_idx], + }; + leaf_indices.push(leaf_index); + } + leaf_indices + } + + #[allow(clippy::cast_precision_loss)] // unlikely to happen given practical leaf counts + fn finalize_with_proofs( + mut self, + hasher: &mut HasherWithStats<'_>, + root: InternalNode, + logs: Vec<(usize, TreeLogEntryWithProof)>, + ) -> (BlockOutputWithProofs, PatchSet) { + let logs = self.updater.finalize_logs(hasher, root, logs); + self.updater.metrics.report(); + + let patch = self + .updater + .patch_set + .finalize_without_hashing(self.leaf_count, hasher.as_ref()); + let block_output = BlockOutputWithProofs { + logs, + leaf_count: self.leaf_count, + }; + metrics::gauge!("merkle_tree.leaf_count", self.leaf_count as f64); + (block_output, patch) + } +} + +/// Mention of a key in a block: either the first mention, or the same mention as the specified +/// 0-based index in the block. +#[derive(Debug, Clone, Copy)] +enum KeyMention { + First, + SameAs(usize), +} + +impl SortedKeys { + fn remove_read_instructions(&mut self, instructions: &[(Key, TreeInstruction)]) { + debug_assert_eq!(instructions.len(), self.0.len()); + + self.0.retain(|(idx, key)| { + let (key_for_instruction, instruction) = &instructions[*idx]; + debug_assert_eq!(key_for_instruction, key); + matches!(instruction, TreeInstruction::Write(_)) + }); + } + + /// Determines for the original sequence of `Key`s whether a particular key mention + /// is the first one, or it follows after another mention. + fn key_mentions(&self, original_len: usize) -> Vec { + debug_assert!(original_len >= self.0.len()); + + let mut flags = vec![KeyMention::First; original_len]; + let [(mut first_key_mention, mut prev_key), tail @ ..] = self.0.as_slice() else { + return flags; + }; + + // Note that `SameAs(_)` doesn't necessarily reference the first mention of a key, + // just one with a lesser index. This is OK for our purposes. + for &(idx, key) in tail { + if prev_key == key { + if idx > first_key_mention { + flags[idx] = KeyMention::SameAs(first_key_mention); + } else { + debug_assert!(idx < first_key_mention); // all indices should be unique + flags[first_key_mention] = KeyMention::SameAs(idx); + first_key_mention = idx; + } + } else { + prev_key = key; + first_key_mention = idx; + } + } + flags + } +} + +/// [`TreeInstruction`] together with precomputed data necessary to efficiently parallelize +/// Merkle tree traversal. +#[derive(Debug)] +struct InstructionWithPrecomputes { + /// 0-based index of the instruction. + index: usize, + /// Key read / written by the instruction. 
+ key: Key, + instruction: TreeInstruction, + /// Nibbles for the parent node computed by [`Storage::load_ancestors()`]. + parent_nibbles: Nibbles, + /// Leaf index for the operation computed by [`Storage::compute_leaf_indices()`]. + /// Always 0 for reads. + leaf_index: u64, +} + +impl InstructionWithPrecomputes { + /// Creates groups of instructions to be used during parallelized tree traversal. + fn split( + instructions: Vec<(Key, TreeInstruction)>, + parent_nibbles: Vec, + leaf_indices: Vec, + ) -> [Vec; SUBTREE_COUNT] { + const EMPTY_VEC: Vec = Vec::new(); + // ^ Need to extract this to a constant to be usable as an array initializer. + + let mut parts = [EMPTY_VEC; SUBTREE_COUNT]; + let it = instructions + .into_iter() + .zip(parent_nibbles) + .zip(leaf_indices); + for (index, (((key, instruction), parent_nibbles), leaf_index)) in it.enumerate() { + let first_nibble = Nibbles::nibble(&key, 0); + let part = &mut parts[first_nibble as usize]; + part.push(Self { + index, + key, + instruction, + parent_nibbles, + leaf_index, + }); + } + parts + } +} + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + use crate::types::Root; + + const HASH: ValueHash = ValueHash::zero(); + + fn byte_key(byte: u8) -> Key { + Key::from_little_endian(&[byte; 32]) + } + + #[test] + fn sorting_keys() { + let keys = [4, 1, 5, 2, 3].map(byte_key); + let sorted_keys = SortedKeys::new(keys.into_iter()); + assert_eq!(sorted_keys.0, [1, 3, 4, 0, 2].map(|i| (i, keys[i]))); + } + + #[test] + fn computing_key_mentions() { + let keys = [4, 1, 3, 4, 3, 3].map(byte_key); + let sorted_keys = SortedKeys::new(keys.into_iter()); + let mentions = sorted_keys.key_mentions(6); + + assert_matches!( + mentions.as_slice(), + [ + KeyMention::First, KeyMention::First, KeyMention::First, + KeyMention::SameAs(0), KeyMention::SameAs(2), KeyMention::SameAs(i) + ] if *i == 2 || *i == 4 + ); + } + + #[test] + fn computing_leaf_indices() { + let db = prepare_db(); + let (instructions, expected_indices) = get_instructions_and_leaf_indices(); + let mut storage = Storage::new(&db, 1); + let sorted_keys = SortedKeys::new(instructions.iter().map(|(key, _)| *key)); + let parent_nibbles = storage.updater.load_ancestors(&sorted_keys, &db); + + let leaf_indices = + storage.compute_leaf_indices(&instructions, sorted_keys, &parent_nibbles); + assert_eq!(leaf_indices, expected_indices); + } + + fn prepare_db() -> PatchSet { + let mut db = PatchSet::default(); + let (_, patch) = + Storage::new(&db, 0).extend(&(), vec![(byte_key(2), HASH), (byte_key(1), HASH)]); + db.apply_patch(patch); + db + } + + fn get_instructions_and_leaf_indices() -> (Vec<(Key, TreeInstruction)>, Vec) { + let instructions_and_indices = vec![ + (byte_key(3), TreeInstruction::Read, 0), + (byte_key(1), TreeInstruction::Write(HASH), 2), + (byte_key(2), TreeInstruction::Read, 0), + (byte_key(3), TreeInstruction::Write(HASH), 3), + (byte_key(1), TreeInstruction::Read, 0), + (byte_key(3), TreeInstruction::Write(HASH), 3), + (byte_key(2), TreeInstruction::Write(HASH), 1), + (byte_key(0xc0), TreeInstruction::Write(HASH), 4), + (byte_key(2), TreeInstruction::Write(HASH), 1), + ]; + instructions_and_indices + .into_iter() + .map(|(key, instr, idx)| ((key, instr), idx)) + .unzip() + } + + #[test] + fn extending_storage_with_proofs() { + let db = prepare_db(); + let (instructions, expected_indices) = get_instructions_and_leaf_indices(); + let storage = Storage::new(&db, 1); + let (block_output, _) = storage.extend_with_proofs(&(), instructions); + 
assert_eq!(block_output.leaf_count, 4); + + assert_eq!(block_output.logs.len(), expected_indices.len()); + for (expected_idx, log) in expected_indices.into_iter().zip(&block_output.logs) { + match log.base { + TreeLogEntry::Inserted { leaf_index } + | TreeLogEntry::Updated { leaf_index, .. } => { + assert_eq!(leaf_index, expected_idx); + } + _ => {} + } + } + } + + #[test] + fn proofs_for_empty_storage() { + let db = PatchSet::default(); + let storage = Storage::new(&db, 0); + let instructions = vec![ + (byte_key(1), TreeInstruction::Read), + (byte_key(2), TreeInstruction::Read), + (byte_key(0xff), TreeInstruction::Read), + ]; + let (block_output, patch) = storage.extend_with_proofs(&(), instructions); + assert_eq!(block_output.leaf_count, 0); + let all_misses = block_output + .logs + .iter() + .all(|log| matches!(log.base, TreeLogEntry::ReadMissingKey)); + assert!(all_misses); + + assert_matches!(patch.roots[&0], Root::Empty); + } +} diff --git a/core/lib/merkle_tree2/src/storage/serialization.rs b/core/lib/merkle_tree2/src/storage/serialization.rs new file mode 100644 index 000000000000..3751f619124f --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/serialization.rs @@ -0,0 +1,416 @@ +//! Serialization of node types in the database. + +use std::str; + +use crate::{ + errors::{DeserializeError, DeserializeErrorKind, ErrorContext}, + types::{ + ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash, + HASH_SIZE, KEY_SIZE, + }, +}; + +/// Estimate for the byte size of LEB128-encoded `u64` values. 3 bytes fits values +/// up to `2 ** (3 * 7) = 2_097_152` (exclusive). +const LEB128_SIZE_ESTIMATE: usize = 3; + +impl LeafNode { + pub(super) fn deserialize(bytes: &[u8]) -> Result { + if bytes.len() < KEY_SIZE + HASH_SIZE { + return Err(DeserializeErrorKind::UnexpectedEof.into()); + } + let full_key = Key::from_big_endian(&bytes[..KEY_SIZE]); + let value_hash = ValueHash::from_slice(&bytes[KEY_SIZE..(KEY_SIZE + HASH_SIZE)]); + + let mut bytes = &bytes[(KEY_SIZE + HASH_SIZE)..]; + let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { + DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) + })?; + Ok(Self::new(full_key, value_hash, leaf_index)) + } + + pub(super) fn serialize(&self, buffer: &mut Vec) { + buffer.reserve(KEY_SIZE + HASH_SIZE + LEB128_SIZE_ESTIMATE); + let mut key_bytes = [0_u8; KEY_SIZE]; + self.full_key.to_big_endian(&mut key_bytes); + buffer.extend_from_slice(&key_bytes); + buffer.extend_from_slice(self.value_hash.as_ref()); + leb128::write::unsigned(buffer, self.leaf_index).unwrap(); + } +} + +#[derive(Debug, Clone, Copy)] +#[repr(u32)] +enum ChildKind { + None = 0, + Internal = 1, + Leaf = 2, +} + +impl ChildKind { + const MASK: u32 = 3; + + fn deserialize(bitmap_chunk: u32) -> Result { + match bitmap_chunk { + 0 => Ok(Self::None), + 1 => Ok(Self::Internal), + 2 => Ok(Self::Leaf), + _ => Err(DeserializeErrorKind::InvalidChildKind.into()), + } + } +} + +impl ChildRef { + /// Estimated capacity to serialize a `ChildRef`. 
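+    /// (The 3-byte LEB128 estimate for the child version plus 32 bytes for its hash;
+    /// e.g., versions below 128 actually occupy a single LEB128 byte.)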
+ const ESTIMATED_CAPACITY: usize = LEB128_SIZE_ESTIMATE + HASH_SIZE; + + fn deserialize(buffer: &mut &[u8], is_leaf: bool) -> Result { + if buffer.len() < HASH_SIZE { + let err = DeserializeErrorKind::UnexpectedEof; + return Err(err.with_context(ErrorContext::ChildRefHash)); + } + let (hash, rest) = buffer.split_at(HASH_SIZE); + let hash = ValueHash::from_slice(hash); + + *buffer = rest; + let version = leb128::read::unsigned(buffer) + .map_err(|err| DeserializeErrorKind::Leb128(err).with_context(ErrorContext::Version))?; + + Ok(Self { + hash, + version, + is_leaf, + }) + } + + fn serialize(&self, buffer: &mut Vec) { + buffer.extend_from_slice(self.hash.as_bytes()); + leb128::write::unsigned(buffer, self.version).unwrap(); + // ^ `unwrap()` is safe; writing to a `Vec` always succeeds + + // `self.is_leaf` is not serialized here, but rather in `InternalNode::serialize()` + } + + fn kind(&self) -> ChildKind { + if self.is_leaf { + ChildKind::Leaf + } else { + ChildKind::Internal + } + } +} + +impl InternalNode { + pub(super) fn deserialize(bytes: &[u8]) -> Result { + if bytes.len() < 4 { + let err = DeserializeErrorKind::UnexpectedEof; + return Err(err.with_context(ErrorContext::ChildrenMask)); + } + let (bitmap, mut bytes) = bytes.split_at(4); + let mut bitmap = u32::from_le_bytes([bitmap[0], bitmap[1], bitmap[2], bitmap[3]]); + if bitmap == 0 { + return Err(DeserializeErrorKind::EmptyInternalNode.into()); + } + + // This works because both non-empty `ChildKind`s have exactly one bit set + // in their binary representation. + let child_count = bitmap.count_ones(); + let mut this = Self::with_capacity(child_count as usize); + for i in 0..Self::CHILD_COUNT { + match ChildKind::deserialize(bitmap & ChildKind::MASK)? { + ChildKind::None => { /* skip */ } + ChildKind::Internal => { + let child_ref = ChildRef::deserialize(&mut bytes, false)?; + this.insert_child_ref(i, child_ref); + } + ChildKind::Leaf => { + let child_ref = ChildRef::deserialize(&mut bytes, true)?; + this.insert_child_ref(i, child_ref); + } + } + bitmap >>= 2; + } + Ok(this) + } + + pub(super) fn serialize(&self, buffer: &mut Vec) { + // Creates a bitmap specifying children existence and type (internal node or leaf). + // Each child occupies 2 bits in the bitmap (i.e., the entire bitmap is 32 bits), + // with ordering from least significant bits to most significant ones. + // `0b00` means no child, while bitmap chunks for existing children are determined by + // `ChildKind`. 
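+        // For example (worked by hand): an internal child at nibble 1 (`0b01`) and a leaf
+        // child at nibble 0xb (`0b10`) produce `bitmap = (1 << 2) | (2 << 22) = 0x0080_0004`,
+        // i.e. the little-endian bytes `[4, 0, 128, 0]` checked in the serialization tests
+        // below.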
+ let mut bitmap = 0_u32; + let mut child_count = 0; + for (i, child_ref) in self.children() { + let offset = 2 * u32::from(i); + bitmap |= (child_ref.kind() as u32) << offset; + child_count += 1; + } + + let additional_capacity = 4 + ChildRef::ESTIMATED_CAPACITY * child_count; + buffer.reserve(additional_capacity); + buffer.extend_from_slice(&bitmap.to_le_bytes()); + + for child_ref in self.child_refs() { + child_ref.serialize(buffer); + } + } +} + +impl Root { + pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| { + DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount) + })?; + let node = match leaf_count { + 0 => return Ok(Self::Empty), + 1 => Node::Leaf(LeafNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes)?), + }; + Ok(Self::new(leaf_count, node)) + } + + pub(super) fn serialize(&self, buffer: &mut Vec) { + match self { + Self::Empty => { + leb128::write::unsigned(buffer, 0 /* leaf_count */).unwrap(); + } + Self::Filled { leaf_count, node } => { + leb128::write::unsigned(buffer, (*leaf_count).into()).unwrap(); + node.serialize(buffer); + } + } + } +} + +impl Node { + pub(super) fn serialize(&self, buffer: &mut Vec) { + match self { + Self::Internal(node) => node.serialize(buffer), + Self::Leaf(leaf) => leaf.serialize(buffer), + } + } +} + +impl TreeTags { + /// Tags are serialized as a length-prefixed list of `(&str, &str)` tuples, where each + /// `&str` is length-prefixed as well. All lengths are encoded using LEB128. + fn deserialize(bytes: &mut &[u8]) -> Result { + let tag_count = leb128::read::unsigned(bytes).map_err(DeserializeErrorKind::Leb128)?; + let mut architecture = None; + let mut hasher = None; + let mut depth = None; + + for _ in 0..tag_count { + let key = Self::deserialize_str(bytes)?; + let value = Self::deserialize_str(bytes)?; + match key { + "architecture" => architecture = Some(value.to_owned()), + "hasher" => hasher = Some(value.to_owned()), + "depth" => { + let parsed = value.parse::().map_err(|err| { + DeserializeErrorKind::MalformedTag { + name: "depth", + err: err.into(), + } + })?; + depth = Some(parsed); + } + _ => return Err(DeserializeErrorKind::UnknownTag(key.to_owned()).into()), + } + } + Ok(Self { + architecture: architecture.ok_or(DeserializeErrorKind::MissingTag("architecture"))?, + hasher: hasher.ok_or(DeserializeErrorKind::MissingTag("hasher"))?, + depth: depth.ok_or(DeserializeErrorKind::MissingTag("depth"))?, + }) + } + + fn deserialize_str<'a>(bytes: &mut &'a [u8]) -> Result<&'a str, DeserializeErrorKind> { + let str_len = leb128::read::unsigned(bytes).map_err(DeserializeErrorKind::Leb128)?; + let str_len = usize::try_from(str_len).map_err(|_| DeserializeErrorKind::UnexpectedEof)?; + + if bytes.len() < str_len { + return Err(DeserializeErrorKind::UnexpectedEof); + } + let (s, rest) = bytes.split_at(str_len); + *bytes = rest; + str::from_utf8(s).map_err(DeserializeErrorKind::Utf8) + } + + fn serialize_str(bytes: &mut Vec, s: &str) { + leb128::write::unsigned(bytes, s.len() as u64).unwrap(); + bytes.extend_from_slice(s.as_bytes()); + } + + fn serialize(&self, buffer: &mut Vec) { + leb128::write::unsigned(buffer, 3).unwrap(); + Self::serialize_str(buffer, "architecture"); + Self::serialize_str(buffer, &self.architecture); + Self::serialize_str(buffer, "depth"); + Self::serialize_str(buffer, &self.depth.to_string()); + Self::serialize_str(buffer, "hasher"); + Self::serialize_str(buffer, &self.hasher); + } +} + +impl Manifest { + 
pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + let version_count = + leb128::read::unsigned(&mut bytes).map_err(DeserializeErrorKind::Leb128)?; + let tags = if bytes.is_empty() { + None + } else { + Some(TreeTags::deserialize(&mut bytes)?) + }; + + Ok(Self { + version_count, + tags, + }) + } + + pub(super) fn serialize(&self, buffer: &mut Vec) { + leb128::write::unsigned(buffer, self.version_count).unwrap(); + if let Some(tags) = &self.tags { + tags.serialize(buffer); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use zksync_types::H256; + + #[test] + fn serializing_manifest() { + let manifest = Manifest::new(42, &()); + let mut buffer = vec![]; + manifest.serialize(&mut buffer); + assert_eq!(buffer[0], 42); // version count + assert_eq!(buffer[1], 3); // number of tags + assert_eq!( + buffer[2..], + *b"\x0Carchitecture\x06AR16MT\x05depth\x03256\x06hasher\x08no_op256" + ); + // ^ length-prefixed tag names and values + + let manifest_copy = Manifest::deserialize(&buffer).unwrap(); + assert_eq!(manifest_copy, manifest); + } + + #[test] + fn manifest_serialization_errors() { + let manifest = Manifest::new(42, &()); + let mut buffer = vec![]; + manifest.serialize(&mut buffer); + + // Replace "architecture" -> "Architecture" + let mut mangled_buffer = buffer.clone(); + mangled_buffer[3] = b'A'; + let err = Manifest::deserialize(&mangled_buffer).unwrap_err(); + let err = err.to_string(); + assert!( + err.contains("unknown tag `Architecture` in tree manifest"), + "{err}" + ); + + let mut mangled_buffer = buffer.clone(); + mangled_buffer.truncate(mangled_buffer.len() - 1); + let err = Manifest::deserialize(&mangled_buffer).unwrap_err(); + let err = err.to_string(); + assert!(err.contains("unexpected end of input"), "{err}"); + + // Remove the `hasher` tag. + let mut mangled_buffer = buffer.clone(); + mangled_buffer[1] = 2; // decreased number of tags + let err = Manifest::deserialize(&mangled_buffer).unwrap_err(); + let err = err.to_string(); + assert!( + err.contains("missing required tag `hasher` in tree manifest"), + "{err}" + ); + } + + #[test] + fn serializing_leaf_node() { + let leaf = LeafNode::new(513.into(), H256([4; 32]), 42); + let mut buffer = vec![]; + leaf.serialize(&mut buffer); + assert_eq!(buffer[..30], [0; 30]); // padding for the key + assert_eq!(buffer[30..32], [2, 1]); // lower 2 bytes of the key + assert_eq!(buffer[32..64], [4; 32]); // value hash + assert_eq!(buffer[64], 42); // leaf index + assert_eq!(buffer.len(), 65); + + let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + assert_eq!(leaf_copy, leaf); + } + + fn create_internal_node() -> InternalNode { + let mut node = InternalNode::default(); + node.insert_child_ref(1, ChildRef::internal(3)); + node.child_ref_mut(1).unwrap().hash = H256([1; 32]); + node.insert_child_ref(0xb, ChildRef::leaf(2)); + node.child_ref_mut(0xb).unwrap().hash = H256([11; 32]); + node + } + + #[test] + fn serializing_internal_node() { + let node = create_internal_node(); + let mut buffer = vec![]; + node.serialize(&mut buffer); + assert_eq!(buffer[..4], [4, 0, 128, 0]); + // ^ bitmap (`4 == ChildKind::Internal << 2`, `128 == ChildKind::Leaf << 6`). + assert_eq!(buffer[4..36], [1; 32]); // hash of the child at 1 + assert_eq!(buffer[36], 3); // version of the child at 1 + assert_eq!(buffer[37..69], [11; 32]); // hash of the child at b + assert_eq!(buffer[69], 2); // version of the child at b + assert_eq!(buffer.len(), 70); + + // Check that the child count estimate works correctly. 
+ let bitmap = u32::from_le_bytes([4, 0, 128, 0]); + let child_count = bitmap.count_ones(); + assert_eq!(child_count, 2); + + let node_copy = InternalNode::deserialize(&buffer).unwrap(); + assert_eq!(node_copy, node); + } + + #[test] + fn serializing_empty_root() { + let root = Root::Empty; + let mut buffer = vec![]; + root.serialize(&mut buffer); + assert_eq!(buffer, [0]); + + let root_copy = Root::deserialize(&buffer).unwrap(); + assert_eq!(root_copy, root); + } + + #[test] + fn serializing_root_with_leaf() { + let leaf = LeafNode::new(513.into(), H256([4; 32]), 42); + let root = Root::new(1, leaf.into()); + let mut buffer = vec![]; + root.serialize(&mut buffer); + assert_eq!(buffer[0], 1); + + let root_copy = Root::deserialize(&buffer).unwrap(); + assert_eq!(root_copy, root); + } + + #[test] + fn serializing_root_with_internal_node() { + let node = create_internal_node(); + let root = Root::new(2, node.into()); + let mut buffer = vec![]; + root.serialize(&mut buffer); + assert_eq!(buffer[0], 2); + + let root_copy = Root::deserialize(&buffer).unwrap(); + assert_eq!(root_copy, root); + } +} diff --git a/core/lib/merkle_tree2/src/storage/tests.rs b/core/lib/merkle_tree2/src/storage/tests.rs new file mode 100644 index 000000000000..29fdc2ee2dd2 --- /dev/null +++ b/core/lib/merkle_tree2/src/storage/tests.rs @@ -0,0 +1,265 @@ +use assert_matches::assert_matches; + +use super::*; +use crate::hasher::{HasherWithStats, MerklePath}; +use zksync_types::{H256, U256}; + +const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); +const SECOND_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0100_0000]); +const THIRD_KEY: Key = U256([0, 0, 0, 0x_dead_d00d_1234_5678]); + +#[test] +fn inserting_entries_in_empty_database() { + let db = PatchSet::default(); + let mut updater = TreeUpdater::new(0, Root::Empty); + assert_eq!(updater.patch_set.version(), 0); + assert!(updater.root_node_mut().is_none()); + + let sorted_keys = SortedKeys::new([FIRST_KEY, SECOND_KEY, THIRD_KEY].into_iter()); + let parent_nibbles = updater.load_ancestors(&sorted_keys, &db); + assert_eq!(parent_nibbles, [Nibbles::EMPTY; 3]); + + updater.insert(FIRST_KEY, H256([1; 32]), &Nibbles::EMPTY, || 1); + + let root_node = updater.patch_set.get(&Nibbles::EMPTY).unwrap(); + let Node::Leaf(root_leaf) = root_node else { + panic!("Unexpected root node: {root_node:?}"); + }; + assert_eq!(root_leaf.full_key, FIRST_KEY); + assert_eq!(root_leaf.value_hash, H256([1; 32])); + + updater.insert(SECOND_KEY, H256([2; 32]), &Nibbles::EMPTY, || 2); + assert_storage_with_2_keys(&updater); + + updater.insert(THIRD_KEY, H256([3; 32]), &Nibbles::EMPTY, || 3); + assert_storage_with_3_keys(&updater); +} + +fn assert_storage_with_2_keys(updater: &TreeUpdater) { + // Check the internal nodes with a single child that should be created at keys + // '', 'd', 'de', ..., 'deadbeef'. + let internal_node_nibbles = (0..8).map(|i| { + let nibbles = Nibbles::new(&FIRST_KEY, i); + let next_nibble = Nibbles::nibble(&FIRST_KEY, i); + (nibbles, next_nibble) + }); + for (nibbles, next_nibble) in internal_node_nibbles { + let node = updater.patch_set.get(&nibbles).unwrap(); + let Node::Internal(node) = node else { + panic!("Unexpected node at {nibbles}: {node:?}"); + }; + assert_eq!(node.child_count(), 1); + let child_ref = node.child_ref(next_nibble).unwrap(); + assert_eq!(child_ref.version, 0); + assert!(!child_ref.is_leaf); + } + + // Check the final internal node with 2 leaf children at 'deadbeef0'. 
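+    // (`FIRST_KEY` and `SECOND_KEY` share their first 9 nibbles, `deadbeef0`, and diverge
+    // only at the 10th nibble: `0` vs `1`.)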
+ let nibbles = Nibbles::new(&FIRST_KEY, 9); + let node = updater.patch_set.get(&nibbles).unwrap(); + let Node::Internal(node) = node else { + panic!("Unexpected node at {nibbles}: {node:?}"); + }; + assert_eq!(node.child_count(), 2); + for next_nibble in [0, 1] { + let child_ref = node.child_ref(next_nibble).unwrap(); + assert_eq!(child_ref.version, 0); + assert!(child_ref.is_leaf); + } + + // Finally, check the leaves. + let first_leaf_nibbles = Nibbles::new(&FIRST_KEY, 10); + let node = updater.patch_set.get(&first_leaf_nibbles).unwrap(); + let Node::Leaf(leaf) = node else { + panic!("Unexpected node at {first_leaf_nibbles}: {node:?}"); + }; + assert_eq!(leaf.full_key, FIRST_KEY); + assert_eq!(leaf.value_hash, H256([1; 32])); + + let second_leaf_nibbles = Nibbles::new(&SECOND_KEY, 10); + assert_ne!(second_leaf_nibbles, first_leaf_nibbles); + let node = updater.patch_set.get(&second_leaf_nibbles).unwrap(); + let Node::Leaf(leaf) = node else { + panic!("Unexpected node at {second_leaf_nibbles}: {node:?}"); + }; + assert_eq!(leaf.full_key, SECOND_KEY); + assert_eq!(leaf.value_hash, H256([2; 32])); +} + +fn assert_storage_with_3_keys(updater: &TreeUpdater) { + // The 'dead' internal node should now contain 'b' and 'd' children. + let nibbles = Nibbles::new(&FIRST_KEY, 4); + let node = updater.patch_set.get(&nibbles).unwrap(); + let Node::Internal(node) = node else { + panic!("Unexpected node at {nibbles}: {node:?}"); + }; + assert_eq!(node.child_count(), 2); + + let child_ref = node.child_ref(0xb).unwrap(); + assert!(!child_ref.is_leaf); + let child_ref = node.child_ref(0xd).unwrap(); + assert!(child_ref.is_leaf); + + let third_leaf_nibbles = Nibbles::new(&THIRD_KEY, 5); + let node = updater.patch_set.get(&third_leaf_nibbles).unwrap(); + let Node::Leaf(leaf) = node else { + panic!("Unexpected node at {third_leaf_nibbles}: {node:?}"); + }; + assert_eq!(leaf.full_key, THIRD_KEY); + assert_eq!(leaf.value_hash, H256([3; 32])); +} + +#[test] +fn changing_child_ref_type() { + let mut updater = TreeUpdater::new(0, Root::Empty); + updater.insert(FIRST_KEY, H256([1; 32]), &Nibbles::EMPTY, || 1); + let e_key = U256([0, 0, 0, 0x_e000_0000_0000_0000]); + updater.insert(e_key, H256([2; 32]), &Nibbles::EMPTY, || 2); + + let node = updater.patch_set.get(&Nibbles::EMPTY).unwrap(); + let Node::Internal(node) = node else { + panic!("Unexpected root node: {node:?}"); + }; + assert!(node.child_ref(0xd).unwrap().is_leaf); + assert!(node.child_ref(0xe).unwrap().is_leaf); + + updater.insert(SECOND_KEY, H256([3; 32]), &Nibbles::EMPTY, || 3); + + let node = updater.patch_set.get(&Nibbles::EMPTY).unwrap(); + let Node::Internal(node) = node else { + panic!("Unexpected root node: {node:?}"); + }; + assert!(!node.child_ref(0xd).unwrap().is_leaf); + assert!(node.child_ref(0xe).unwrap().is_leaf); +} + +#[test] +fn inserting_node_in_non_empty_database() { + const E_KEY: U256 = U256([0, 0, 0, 0x_e000_0000_0000_0000]); + + let mut db = PatchSet::default(); + let storage = Storage::new(&db, 0); + let kvs = vec![(FIRST_KEY, H256([1; 32])), (SECOND_KEY, H256([2; 32]))]; + let (_, patch) = storage.extend(&(), kvs); + db.apply_patch(patch); + + let mut count = 2; + let mut leaf_index_fn = || increment_counter(&mut count); + let mut updater = TreeUpdater::new(1, db.root(0).unwrap()); + let sorted_keys = SortedKeys::new([THIRD_KEY, E_KEY, SECOND_KEY].into_iter()); + let parent_nibbles = updater.load_ancestors(&sorted_keys, &db); + assert_eq!(updater.metrics.db_reads, 10); + assert_eq!( + parent_nibbles, + [ + 
Nibbles::new(&THIRD_KEY, 4), // dead + Nibbles::EMPTY, + Nibbles::new(&SECOND_KEY, 10), // deadbeef01 + ] + ); + + let node = updater.patch_set.get(&Nibbles::EMPTY).unwrap(); + let Node::Internal(node) = node else { + panic!("unexpected root node: {node:?}"); + }; + // Check that child refs for the loaded children were updated. + assert_eq!(node.child_ref(0xd).unwrap().version, 1); + + let (op, _) = updater.insert( + THIRD_KEY, + H256([3; 32]), + &parent_nibbles[0], + &mut leaf_index_fn, + ); + assert_eq!(op, TreeLogEntry::insert(3)); + let (op, _) = updater.insert(E_KEY, H256::zero(), &parent_nibbles[1], &mut leaf_index_fn); + assert_eq!(op, TreeLogEntry::insert(4)); + let (op, _) = updater.insert( + SECOND_KEY, + H256([2; 32]), + &parent_nibbles[2], + &mut leaf_index_fn, + ); + assert_matches!(op, TreeLogEntry::Updated { leaf_index: 2, .. }); + assert_eq!(updater.metrics.new_internal_nodes, 0); + assert_eq!(updater.metrics.new_leaves, 2); + + // Check that all necessary child refs have updated versions. + let node = &updater.patch_set.get(&Nibbles::EMPTY).unwrap(); + let Node::Internal(node) = node else { + panic!("unexpected root node: {node:?}"); + }; + assert_eq!(node.child_ref(0xe).unwrap().version, 1); + + assert_storage_with_3_keys(&updater); +} + +#[test] +fn inserting_node_in_non_empty_database_with_moved_key() { + let mut db = PatchSet::default(); + let storage = Storage::new(&db, 0); + let kvs = vec![(FIRST_KEY, H256([1; 32])), (THIRD_KEY, H256([3; 32]))]; + let (_, patch) = storage.extend(&(), kvs); + db.apply_patch(patch); + + let mut updater = TreeUpdater::new(1, db.root(0).unwrap()); + let sorted_keys = SortedKeys::new([SECOND_KEY].into_iter()); + let parent_nibbles = updater.load_ancestors(&sorted_keys, &db); + assert_eq!( + parent_nibbles, + [Nibbles::new(&SECOND_KEY, 5)] // `deadb`, a leaf node + ); + assert_matches!( + updater.patch_set.get(&parent_nibbles[0]), + Some(Node::Leaf(_)) + ); + + let (op, _) = updater.insert(SECOND_KEY, H256([2; 32]), &parent_nibbles[0], || 3); + assert_eq!(op, TreeLogEntry::insert(3)); + assert_matches!( + updater.patch_set.get(&parent_nibbles[0]), + Some(Node::Internal(_)) + ); + assert_eq!(updater.metrics.new_leaves, 1); + assert_eq!(updater.metrics.moved_leaves, 1); +} + +#[test] +fn proving_keys_existence_and_absence() { + let mut updater = TreeUpdater::new(0, Root::Empty); + updater.patch_set.ensure_internal_root_node(); // Necessary for proofs to work. + updater.insert(FIRST_KEY, H256([1; 32]), &Nibbles::EMPTY, || 1); + + let mut hasher = (&() as &dyn HashTree).into(); + let (op, merkle_path) = updater.prove(&mut hasher, FIRST_KEY, &Nibbles::EMPTY); + assert_matches!(op, TreeLogEntry::Read { .. }); + let merkle_path = finalize_merkle_path(merkle_path, &mut hasher); + assert!(merkle_path.is_empty()); // all adjacent hashes correspond to empty subtrees + + let (op, merkle_path) = updater.prove(&mut hasher, SECOND_KEY, &Nibbles::EMPTY); + assert_matches!(op, TreeLogEntry::ReadMissingKey); + let merkle_path = finalize_merkle_path(merkle_path, &mut hasher); + assert_eq!(merkle_path.len(), 40); + + updater.insert(THIRD_KEY, H256([3; 32]), &Nibbles::EMPTY, || 2); + let (op, merkle_path) = updater.prove(&mut hasher, FIRST_KEY, &Nibbles::EMPTY); + assert_matches!(op, TreeLogEntry::Read { .. 
}); + let merkle_path = finalize_merkle_path(merkle_path, &mut hasher); + assert_eq!(merkle_path.len(), 18); // keys diverge at 18th bit + + let (op, merkle_path) = updater.prove(&mut hasher, SECOND_KEY, &Nibbles::EMPTY); + assert_matches!(op, TreeLogEntry::ReadMissingKey); + let merkle_path = finalize_merkle_path(merkle_path, &mut hasher); + assert_eq!(merkle_path.len(), 40); + + assert_eq!(updater.metrics.key_reads, 2); + assert_eq!(updater.metrics.missing_key_reads, 2); +} + +// Emulate Merkle path finalization. +fn finalize_merkle_path(mut path: MerklePath, hasher: &mut HasherWithStats<'_>) -> Vec { + for _ in 0..4 { + path.push(hasher, None); + } + path.into_inner() +} diff --git a/core/lib/merkle_tree2/src/types.rs b/core/lib/merkle_tree2/src/types.rs new file mode 100644 index 000000000000..dcfbddfeb291 --- /dev/null +++ b/core/lib/merkle_tree2/src/types.rs @@ -0,0 +1,646 @@ +//! Basic storage types. + +use std::{fmt, num::NonZeroU64}; + +use zksync_types::{H256, U256}; + +use crate::{ + hasher::{HashTree, InternalNodeCache}, + utils::SmallMap, +}; + +/// Size of a (leaf) tree key in bytes. +pub(crate) const KEY_SIZE: usize = 32; +/// Depth of the tree (= number of bits in `KEY_SIZE`). +pub(crate) const TREE_DEPTH: usize = KEY_SIZE * 8; +/// Size of a hashed value in bytes. +pub(crate) const HASH_SIZE: usize = 32; + +/// Instruction to read or write a tree value at a certain key. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeInstruction { + /// Read the current tree value. + Read, + /// Write the specified value. + Write(ValueHash), +} + +/// Tags associated with a tree. +#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub(crate) struct TreeTags { + pub architecture: String, + pub depth: usize, + pub hasher: String, +} + +impl TreeTags { + pub const ARCHITECTURE: &'static str = "AR16MT"; + + pub fn new(hasher: &dyn HashTree) -> Self { + Self { + architecture: Self::ARCHITECTURE.to_owned(), + hasher: hasher.name().to_owned(), + depth: TREE_DEPTH, + } + } + + pub fn assert_consistency(&self, hasher: &dyn HashTree) { + assert_eq!( + self.architecture, + Self::ARCHITECTURE, + "Unsupported tree architecture `{}`, expected `{}`", + self.architecture, + Self::ARCHITECTURE + ); + assert_eq!( + self.depth, TREE_DEPTH, + "Unexpected tree depth: expected {TREE_DEPTH}, got {}", + self.depth + ); + assert_eq!( + hasher.name(), + self.hasher, + "Mismatch between the provided tree hasher `{}` and the hasher `{}` used \ + in the database", + hasher.name(), + self.hasher + ); + } +} + +/// Version-independent information about the tree. +#[derive(Debug, Default, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub struct Manifest { + // Number of tree versions stored in the database. + pub(crate) version_count: u64, + pub(crate) tags: Option, +} + +impl Manifest { + pub(crate) fn new(version_count: u64, hasher: &dyn HashTree) -> Self { + Self { + version_count, + tags: Some(TreeTags::new(hasher)), + } + } +} + +pub(crate) type NibblesBytes = [u8; KEY_SIZE]; + +/// Unversioned key (a sequence of nibbles) in a radix-16 Merkle tree. 
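+///
+/// For example, the first 5 nibbles of a key starting with the bytes `0xde 0xad 0xbe ...`
+/// are stored as `nibble_count == 5` with big-endian `bytes` beginning `[0xde, 0xad, 0xb0, 0, ...]`
+/// (the unused half-byte and all trailing bytes are zeroed so that `Eq` / `Ord` / `Hash`
+/// behave consistently); such a value is displayed as `"deadb"`.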
+#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub(crate) struct Nibbles { + nibble_count: usize, + bytes: NibblesBytes, +} + +impl Nibbles { + pub const EMPTY: Self = Self { + nibble_count: 0, + bytes: [0_u8; KEY_SIZE], + }; + + pub fn nibble(key: &Key, index: usize) -> u8 { + const NIBBLES_IN_U64: usize = 16; // 8 bytes * 2 nibbles / byte + + debug_assert!(index < 2 * KEY_SIZE); + // Since the `Key` layout is little-endian, we reverse indexing of `u64`: + // nibbles 0..=15 are in the final `u64`, etc. + let u64_idx = 3 - index / NIBBLES_IN_U64; + // The shift in `u64` is reversed as well: the 0th nibble needs the largest shift + // `60 = 15 * 4`, the 1st one - 56, etc. + let shift_in_u64 = (NIBBLES_IN_U64 - 1 - index % NIBBLES_IN_U64) * 4; + ((key.0[u64_idx] >> shift_in_u64) & 0x0f) as u8 + } + + pub fn new(key: &Key, nibble_count: usize) -> Self { + debug_assert!(nibble_count <= 2 * KEY_SIZE); + let mut bytes = [0_u8; KEY_SIZE]; + key.to_big_endian(&mut bytes); + + // Unused bytes (and, if appropriate, the unused nibble in the last used byte) + // needs to be zeroized in order for `Ord` / `Eq` / `Hash` traits to work properly. + if nibble_count % 2 == 1 { + bytes[nibble_count / 2] &= 0xf0; + } + let meaningful_bytes = (nibble_count + 1) / 2; + for byte in bytes.iter_mut().skip(meaningful_bytes) { + *byte = 0; + } + + Self { + nibble_count, + bytes, + } + } + + pub fn from_parts(bytes: NibblesBytes, nibble_count: usize) -> Self { + debug_assert!(nibble_count <= 2 * KEY_SIZE); + Self { + nibble_count, + bytes, + } + } + + pub fn single(nibble: u8) -> Self { + assert!(nibble < 16); + let mut bytes = NibblesBytes::default(); + bytes[0] = nibble << 4; + Self::from_parts(bytes, 1) + } + + pub fn with_version(self, version: u64) -> NodeKey { + NodeKey { + version, + nibbles: self, + } + } + + pub fn nibble_count(&self) -> usize { + self.nibble_count + } + + pub fn bytes(&self) -> &NibblesBytes { + &self.bytes + } + + /// Extracts the last nibble and the parent sequence of nibbles + /// (i.e., one with the last nibble truncated). If this sequence of nibbles is empty, + /// returns `None`. + pub fn split_last(self) -> Option<(Self, u8)> { + if self.nibble_count == 0 { + return None; + } + + let mut truncated_bytes = self.bytes; + let last_byte_idx = (self.nibble_count - 1) / 2; + let last_byte = self.bytes[last_byte_idx]; + let last_nibble = if self.nibble_count % 2 == 1 { + truncated_bytes[last_byte_idx] = 0; + last_byte >> 4 + } else { + truncated_bytes[last_byte_idx] &= 0xf0; + last_byte & 15 + }; + + let parent = Self { + nibble_count: self.nibble_count - 1, + bytes: truncated_bytes, + }; + Some((parent, last_nibble)) + } + + /// Pushes a nibble to the end of this sequence and returns the resulting nibble. Returns `None` + /// if this nibble is full. 
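+    ///
+    /// For example, pushing `0xb` onto the nibbles `"deadbe"` yields `"deadbeb"`, while
+    /// pushing onto a full 64-nibble sequence returns `None` (cf. the `manipulating_nibbles`
+    /// test).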
+ pub fn push(self, nibble: u8) -> Option { + if self.nibble_count == KEY_SIZE * 2 { + return None; + } + + let mut child = self; + child.nibble_count += 1; + let last_byte_idx = self.nibble_count / 2; + if child.nibble_count % 2 == 0 { + // The new `nibble` is 4 lower bits + child.bytes[last_byte_idx] += nibble; + } else { + // The new `nibble` is 4 upper bits of a new byte + child.bytes[last_byte_idx] = nibble << 4; + } + Some(child) + } +} + +impl fmt::Display for Nibbles { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let full_bytes = self.bytes.iter().take(self.nibble_count / 2); + for &byte in full_bytes { + write!(formatter, "{byte:02x}")?; + } + if self.nibble_count % 2 == 1 { + let last_byte = self.bytes[self.nibble_count / 2]; + let last_nibble = last_byte >> 4; + write!(formatter, "{last_nibble:x}")?; + } + Ok(()) + } +} + +impl fmt::Debug for Nibbles { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +/// Versioned key in a radix-16 Merkle tree. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct NodeKey { + pub(crate) version: u64, + pub(crate) nibbles: Nibbles, +} + +impl NodeKey { + pub(crate) const fn empty(version: u64) -> Self { + Self { + version, + nibbles: Nibbles::EMPTY, + } + } + + pub fn is_empty(&self) -> bool { + self.nibbles.nibble_count == 0 + } + + #[allow(clippy::cast_possible_truncation)] + pub(crate) fn to_db_key(self) -> Vec { + let nibbles_byte_len = (self.nibbles.nibble_count + 1) / 2; + // ^ equivalent to ceil(self.nibble_count / 2) + let mut bytes = Vec::with_capacity(9 + nibbles_byte_len); + // ^ 8 bytes for `version` + 1 byte for nibble count + bytes.extend_from_slice(&self.version.to_be_bytes()); + bytes.push(self.nibbles.nibble_count as u8); + // ^ conversion is safe: nibble_count <= 64 + bytes.extend_from_slice(&self.nibbles.bytes[..nibbles_byte_len]); + bytes + } +} + +impl fmt::Display for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "{}:{}", self.version, self.nibbles) + } +} + +/// Key stored in the tree. +pub type Key = U256; +/// Hashed value stored in the tree. +pub type ValueHash = H256; + +/// Leaf node of the tree. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct LeafNode { + pub(crate) full_key: Key, + pub(crate) value_hash: ValueHash, + pub(crate) leaf_index: u64, +} + +impl LeafNode { + pub(crate) fn new(full_key: Key, value_hash: ValueHash, leaf_index: u64) -> Self { + Self { + full_key, + value_hash, + leaf_index, + } + } +} + +/// Reference to a child in an [`InternalNode`]. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub(crate) struct ChildRef { + pub hash: ValueHash, + pub version: u64, + pub is_leaf: bool, +} + +impl ChildRef { + /// Creates a reference to a child with `value_hash` left blank (it will be computed later). + pub fn leaf(version: u64) -> Self { + Self { + hash: ValueHash::default(), + version, + is_leaf: true, + } + } + + pub fn internal(version: u64) -> Self { + Self { + hash: ValueHash::default(), + version, + is_leaf: false, + } + } +} + +/// Internal node in AR16MT containing up to 16 children. 
+#[derive(Default)] +pub struct InternalNode { + children: SmallMap, + cache: Option>, +} + +impl Clone for InternalNode { + fn clone(&self) -> Self { + Self { + children: self.children.clone(), + cache: None, + // ^ The cache shouldn't theoretically get invalidated as long as the tree operation + // mode doesn't change, but we drop it just to be safe. + } + } +} + +#[cfg(test)] +impl PartialEq for InternalNode { + fn eq(&self, other: &Self) -> bool { + self.children == other.children + } +} + +impl fmt::Debug for InternalNode { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut map = formatter.debug_map(); + for (nibble, child_ref) in self.children() { + let nibble = format!("{nibble:x}"); + map.entry(&nibble, child_ref); + } + map.finish() + } +} + +impl InternalNode { + /// Number of children in an internal node (= tree radix). + pub(crate) const CHILD_COUNT: u8 = 16; + + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + cache: None, + children: SmallMap::with_capacity(capacity), + } + } + + pub(crate) fn child_count(&self) -> usize { + self.children.len() + } + + pub(crate) fn cache_mut(&mut self) -> Option<&mut InternalNodeCache> { + self.cache.as_deref_mut() + } + + pub(crate) fn set_cache(&mut self, cache: Box) -> &mut InternalNodeCache { + debug_assert!(self.cache.is_none()); + self.cache.get_or_insert(cache) + } + + pub(crate) fn children(&self) -> impl Iterator + '_ { + self.children.iter() + } + + pub(crate) fn child_refs(&self) -> impl Iterator + '_ { + self.children.values() + } + + pub(crate) fn child_hashes(&self) -> [Option; Self::CHILD_COUNT as usize] { + let mut hashes = [None; Self::CHILD_COUNT as usize]; + for (nibble, child_ref) in self.children.iter() { + hashes[nibble as usize] = Some(child_ref.hash); + } + hashes + } + + pub(crate) fn child_ref(&self, nibble: u8) -> Option<&ChildRef> { + self.children.get(nibble) + } + + pub(crate) fn child_ref_mut(&mut self, nibble: u8) -> Option<&mut ChildRef> { + self.children.get_mut(nibble) + } + + pub(crate) fn insert_child_ref(&mut self, nibble: u8, child_ref: ChildRef) { + self.children.insert(nibble, child_ref); + } +} + +#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub enum Node { + Internal(InternalNode), + Leaf(LeafNode), +} + +impl From for Node { + fn from(leaf: LeafNode) -> Self { + Self::Leaf(leaf) + } +} + +impl From for Node { + fn from(node: InternalNode) -> Self { + Self::Internal(node) + } +} + +/// Root node of the tree. Besides a [`Node`], contains the general information about the tree +/// (e.g., the number of leaves). +#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub enum Root { + /// Root for an empty tree. + Empty, + /// Root for a tree with at least one leaf. + Filled { + /// Number of leaves in the tree. + leaf_count: NonZeroU64, + /// Root node of the tree. + node: Node, + }, +} + +impl Root { + pub(crate) fn new(leaf_count: u64, node: Node) -> Self { + Self::Filled { + leaf_count: NonZeroU64::new(leaf_count).unwrap(), + node, + } + } + + pub(crate) fn leaf_count(&self) -> u64 { + match self { + Self::Empty => 0, + Self::Filled { leaf_count, .. } => (*leaf_count).into(), + } + } +} + +/// Output of inserting a block of entries into a Merkle tree. +#[derive(Debug, PartialEq, Eq)] +pub struct BlockOutput { + /// The new hash of the tree. + pub root_hash: ValueHash, + /// The number of leaves in the tree after the update. 
+ pub leaf_count: u64, + /// Information about each insertion / update operation in the order of application. + pub logs: Vec, +} + +/// Information about an the effect of a [`TreeInstruction`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeLogEntry { + /// A node was inserted into the tree. + Inserted { + /// Index of the inserted node. + leaf_index: u64, + }, + /// A node with the specified index was updated. + Updated { + /// Index of the updated node. + leaf_index: u64, + /// Hash of the previous value. + previous_value: ValueHash, + }, + /// A node was read from the tree. + Read { + /// Index of the read node. + leaf_index: u64, + /// Hash of the read value. + value: ValueHash, + }, + /// A missing key was read. + ReadMissingKey, +} + +impl TreeLogEntry { + pub(crate) fn insert(leaf_index: u64) -> Self { + Self::Inserted { leaf_index } + } + + pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { + Self::Updated { + leaf_index, + previous_value, + } + } + + pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { + Self::Read { leaf_index, value } + } + + pub(crate) fn is_read(&self) -> bool { + matches!(self, Self::Read { .. } | Self::ReadMissingKey) + } +} + +/// Extended output of inserting a block of entries into a Merkle tree that contains +/// Merkle proofs for each operation. +#[derive(Debug)] +pub struct BlockOutputWithProofs { + /// Extended information about each insertion / update operation in the order of application. + pub logs: Vec, + /// The number of leaves in the tree after the update. + pub leaf_count: u64, +} + +impl BlockOutputWithProofs { + /// Returns the final root hash of the Merkle tree. + pub fn root_hash(&self) -> Option { + Some(self.logs.last()?.root_hash) + } +} + +/// [`TreeLogEntry`] together with its authenticity proof. +#[derive(Debug)] +pub struct TreeLogEntryWithProof
<P = Vec<ValueHash>
> { + /// Log entry about an atomic operation on the tree. + pub base: TreeLogEntry, + /// Merkle path to prove the log authenticity. The path consists of up to 256 hashes + /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// the root level. + /// + /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning + /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. + pub merkle_path: P, + /// Root tree hash after the operation. + pub root_hash: ValueHash, +} + +#[cfg(test)] +mod tests { + use super::*; + + // `U256` uses little-endian `u64` ordering; i.e., this is + // 0x_dead_beef_0000_0000_.._0000. + const TEST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); + + #[test] + fn accessing_nibbles_in_key() { + let start_nibbles = [0xd, 0xe, 0xa, 0xd, 0xb, 0xe, 0xe, 0xf]; + for (i, nibble) in start_nibbles.into_iter().enumerate() { + assert_eq!(Nibbles::nibble(&TEST_KEY, i), nibble); + } + for i in 8..(2 * KEY_SIZE) { + assert_eq!(Nibbles::nibble(&TEST_KEY, i), 0); + } + } + + #[test] + fn nibbles_and_node_key_display() { + let nibbles = Nibbles::new(&TEST_KEY, 5); + assert_eq!(nibbles.to_string(), "deadb"); + + let nibbles = Nibbles::new(&TEST_KEY, 6); + assert_eq!(nibbles.to_string(), "deadbe"); + + let nibbles = Nibbles::new(&TEST_KEY, 9); + assert_eq!(nibbles.to_string(), "deadbeef0"); + + let node_key = nibbles.with_version(3); + assert_eq!(node_key.to_string(), "3:deadbeef0"); + } + + #[test] + fn manipulating_nibbles() { + let nibbles = Nibbles::new(&TEST_KEY, 0); + let child = nibbles.push(0xb).unwrap(); + assert_eq!(child.to_string(), "b"); + + let nibbles = Nibbles::new(&TEST_KEY, 6); + let child = nibbles.push(0xb).unwrap(); + assert_eq!(child.to_string(), "deadbeb"); + + let nibbles = Nibbles::new(&TEST_KEY, 7); + let child = nibbles.push(0xb).unwrap(); + assert_eq!(child.to_string(), "deadbeeb"); + + let nibbles = Nibbles::new(&TEST_KEY, 64); + assert!(nibbles.push(0xb).is_none()); + } + + #[test] + fn node_key_serialization() { + let nibbles = Nibbles::new(&TEST_KEY, 6); + let node_key = nibbles.with_version(3); + + let serialized_key = node_key.to_db_key(); + assert_eq!( + serialized_key, + [0, 0, 0, 0, 0, 0, 0, 3, 6, 0xde, 0xad, 0xbe] + ); + // ^ big-endian u64 version, then u8 nibble count, then nibbles + + let nibbles = Nibbles::new(&TEST_KEY, 7); + let node_key = nibbles.with_version(3); + + let serialized_key = node_key.to_db_key(); + assert_eq!( + serialized_key, + [0, 0, 0, 0, 0, 0, 0, 3, 7, 0xde, 0xad, 0xbe, 0xe0] + ); + // ^ the last byte must be truncated + } + + #[test] + fn nibbles_created_from_different_sources_can_be_equal() { + let nibbles = Nibbles::new(&TEST_KEY, 1); + let other_key = U256([0, 0, 0, 0x_d000_0000_0000_0000]); + let other_nibbles = Nibbles::new(&other_key, 1); + assert_eq!(nibbles, other_nibbles); + + let nibbles = Nibbles::new(&TEST_KEY, 2); + let other_nibbles = Nibbles::new(&other_key, 2); + assert_ne!(nibbles, other_nibbles); + assert!(nibbles > other_nibbles); + } +} diff --git a/core/lib/merkle_tree2/src/utils.rs b/core/lib/merkle_tree2/src/utils.rs new file mode 100644 index 000000000000..5faedf597162 --- /dev/null +++ b/core/lib/merkle_tree2/src/utils.rs @@ -0,0 +1,255 @@ +//! Misc utils used in tree algorithms. + +use std::{iter::Peekable, vec}; + +use crate::types::Key; + +/// Map with keys in the range `0..16`. 
+///
+/// This data type is more memory-efficient than a `Box<[Option<_>; 16]>`, and more
+/// computationally efficient than a `HashMap<_, _>`.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct SmallMap<V> {
+    // Bitmap with i-th bit set to 1 if key `i` is in the map.
+    bitmap: u16,
+    // Values in the order of keys.
+    values: Vec<V>,
+}
+
+impl<V> Default for SmallMap<V> {
+    fn default() -> Self {
+        Self {
+            bitmap: 0,
+            values: Vec::new(),
+        }
+    }
+}
+
+impl<V> SmallMap<V> {
+    const CAPACITY: u8 = 16;
+
+    pub fn with_capacity(capacity: usize) -> Self {
+        assert!(
+            capacity <= usize::from(Self::CAPACITY),
+            "capacity is too large"
+        );
+        Self {
+            bitmap: 0,
+            values: Vec::with_capacity(capacity),
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.bitmap.count_ones() as usize
+    }
+
+    pub fn get(&self, index: u8) -> Option<&V> {
+        assert!(index < Self::CAPACITY, "index is too large");
+
+        let mask = 1 << u16::from(index);
+        if self.bitmap & mask == 0 {
+            None
+        } else {
+            // Zero out all bits with index `index` and higher, then compute the number
+            // of remaining bits (efficient on modern CPU architectures which have a dedicated
+            // CTPOP instruction). This is the number of set bits with a lower index,
+            // which is equal to the index of the value in `self.values`.
+            let index = (self.bitmap & (mask - 1)).count_ones();
+            Some(&self.values[index as usize])
+        }
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = (u8, &V)> + '_ {
+        Self::indices(self.bitmap).zip(&self.values)
+    }
+
+    fn indices(bitmap: u16) -> impl Iterator<Item = u8> {
+        (0..Self::CAPACITY).filter(move |&index| {
+            let mask = 1 << u16::from(index);
+            bitmap & mask != 0
+        })
+    }
+
+    pub fn values(&self) -> impl Iterator<Item = &V> + '_ {
+        self.values.iter()
+    }
+
+    pub fn get_mut(&mut self, index: u8) -> Option<&mut V> {
+        assert!(index < Self::CAPACITY, "index is too large");
+
+        let mask = 1 << u16::from(index);
+        if self.bitmap & mask == 0 {
+            None
+        } else {
+            let index = (self.bitmap & (mask - 1)).count_ones();
+            Some(&mut self.values[index as usize])
+        }
+    }
+
+    pub fn insert(&mut self, index: u8, value: V) {
+        assert!(index < Self::CAPACITY, "index is too large");
+
+        let mask = 1 << u16::from(index);
+        let index = (self.bitmap & (mask - 1)).count_ones() as usize;
+        if self.bitmap & mask == 0 {
+            // The index is not set currently.
+            self.bitmap |= mask;
+            self.values.insert(index, value);
+        } else {
+            // The index is set.
+            self.values[index] = value;
+        }
+    }
+}
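The `get`/`insert` methods above rely on a compact rank trick: the 16-bit `bitmap` records which keys are present, and the count of set bits below a key gives that key's slot in the dense `values` vector. A minimal standalone sketch of the same computation (editorial illustration only; `small_map_slot` is a made-up helper, not part of this patch):

```rust
/// Hypothetical helper mirroring the lookup logic of `SmallMap::get`.
fn small_map_slot(bitmap: u16, index: u8) -> Option<usize> {
    assert!(index < 16, "index is too large");
    let mask = 1_u16 << u16::from(index);
    if bitmap & mask == 0 {
        return None; // the key is absent
    }
    // Set bits below `index` = number of values preceding this key in the dense vector.
    Some((bitmap & (mask - 1)).count_ones() as usize)
}

fn main() {
    // Mirrors the `small_map_operations` test: keys 0, 2 and 7 are present,
    // so the dense values vector would be ["0", "2", "7"].
    let bitmap = 0b_1000_0101;
    assert_eq!(small_map_slot(bitmap, 0), Some(0));
    assert_eq!(small_map_slot(bitmap, 2), Some(1));
    assert_eq!(small_map_slot(bitmap, 7), Some(2));
    assert_eq!(small_map_slot(bitmap, 5), None);
}
```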
+
+pub(crate) fn increment_counter(counter: &mut u64) -> u64 {
+    *counter += 1;
+    *counter
+}
+
+pub(crate) fn find_diverging_bit(lhs: Key, rhs: Key) -> usize {
+    let diff = lhs ^ rhs;
+    diff.leading_zeros() as usize
+}
+
+/// Merges several vectors of items into a single vector, where each original vector
+/// and the resulting vector are ordered by the item index (the first element of the tuple
+/// in the original vectors).
+///
+/// # Return value
+///
+/// Returns the merged values, each accompanied with a 0-based index of the original part
+/// where the value is coming from.
+pub(crate) fn merge_by_index<T>(parts: Vec<Vec<(u64, T)>>) -> Vec<(usize, T)> {
+    let total_len: usize = parts.iter().map(Vec::len).sum();
+    let iterators = parts
+        .into_iter()
+        .map(|part| part.into_iter().peekable())
+        .collect();
+    let merging_iter = MergingIter {
+        iterators,
+        total_len,
+    };
+    merging_iter.collect()
+}
+
+#[derive(Debug)]
+struct MergingIter<T> {
+    iterators: Vec<Peekable<vec::IntoIter<(u64, T)>>>,
+    total_len: usize,
+}
+
+impl<T> Iterator for MergingIter<T> {
+    type Item = (usize, T);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let iterators = self.iterators.iter_mut().enumerate();
+        let items = iterators.filter_map(|(iter_idx, it)| it.peek().map(|next| (iter_idx, next)));
+        let (min_iter_idx, _) = items.min_by_key(|(_, (idx, _))| *idx)?;
+
+        let (_, item) = self.iterators[min_iter_idx].next()?;
+        Some((min_iter_idx, item))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.total_len, Some(self.total_len))
+    }
+}
+
+impl<T> ExactSizeIterator for MergingIter<T> {}
+
+#[cfg(test)]
+mod tests {
+    use zksync_types::U256;
+
+    use super::*;
+
+    #[test]
+    fn small_map_operations() {
+        let mut map = SmallMap::default();
+        map.insert(2, "2");
+        assert_eq!(map.bitmap, 0b_0100);
+        assert_eq!(map.values, ["2"]);
+        assert_eq!(map.get(2), Some(&"2"));
+        assert_eq!(map.get(0), None);
+        assert_eq!(map.get(15), None);
+        assert_eq!(map.iter().collect::<Vec<_>>(), [(2, &"2")]);
+
+        map.insert(0, "0");
+        assert_eq!(map.bitmap, 0b_0101);
+        assert_eq!(map.values, ["0", "2"]);
+        assert_eq!(map.get(2), Some(&"2"));
+        assert_eq!(map.get(0), Some(&"0"));
+        assert_eq!(map.get(15), None);
+        assert_eq!(map.iter().collect::<Vec<_>>(), [(0, &"0"), (2, &"2")]);
+
+        map.insert(7, "7");
+        assert_eq!(map.bitmap, 0b_1000_0101);
+        assert_eq!(map.values, ["0", "2", "7"]);
+        assert_eq!(map.get(7), Some(&"7"));
+        assert_eq!(map.get(2), Some(&"2"));
+        assert_eq!(map.get(0), Some(&"0"));
+        assert_eq!(
+            map.iter().collect::<Vec<_>>(),
+            [(0, &"0"), (2, &"2"), (7, &"7")]
+        );
+
+        map.insert(2, "2!");
+        assert_eq!(map.get(7), Some(&"7"));
+        assert_eq!(map.get(2), Some(&"2!"));
+        assert_eq!(map.get(0), Some(&"0"));
+        assert_eq!(
+            map.iter().collect::<Vec<_>>(),
+            [(0, &"0"), (2, &"2!"), (7, &"7")]
+        );
+    }
+
+    #[test]
+    fn small_map_works_correctly_for_all_key_sets() {
+        for bitmap in 0_u32..65_536 {
+            let values = (0_u8..16).filter(|&i| bitmap & (1 << u32::from(i)) != 0);
+            let values: Vec<_> = values.collect();
+
+            let mut map = SmallMap::with_capacity(values.len());
+            for &value in &values {
+                map.insert(value, value);
+            }
+
+            assert_eq!(map.len(), values.len());
+            for i in 0..16 {
+                assert_eq!(map.get(i).copied(), values.contains(&i).then_some(i));
+                assert_eq!(map.get_mut(i).copied(), values.contains(&i).then_some(i));
+            }
+            assert_eq!(map.values().copied().collect::<Vec<_>>(), values);
+
+            let values_with_indices: Vec<_> = values.iter().map(|i| (*i, i)).collect();
+            assert_eq!(map.iter().collect::<Vec<_>>(), values_with_indices);
+        }
+    }
+
+    #[test]
+    fn finding_diverging_bit() {
+        let key = U256([0x_dead_beef_c0ff_eec0; 4]);
+        assert_eq!(find_diverging_bit(key, key), 256);
+        for i in 0..256 {
+            let other_key = key ^ (U256::one() << i);
+            assert_eq!(find_diverging_bit(key, other_key), 255 - i);
+        }
+    }
+
+    #[test]
+    fn merging_by_index() {
+        let items = vec![
+            vec![(1, "1"), (5, "5"), (6, "6")],
+            vec![(0, "0"), (4, "4"), (7, "7")],
+            vec![(2, "2"), (3, "3")],
+        ];
+        let merged = merge_by_index(items);
+
+        #[rustfmt::skip] // one array item per line looks uglier
+        assert_eq!(
+            merged,
+            [(1, "0"), (0, "1"), (2, "2"), (2, "3"), (1, "4"), (0, "5"), (0, "6"), (1, "7")]
+        );
+    }
+}
diff --git
a/core/lib/merkle_tree2/tests/integration/common.rs b/core/lib/merkle_tree2/tests/integration/common.rs new file mode 100644 index 000000000000..dff7c8ca012c --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/common.rs @@ -0,0 +1,13 @@ +//! Shared functionality. + +use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; + +pub fn generate_key_value_pairs(indexes: impl Iterator) -> Vec<(U256, H256)> { + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let kvs = indexes.map(|idx| { + let key = H256::from_low_u64_be(idx); + let key = StorageKey::new(AccountTreeId::new(address), key); + (key.hashed_key_u256(), H256::from_low_u64_be(idx + 1)) + }); + kvs.collect() +} diff --git a/core/lib/merkle_tree2/tests/integration/consistency.rs b/core/lib/merkle_tree2/tests/integration/consistency.rs new file mode 100644 index 000000000000..b2833c31e1a4 --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/consistency.rs @@ -0,0 +1,69 @@ +//! Sort of fuzz testing for Merkle tree consistency checks. Should run in the release mode +//! for efficiency. + +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use tempfile::TempDir; + +use zksync_merkle_tree2::{Database, MerkleTree, RocksDBWrapper}; +use zksync_storage::{db, rocksdb::WriteBatch}; + +use crate::common::generate_key_value_pairs; + +// Something (maybe RocksDB) makes the test below work very slowly in the debug mode; +// thus, the number of test cases is conditionally reduced. +#[cfg(debug_assertions)] +const ITER_COUNT: usize = 10; +#[cfg(not(debug_assertions))] +const ITER_COUNT: usize = 5_000; + +/// Tests that if a single key is removed from the DB, or a single bit is changed in a value, +/// the tree does not pass a consistency check. +#[test] +fn five_thousand_angry_monkeys_vs_merkle_tree() { + const RNG_SEED: u64 = 42; + + let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); + let mut db = RocksDBWrapper::new(&dir); + let kvs = generate_key_value_pairs(0..100); + let (_, patch) = MerkleTree::new(&db).extend(kvs); + db.apply_patch(patch); + + MerkleTree::new(&db).verify_consistency(0).unwrap(); + + let mut raw_db = db.into_inner(); + let cf = raw_db.cf_merkle_tree_handle(db::MerkleTreeColumnFamily::Tree); + // Load all key-node pairs from the 0-th version of the tree. + let raw_kvs: Vec<_> = raw_db.prefix_iterator_cf(cf, [0; 8]).collect(); + assert!(raw_kvs.len() > 100); + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + for _ in 0..ITER_COUNT { + let (key, value) = raw_kvs.choose(&mut rng).unwrap(); + let should_remove = rng.gen(); + + let mut batch = WriteBatch::default(); + let cf = raw_db.cf_merkle_tree_handle(db::MerkleTreeColumnFamily::Tree); + if should_remove { + println!("deleting value at {key:?}"); + batch.delete_cf(cf, key); + } else { + let mut mangled_value = value.to_vec(); + let mangled_idx = rng.gen_range(0..mangled_value.len()); + mangled_value[mangled_idx] ^= 1; + println!("mangling byte {mangled_idx} of the value at {key:?}"); + batch.put_cf(cf, key, mangled_value); + } + raw_db.write(batch).unwrap(); + + let db = RocksDBWrapper::from(raw_db); + let err = MerkleTree::new(&db).verify_consistency(0).unwrap_err(); + println!("{err}"); + + // Restore the value back so that it doesn't influence the following cases. 
+ raw_db = db.into_inner(); + let cf = raw_db.cf_merkle_tree_handle(db::MerkleTreeColumnFamily::Tree); + let mut reverse_batch = WriteBatch::default(); + reverse_batch.put_cf(cf, key, value); + raw_db.write(reverse_batch).unwrap(); + } +} diff --git a/core/lib/merkle_tree2/tests/integration/domain.rs b/core/lib/merkle_tree2/tests/integration/domain.rs new file mode 100644 index 000000000000..f828afeaf751 --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/domain.rs @@ -0,0 +1,487 @@ +//! Domain-specific tests. Taken almost verbatim from the previous tree implementation. + +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde_with::{hex::Hex, serde_as}; +use tempfile::TempDir; + +use std::{fs::File, io::BufReader, num::NonZeroU32, slice}; + +use zksync_config::constants::ACCOUNT_CODE_STORAGE_ADDRESS; +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_merkle_tree2::domain::ZkSyncTree; +use zksync_merkle_tree2::HashTree; +use zksync_storage::db::Database; +use zksync_storage::RocksDB; +use zksync_types::{ + proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, + WitnessStorageLog, H256, +}; +use zksync_utils::u32_to_h256; + +fn gen_storage_logs() -> Vec { + let addrs = vec![ + "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", + "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", + "89b8988a018f5348f52eeac77155a793adf03ecc", + "782806db027c08d36b2bed376b4271d1237626b3", + "b2b57b76717ee02ae1327cc3cf1f40e76f692311", + ] + .into_iter() + .map(|s| s.parse::
<Address>

().unwrap()); + + let proof_keys = addrs.flat_map(|addr| { + (0..20).map(move |i| StorageKey::new(AccountTreeId::new(addr), u32_to_h256(i))) + }); + let proof_values = (0..100).map(u32_to_h256); + + proof_keys + .zip(proof_values) + .map(|(proof_key, proof_value)| { + let storage_log = StorageLog::new_write_log(proof_key, proof_value); + WitnessStorageLog { + storage_log, + previous_value: H256::zero(), + } + }) + .collect() +} + +fn convert_logs(logs: impl Iterator) -> Vec { + logs.map(|storage_log| WitnessStorageLog { + storage_log, + previous_value: H256::zero(), + }) + .collect() +} + +#[test] +fn basic_workflow() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let logs = gen_storage_logs(); + let block_number = NonZeroU32::new(1).unwrap(); + + let (metadata, expected_root_hash) = { + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(db); + let metadata = tree.process_block(&logs); + tree.save(); + tree.verify_consistency(block_number); + (metadata, tree.root_hash()) + }; + + assert_eq!(metadata.root_hash, expected_root_hash); + assert_eq!(metadata.rollup_last_leaf_index, 101); + assert_eq!(metadata.initial_writes.len(), logs.len()); + for (write, log) in metadata.initial_writes.iter().zip(&logs) { + assert_eq!(write.value, log.storage_log.value); + } + assert!(metadata.repeated_writes.is_empty()); + + assert_eq!( + expected_root_hash, + H256([ + 125, 25, 107, 171, 182, 155, 32, 70, 138, 108, 238, 150, 140, 205, 193, 39, 90, 92, + 122, 233, 118, 238, 248, 201, 160, 55, 58, 206, 244, 216, 188, 10 + ]), + ); + + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let tree = ZkSyncTree::new_lightweight(db); + tree.verify_consistency(block_number); + assert_eq!(tree.root_hash(), expected_root_hash); + assert_eq!(tree.block_number(), block_number.get()); +} + +#[test] +fn basic_workflow_multiblock() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let logs = gen_storage_logs(); + let blocks = logs.chunks(9); + + let expected_root_hash = { + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(db); + tree.use_dedicated_thread_pool(2); + for block in blocks { + tree.process_block(block); + } + tree.save(); + tree.root_hash() + }; + + assert_eq!( + expected_root_hash, + H256([ + 125, 25, 107, 171, 182, 155, 32, 70, 138, 108, 238, 150, 140, 205, 193, 39, 90, 92, + 122, 233, 118, 238, 248, 201, 160, 55, 58, 206, 244, 216, 188, 10 + ]), + ); + + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let tree = ZkSyncTree::new_lightweight(db); + assert_eq!(tree.root_hash(), expected_root_hash); + assert_eq!(tree.block_number(), 12); +} + +#[test] +fn revert_blocks() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + + // Generate logs and save them to DB. 
+ // Produce 4 blocks with distinct values and 1 block with modified values from first block + let block_size: usize = 25; + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let proof_keys = + (0..100).map(move |i| StorageKey::new(AccountTreeId::new(address), u32_to_h256(i))); + let proof_values = (0..100).map(u32_to_h256); + + // Add couple of blocks of distinct keys/values + let mut logs: Vec<_> = convert_logs( + proof_keys + .zip(proof_values) + .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)), + ); + // Add a block with repeated keys + let mut extra_logs = convert_logs((0..block_size).map(move |i| { + StorageLog::new_write_log( + StorageKey::new(AccountTreeId::new(address), u32_to_h256(i as u32)), + u32_to_h256((i + 1) as u32), + ) + })); + logs.append(&mut extra_logs); + + let mirror_logs = logs.clone(); + let tree_metadata: Vec<_> = { + let mut tree = ZkSyncTree::new_lightweight(storage); + let metadata = logs.chunks(block_size).map(|chunk| { + let metadata = tree.process_block(chunk); + tree.save(); + metadata + }); + metadata.collect() + }; + + assert_eq!(tree_metadata.len(), 5); + // 4 first blocks must contain only insert ops, while the last one must contain + // only the update ops. + for (i, metadata) in tree_metadata.iter().enumerate() { + let expected_leaf_index = if i == 4 { + assert!(metadata.initial_writes.is_empty()); + assert_eq!(metadata.repeated_writes.len(), block_size); + for (write, idx) in metadata.repeated_writes.iter().zip(1_u64..) { + assert_eq!(write.index, idx); + assert_eq!(write.value, H256::from_low_u64_be(idx)); + } + block_size * 4 + 1 + } else { + assert!(metadata.repeated_writes.is_empty()); + assert_eq!(metadata.initial_writes.len(), block_size); + block_size * (i + 1) + 1 + }; + assert_eq!(metadata.rollup_last_leaf_index, expected_leaf_index as u64); + } + + // Revert the last block. + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + { + let mut tree = ZkSyncTree::new_lightweight(storage); + assert_eq!(tree.root_hash(), tree_metadata.last().unwrap().root_hash); + tree.revert_logs(L1BatchNumber(3)); + assert_eq!(tree.root_hash(), tree_metadata[3].root_hash); + tree.save(); + } + + // Revert two more blocks. 
+ let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + { + let mut tree = ZkSyncTree::new_lightweight(storage); + tree.revert_logs(L1BatchNumber(1)); + assert_eq!(tree.root_hash(), tree_metadata[1].root_hash); + tree.save(); + } + + // Revert two more blocks second time; the result should be the same + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + { + let mut tree = ZkSyncTree::new_lightweight(storage); + tree.revert_logs(L1BatchNumber(1)); + assert_eq!(tree.root_hash(), tree_metadata[1].root_hash); + tree.save(); + } + + // Reapply one of the reverted logs + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + { + let storage_log = mirror_logs.get(3 * block_size).unwrap(); + let mut tree = ZkSyncTree::new_lightweight(storage); + tree.process_block(slice::from_ref(storage_log)); + tree.save(); + } + + // check saved block number + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let tree = ZkSyncTree::new_lightweight(storage); + assert_eq!(tree.block_number(), 3); +} + +#[test] +fn reset_tree() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let storage = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let logs = gen_storage_logs(); + let mut tree = ZkSyncTree::new_lightweight(storage); + let empty_root_hash = tree.root_hash(); + + logs.chunks(5) + .into_iter() + .fold(empty_root_hash, |hash, chunk| { + tree.process_block(chunk); + tree.reset(); + assert_eq!(tree.root_hash(), hash); + + tree.process_block(chunk); + tree.save(); + tree.root_hash() + }); +} + +#[test] +fn read_logs() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let mut logs = gen_storage_logs(); + logs.truncate(5); + + let write_metadata = { + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(db); + let metadata = tree.process_block(&logs); + tree.save(); + metadata + }; + + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(db); + let read_logs = logs + .into_iter() + .map(|log| StorageLog::new_read_log(log.storage_log.key, log.storage_log.value)); + let read_metadata = tree.process_block(&convert_logs(read_logs)); + + assert_eq!(read_metadata.root_hash, write_metadata.root_hash); +} + +fn create_write_log( + address: Address, + address_storage_key: [u8; 32], + value: [u8; 32], +) -> WitnessStorageLog { + WitnessStorageLog { + storage_log: StorageLog::new_write_log( + StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)), + H256(value), + ), + previous_value: H256::zero(), + } +} + +fn subtract_from_max_value(diff: u8) -> [u8; 32] { + let mut value = [255_u8; 32]; + value[31] -= diff; + value +} + +#[test] +fn root_hash_compatibility() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(db); + assert_eq!( + tree.root_hash(), + H256([ + 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, + 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106 + ]), + ); + + let storage_logs = vec![ + create_write_log(ACCOUNT_CODE_STORAGE_ADDRESS, [0; 32], [1; 32]), + create_write_log( + Address::from_low_u64_be(9223372036854775808), + [254; 32], + subtract_from_max_value(1), + ), + 
create_write_log( + Address::from_low_u64_be(9223372036854775809), + [253; 32], + subtract_from_max_value(2), + ), + create_write_log( + Address::from_low_u64_be(9223372036854775810), + [252; 32], + subtract_from_max_value(3), + ), + create_write_log( + Address::from_low_u64_be(9223372036854775811), + [251; 32], + subtract_from_max_value(4), + ), + create_write_log( + Address::from_low_u64_be(9223372036854775812), + [250; 32], + subtract_from_max_value(5), + ), + ]; + + let metadata = tree.process_block(&storage_logs); + assert_eq!( + metadata.root_hash, + H256([ + 35, 191, 235, 50, 17, 223, 143, 160, 240, 38, 139, 111, 221, 156, 42, 29, 72, 90, 196, + 198, 72, 13, 219, 88, 59, 250, 94, 112, 221, 3, 44, 171 + ]) + ); +} + +#[test] +fn process_block_idempotency_check() { + let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); + let rocks_db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new_lightweight(rocks_db); + let logs = gen_storage_logs(); + let tree_metadata = tree.process_block(&logs); + + // Simulate server restart by calling `process_block` again on the same tree + tree.reset(); + let repeated_tree_metadata = tree.process_block(&logs); + assert_eq!(repeated_tree_metadata.root_hash, tree_metadata.root_hash); + assert_eq!( + repeated_tree_metadata.initial_writes, + tree_metadata.initial_writes + ); + assert_eq!( + repeated_tree_metadata.repeated_writes, + tree_metadata.repeated_writes + ); +} + +/// Serializable version of [`StorageLogMetadata`] that hex-encodes byte arrays. +#[serde_as] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +struct StorageLogMetadataSnapshot { + #[serde_as(as = "Hex")] + root_hash: [u8; 32], + is_write: bool, + first_write: bool, + #[serde_as(as = "Vec")] + merkle_paths: Vec<[u8; 32]>, + #[serde_as(as = "Hex")] + leaf_hashed_key: [u8; 32], + leaf_enumeration_index: u64, + #[serde_as(as = "Hex")] + value_written: [u8; 32], + #[serde_as(as = "Hex")] + value_read: [u8; 32], +} + +impl From for StorageLogMetadataSnapshot { + fn from(metadata: StorageLogMetadata) -> Self { + let mut leaf_hashed_key = [0_u8; 32]; + metadata.leaf_hashed_key.to_big_endian(&mut leaf_hashed_key); + + Self { + root_hash: metadata.root_hash, + is_write: metadata.is_write, + first_write: metadata.first_write, + merkle_paths: metadata.merkle_paths, + leaf_hashed_key, + leaf_enumeration_index: metadata.leaf_enumeration_index, + value_written: metadata.value_written, + value_read: metadata.value_read, + } + } +} + +fn load_json(path: &str) -> T { + let file = File::open(path).unwrap_or_else(|err| { + panic!("Cannot open snapshot at `{path}`: {err}"); + }); + serde_json::from_reader(BufReader::new(file)).expect("cannot deserialize snapshot") +} + +// Snapshots are taken from the old tree implementation. 
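The snapshot struct above leans on `serde_with` to hex-encode its byte arrays, which is what gives the checked-in JSON snapshots their string form. A minimal round-trip sketch using the same adapters, assuming `serde`, `serde_json`, and `serde_with` with the `hex` feature (editorial illustration; `HashSnapshot` and its values are invented, while the real field set is defined above):

```rust
use serde::{Deserialize, Serialize};
use serde_with::{hex::Hex, serde_as};

#[serde_as]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct HashSnapshot {
    #[serde_as(as = "Hex")]
    root_hash: [u8; 32],
    #[serde_as(as = "Vec<Hex>")]
    merkle_paths: Vec<[u8; 32]>,
}

fn main() {
    let snapshot = HashSnapshot {
        root_hash: [0xab; 32],
        merkle_paths: vec![[0x01; 32], [0x02; 32]],
    };
    let json = serde_json::to_string_pretty(&snapshot).unwrap();
    // Byte arrays come out as lowercase hex strings, the same shape as the
    // `log-metadata-*.json` files added by this patch.
    assert!(json.contains(&"ab".repeat(32)));

    let restored: HashSnapshot = serde_json::from_str(&json).unwrap();
    assert_eq!(restored, snapshot);
}
```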
+#[test] +fn witness_workflow() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let logs = gen_storage_logs(); + let (first_chunk, _) = logs.split_at(logs.len() / 2); + + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new(db); + let metadata = tree.process_block(first_chunk); + let job = metadata.witness.unwrap(); + assert_eq!(job.next_enumeration_index(), 1); + let merkle_paths: Vec<_> = job.into_merkle_paths().collect(); + + assert!(merkle_paths.iter().all(|log| log.merkle_paths.len() == 256)); + + let mut witnesses: Vec<_> = merkle_paths + .into_iter() + .map(StorageLogMetadataSnapshot::from) + .collect(); + + // The working dir for integration tests is set to the crate dir, so specifying relative paths + // should be OK. + let last_witness: StorageLogMetadataSnapshot = + load_json("./tests/integration/snapshots/log-metadata-full.json"); + assert_eq!(*witnesses.last().unwrap(), last_witness); + + for witness in &mut witnesses { + // Leave only 8 hashes closest to the tree root + witness.merkle_paths = witness.merkle_paths.split_off(248); + } + let truncated_witnesses: Vec = + load_json("./tests/integration/snapshots/log-metadata-list-short.json"); + assert_eq!(witnesses, truncated_witnesses); +} + +#[test] +fn witnesses_with_multiple_blocks() { + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let logs = gen_storage_logs(); + + let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); + let mut tree = ZkSyncTree::new(db); + let empty_tree_hashes: Vec<_> = (0..256) + .map(|i| Blake2Hasher.empty_subtree_hash(i)) + .collect(); + + let non_empty_levels_by_block = logs.chunks(10).map(|block| { + let metadata = tree.process_block(block); + let witness = metadata.witness.unwrap(); + + let non_empty_levels = witness.into_merkle_paths().map(|log| { + assert_eq!(log.merkle_paths.len(), 256); + log.merkle_paths + .iter() + .zip(&empty_tree_hashes) + .position(|(hash, empty_hash)| *hash != empty_hash.0) + .unwrap_or(256) + }); + non_empty_levels.collect::>() + }); + let non_empty_levels_by_block: Vec<_> = non_empty_levels_by_block.collect(); + + // For at least some blocks, the non-empty level for the first log can be lower + // than for the following logs (meaning that additional empty subtree hashes + // are included in the Merkle path for the following log). + let has_following_log_with_greater_level = + non_empty_levels_by_block.iter().any(|block_levels| { + block_levels[1..] + .iter() + .any(|&level| level > block_levels[0]) + }); + assert!( + has_following_log_with_greater_level, + "{non_empty_levels_by_block:?}" + ); +} diff --git a/core/lib/merkle_tree2/tests/integration/main.rs b/core/lib/merkle_tree2/tests/integration/main.rs new file mode 100644 index 000000000000..bf3391df6ccf --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/main.rs @@ -0,0 +1,6 @@ +//! Integration tests for the Merkle tree. + +mod common; +mod consistency; +mod domain; +mod merkle_tree; diff --git a/core/lib/merkle_tree2/tests/integration/merkle_tree.rs b/core/lib/merkle_tree2/tests/integration/merkle_tree.rs new file mode 100644 index 000000000000..37466fcc585d --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/merkle_tree.rs @@ -0,0 +1,571 @@ +//! Tests not tied to the zksync domain. 
+ +use once_cell::sync::Lazy; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; + +use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; +use zksync_merkle_tree2::{ + Database, HashTree, MerkleTree, PatchSet, Patched, TreeInstruction, TreeLogEntry, +}; +use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; +use zksync_utils::u32_to_h256; + +use crate::common::generate_key_value_pairs; + +fn convert_to_writes(kvs: &[(U256, H256)]) -> Vec<(U256, TreeInstruction)> { + let kvs = kvs + .iter() + .map(|&(key, hash)| (key, TreeInstruction::Write(hash))); + kvs.collect() +} + +// The extended version of computations used in `InternalNode`. +fn compute_tree_hash(kvs: &[(U256, H256)]) -> H256 { + assert!(!kvs.is_empty()); + + let hasher = Blake2Hasher; + let mut empty_tree_hash = hasher.hash_bytes([0_u8; 40]); + let level = kvs.iter().enumerate().map(|(i, (key, value))| { + let leaf_index = i as u64 + 1; + let mut bytes = [0_u8; 40]; + bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); + bytes[8..].copy_from_slice(value.as_ref()); + (*key, hasher.hash_bytes(bytes)) + }); + let mut level: Vec<(U256, H256)> = level.collect(); + level.sort_unstable_by_key(|(key, _)| *key); + + for _ in 0..256 { + let mut next_level = vec![]; + let mut i = 0; + while i < level.len() { + let (pos, hash) = level[i]; + let aggregate_hash = if pos.bit(0) { + // `pos` corresponds to a right branch of its parent + hasher.compress(&empty_tree_hash, &hash) + } else if let Some((next_pos, next_hash)) = level.get(i + 1) { + if pos + 1 == *next_pos { + i += 1; + hasher.compress(&hash, next_hash) + } else { + hasher.compress(&hash, &empty_tree_hash) + } + } else { + hasher.compress(&hash, &empty_tree_hash) + }; + next_level.push((pos >> 1, aggregate_hash)); + i += 1; + } + + level = next_level; + empty_tree_hash = hasher.compress(&empty_tree_hash, &empty_tree_hash); + } + level[0].1 +} + +#[test] +fn compute_tree_hash_works_correctly() { + // Reference value taken from the previous implementation. 
+ const EXPECTED_HASH: H256 = H256([ + 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, 105, + 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, + ]); + + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let hash = compute_tree_hash(&[(key, H256([1; 32]))]); + assert_eq!(hash, EXPECTED_HASH); +} + +#[test] +fn root_hash_is_computed_correctly_on_empty_tree() { + for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { + println!("Inserting {kv_count} key-value pairs"); + + let database = PatchSet::default(); + let tree = MerkleTree::new(&database); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(&kvs); + let (output, _) = tree.extend(kvs); + assert_eq!(output.root_hash, expected_hash); + } +} + +#[test] +fn proofs_are_computed_correctly_on_empty_tree() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); + for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { + println!("Inserting {kv_count} key-value pairs"); + + let mut database = PatchSet::default(); + let tree = MerkleTree::new(&database); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(&kvs); + let instructions = convert_to_writes(&kvs); + let (output, patch) = tree.extend_with_proofs(instructions.clone()); + database.apply_patch(patch); + + assert_eq!(output.root_hash(), Some(expected_hash)); + assert_eq!(output.logs.len(), instructions.len()); + output.verify_proofs(&Blake2Hasher, empty_tree_hash, &instructions); + let root_hash = output.root_hash().unwrap(); + + let reads = instructions + .iter() + .map(|(key, _)| (*key, TreeInstruction::Read)); + let mut reads: Vec<_> = reads.collect(); + reads.shuffle(&mut rng); + let tree = MerkleTree::new(&database); + let (output, _) = tree.extend_with_proofs(reads.clone()); + output.verify_proofs(&Blake2Hasher, root_hash, &reads); + assert_eq!(output.root_hash(), Some(root_hash)); + } +} + +#[test] +fn proofs_are_computed_correctly_for_mixed_instructions() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + let mut database = PatchSet::default(); + let tree = MerkleTree::new(&database); + let kvs = generate_key_value_pairs(0..20); + let (output, patch) = tree.extend(kvs.clone()); + database.apply_patch(patch); + let old_root_hash = output.root_hash; + + let reads = kvs.iter().map(|(key, _)| (*key, TreeInstruction::Read)); + let mut instructions: Vec<_> = reads.collect(); + // Overwrite all keys in the tree. + let writes: Vec<_> = kvs.iter().map(|(key, _)| (*key, H256::zero())).collect(); + let expected_hash = compute_tree_hash(&writes); + instructions.extend(convert_to_writes(&writes)); + instructions.shuffle(&mut rng); + + let tree = MerkleTree::new(&database); + let (output, _) = tree.extend_with_proofs(instructions.clone()); + // Check that there are some read ops recorded. + assert!(output + .logs + .iter() + .any(|op| matches!(op.base, TreeLogEntry::Read { .. 
}))); + + output.verify_proofs(&Blake2Hasher, old_root_hash, &instructions); + assert_eq!(output.root_hash(), Some(expected_hash)); +} + +#[test] +fn proofs_are_computed_correctly_for_missing_keys() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + let kvs = generate_key_value_pairs(0..20); + let mut instructions = convert_to_writes(&kvs); + let missing_reads = generate_key_value_pairs(20..50) + .into_iter() + .map(|(key, _)| (key, TreeInstruction::Read)); + instructions.extend(missing_reads); + instructions.shuffle(&mut rng); + + let database = PatchSet::default(); + let tree = MerkleTree::new(&database); + let (output, _) = tree.extend_with_proofs(instructions.clone()); + let read_misses = output + .logs + .iter() + .filter(|op| matches!(op.base, TreeLogEntry::ReadMissingKey)); + assert_eq!(read_misses.count(), 30); + let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); + output.verify_proofs(&Blake2Hasher, empty_tree_hash, &instructions); +} + +// Computing the expected hash takes some time in the debug mode, so we memoize it. +static KVS_AND_HASH: Lazy<(Vec<(U256, H256)>, H256)> = Lazy::new(|| { + let kvs = generate_key_value_pairs(0..100); + let expected_hash = compute_tree_hash(&kvs); + (kvs, expected_hash) +}); + +fn test_intermediate_commits(mut db: impl Database, chunk_size: usize) { + let (kvs, expected_hash) = &*KVS_AND_HASH; + let mut final_hash = H256::zero(); + for chunk in kvs.chunks(chunk_size) { + let tree = MerkleTree::new(&db); + let (output, patch) = tree.extend(chunk.to_vec()); + db.apply_patch(patch); + final_hash = output.root_hash; + } + assert_eq!(final_hash, *expected_hash); + + let tree = MerkleTree::new(&db); + let latest_version = tree.latest_version().unwrap(); + for version in 0..=latest_version { + tree.verify_consistency(version).unwrap(); + } +} + +#[test] +fn root_hash_is_computed_correctly_with_intermediate_commits() { + for chunk_size in [3, 5, 10, 17, 28, 42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + test_intermediate_commits(PatchSet::default(), chunk_size); + } +} + +#[test] +fn proofs_are_computed_correctly_with_intermediate_commits() { + let (kvs, expected_hash) = &*KVS_AND_HASH; + for chunk_size in [3, 5, 10, 17, 28, 42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + + let mut db = PatchSet::default(); + let mut root_hash = Blake2Hasher.empty_subtree_hash(256); + for chunk in kvs.chunks(chunk_size) { + let instructions = convert_to_writes(chunk); + let tree = MerkleTree::new(&db); + let (output, patch) = tree.extend_with_proofs(instructions.clone()); + db.apply_patch(patch); + output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + root_hash = output.root_hash().unwrap(); + } + assert_eq!(root_hash, *expected_hash); + } +} + +fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { + let (kvs, expected_hash) = &*KVS_AND_HASH; + let mut db = Patched::new(db); + let mut final_hash = H256::zero(); + for chunk in kvs.chunks(chunk_size) { + let tree = MerkleTree::new(&db); + let (output, patch) = tree.extend(chunk.to_vec()); + db.apply_patch(patch); + final_hash = output.root_hash; + } + assert_eq!(final_hash, *expected_hash); + + db.flush(); + let db = db.into_inner(); + let tree = MerkleTree::new(&db); + let latest_version = tree.latest_version().unwrap(); + for version in 0..=latest_version { + tree.verify_consistency(version).unwrap(); + } + db +} + +#[test] +fn accumulating_commits() { + for chunk_size in [3, 5, 10, 17, 28, 
42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + test_accumulated_commits(PatchSet::default(), chunk_size); + } +} + +fn test_root_hash_computing_with_reverts(db: &mut impl Database) { + let (kvs, expected_hash) = &*KVS_AND_HASH; + let (initial_update, final_update) = kvs.split_at(75); + let key_updates: Vec<_> = kvs.iter().map(|(key, _)| (*key, H256([255; 32]))).collect(); + let key_inserts = generate_key_value_pairs(100..200); + + let tree = MerkleTree::new(&*db); + let (initial_output, patch) = tree.extend(initial_update.to_vec()); + db.apply_patch(patch); + + // Try rolling back one block at a time. + let reverted_updates = key_updates.chunks(25).chain(key_inserts.chunks(25)); + for reverted_update in reverted_updates { + let tree = MerkleTree::new(&*db); + let (reverted_output, patch) = tree.extend(reverted_update.to_vec()); + db.apply_patch(patch); + assert_ne!(reverted_output, initial_output); + + let patch = MerkleTree::new(&*db).truncate_versions(1).unwrap(); + db.apply_patch(patch); + let tree = MerkleTree::new(&*db); + assert_eq!(tree.latest_version(), Some(0)); + assert_eq!(tree.root_hash(0), Some(initial_output.root_hash)); + + let (final_output, patch) = tree.extend(final_update.to_vec()); + db.apply_patch(patch); + assert_eq!(final_output.root_hash, *expected_hash); + let tree = MerkleTree::new(&*db); + assert_eq!(tree.latest_version(), Some(1)); + assert_eq!(tree.root_hash(0), Some(initial_output.root_hash)); + assert_eq!(tree.root_hash(1), Some(final_output.root_hash)); + + let patch = tree.truncate_versions(1).unwrap(); + db.apply_patch(patch); + } +} + +#[test] +fn root_hash_is_computed_correctly_with_reverts() { + test_root_hash_computing_with_reverts(&mut PatchSet::default()); +} + +fn test_root_hash_computing_with_key_updates(mut db: impl Database) { + const RNG_SEED: u64 = 42; + const P_SCALE: usize = 1_000; + // ^ Scaling factor for probabilities (to avoid floating-point conversions) + + let mut kvs = generate_key_value_pairs(0..50); + let tree = MerkleTree::new(&db); + let expected_hash = compute_tree_hash(&kvs); + let (output, patch) = tree.extend(kvs.clone()); + assert_eq!(output.root_hash, expected_hash); + + db.apply_patch(patch); + + // Overwrite some `kvs` entries and add some new ones. + let changed_kvs = kvs.iter_mut().enumerate().filter_map(|(i, kv)| { + if i % 3 == 1 { + kv.1 = u32_to_h256((i + 100) as u32); + return Some(*kv); + } + None + }); + let changed_kvs: Vec<_> = changed_kvs.collect(); + let new_kvs = generate_key_value_pairs(50..75); + kvs.extend_from_slice(&new_kvs); + let expected_hash = compute_tree_hash(&kvs); + + // We can merge `changed_kvs` and `new_kvs` in any way that preserves `new_kvs` ordering. + // We'll do multiple ways (which also will effectively test DB rollbacks). + + // All changed KVs, then all new KVs. + let mut update = Vec::with_capacity(changed_kvs.len() + new_kvs.len()); + update.extend_from_slice(&changed_kvs); + update.extend_from_slice(&new_kvs); + let tree = MerkleTree::new(&db); + let (output, _) = tree.extend(update.clone()); + assert_eq!(output.root_hash, expected_hash); + + // All changed KVs (randomly shuffled), then all new KVs. + let mut rng = StdRng::seed_from_u64(RNG_SEED); + update[..changed_kvs.len()].shuffle(&mut rng); + let tree = MerkleTree::new(&db); + let (output, _) = tree.extend(update); + assert_eq!(output.root_hash, expected_hash); + + // All new KVs, then all changed KVs. 
+ let mut update = Vec::with_capacity(changed_kvs.len() + new_kvs.len()); + update.extend_from_slice(&new_kvs); + update.extend_from_slice(&changed_kvs); + let tree = MerkleTree::new(&db); + let (output, _) = tree.extend(update); + assert_eq!(output.root_hash, expected_hash); + + // New KVs and changed KVs randomly spliced. + let mut update = Vec::with_capacity(changed_kvs.len() + new_kvs.len()); + let changed_p = changed_kvs.len() * P_SCALE / (changed_kvs.len() + new_kvs.len()); + let mut changed_kvs = changed_kvs.into_iter(); + let mut new_kvs = new_kvs.into_iter(); + for _ in 0..(changed_kvs.len() + new_kvs.len()) { + // We can run out of elements in one of the iterators, but we don't really care. + if rng.gen_range(0..P_SCALE) <= changed_p { + update.extend(changed_kvs.next()); + } else { + update.extend(new_kvs.next()); + } + } + update.extend(changed_kvs.chain(new_kvs)); + + let tree = MerkleTree::new(&db); + let (output, _) = tree.extend(update); + assert_eq!(output.root_hash, expected_hash); +} + +#[test] +fn root_hash_is_computed_correctly_with_key_updates() { + test_root_hash_computing_with_key_updates(PatchSet::default()); +} + +#[test] +fn proofs_are_computed_correctly_with_key_updates() { + const RNG_SEED: u64 = 1_234; + + let (kvs, expected_hash) = &*KVS_AND_HASH; + let mut rng = StdRng::seed_from_u64(RNG_SEED); + + for updated_keys in [5, 10, 17, 28, 42] { + println!("Inserting 100 key-value pairs with {updated_keys} updates"); + + let old_instructions: Vec<_> = kvs[..updated_keys] + .iter() + .map(|(key, _)| (*key, TreeInstruction::Write(H256([255; 32])))) + .collect(); + // Move the updated keys to the random places in the `kvs` vector. + let mut writes = convert_to_writes(kvs); + let mut instructions = writes.split_off(updated_keys); + for updated_kv in writes { + let idx = rng.gen_range(0..=instructions.len()); + instructions.insert(idx, updated_kv); + } + + let mut db = PatchSet::default(); + let tree = MerkleTree::new(&db); + let (output, patch) = tree.extend_with_proofs(old_instructions.clone()); + db.apply_patch(patch); + let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); + output.verify_proofs(&Blake2Hasher, empty_tree_hash, &old_instructions); + + let root_hash = output.root_hash().unwrap(); + let tree = MerkleTree::new(&db); + let (output, _) = tree.extend_with_proofs(instructions.clone()); + assert_eq!(output.root_hash(), Some(*expected_hash)); + output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + } +} + +// Taken from the integration tests for the previous tree implementation. +fn test_root_hash_equals_to_previous_implementation(mut db: impl Database) { + const PREV_IMPL_HASH: H256 = H256([ + 125, 25, 107, 171, 182, 155, 32, 70, 138, 108, 238, 150, 140, 205, 193, 39, 90, 92, 122, + 233, 118, 238, 248, 201, 160, 55, 58, 206, 244, 216, 188, 10, + ]); + + let addrs = [ + "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", + "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", + "89b8988a018f5348f52eeac77155a793adf03ecc", + "782806db027c08d36b2bed376b4271d1237626b3", + "b2b57b76717ee02ae1327cc3cf1f40e76f692311", + ] + .into_iter() + .map(|s| s.parse::
<Address>
().unwrap()); + + let keys = addrs.flat_map(|addr| { + (0..20).map(move |i| { + StorageKey::new(AccountTreeId::new(addr), u32_to_h256(i)).hashed_key_u256() + }) + }); + let values = (0..100).map(u32_to_h256); + let kvs: Vec<_> = keys.zip(values).collect(); + + let expected_hash = compute_tree_hash(&kvs); + assert_eq!(expected_hash, PREV_IMPL_HASH); + + let tree = MerkleTree::new(&db); + assert!(tree.latest_version().is_none()); + let (output, patch) = tree.extend(kvs); + assert_eq!(output.root_hash, PREV_IMPL_HASH); + + db.apply_patch(patch); + let tree = MerkleTree::new(&db); + assert_eq!(tree.latest_version(), Some(0)); + assert_eq!(tree.root_hash(0), Some(PREV_IMPL_HASH)); +} + +#[test] +fn root_hash_equals_to_previous_implementation() { + test_root_hash_equals_to_previous_implementation(PatchSet::default()); +} + +/// RocksDB-specific tests. +mod rocksdb { + use tempfile::TempDir; + + use super::*; + use zksync_merkle_tree2::RocksDBWrapper; + use zksync_storage::db; + + #[derive(Debug)] + struct Harness { + db: RocksDBWrapper, + dir: TempDir, + } + + impl Harness { + fn new() -> Self { + let dir = TempDir::new().expect("failed creating temporary dir for RocksDB"); + let db = RocksDBWrapper::new(&dir); + Self { db, dir } + } + } + + #[test] + fn root_hash_equals_to_previous_implementation() { + let harness = Harness::new(); + test_root_hash_equals_to_previous_implementation(harness.db); + } + + #[test] + fn root_hash_is_computed_correctly_with_key_updates() { + let harness = Harness::new(); + test_root_hash_computing_with_key_updates(harness.db); + } + + #[test] + fn root_hash_is_computed_correctly_with_intermediate_commits() { + let Harness { mut db, dir } = Harness::new(); + for chunk_size in [3, 8, 21] { + if let Some(patch) = MerkleTree::new(&db).truncate_versions(0) { + db.apply_patch(patch); + } + + test_intermediate_commits(db, chunk_size); + db = RocksDBWrapper::new(&dir); + // ^ We overwrite DB data by using the same `version` on each iteration, + // meaning we can use a single RocksDB instance for all of them without clearing it. + } + } + + #[test] + fn root_hash_is_computed_correctly_with_reverts() { + let Harness { mut db, dir: _dir } = Harness::new(); + test_root_hash_computing_with_reverts(&mut db); + + let tree = MerkleTree::new(&db); + assert_eq!(tree.latest_version(), Some(0)); + let (_, patch) = tree.extend(vec![]); + db.apply_patch(patch); + // Check that reverted data is not present in the database. + let raw_db = db.into_inner(); + let cf = raw_db.cf_merkle_tree_handle(db::MerkleTreeColumnFamily::Tree); + let latest_kvs: Vec<_> = raw_db + .prefix_iterator_cf(cf, [0, 0, 0, 0, 0, 0, 0, 1]) + .collect(); + // Only the root node should be present. 
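The `[0, 0, 0, 0, 0, 0, 0, 1]` prefix passed to `prefix_iterator_cf` above is simply tree version 1 encoded as a big-endian `u64`, matching the key layout checked by the `node_key_serialization` unit test earlier in this patch. A small sketch of that encoding (editorial illustration; `version_prefix` is a made-up helper):

```rust
// Builds the 8-byte key prefix for a given tree version: the version as a big-endian u64.
// Full node keys then append the nibble count and the nibbles themselves.
fn version_prefix(version: u64) -> [u8; 8] {
    version.to_be_bytes()
}

fn main() {
    assert_eq!(version_prefix(0), [0; 8]);
    assert_eq!(version_prefix(1), [0, 0, 0, 0, 0, 0, 0, 1]);
    // A node key for version 3 and nibbles "deadbe" would start with
    // [0, 0, 0, 0, 0, 0, 0, 3, 6, 0xde, 0xad, 0xbe], as asserted in the unit test.
}
```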
+ assert_eq!(latest_kvs.len(), 1, "{latest_kvs:?}"); + } + + #[test] + fn accumulating_commits() { + let Harness { mut db, dir: _dir } = Harness::new(); + for chunk_size in [3, 5, 10, 17, 28, 42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + db = test_accumulated_commits(db, chunk_size); + let patch = MerkleTree::new(&db).truncate_versions(0).unwrap(); + db.apply_patch(patch); + } + } + + #[test] + #[should_panic(expected = "Mismatch between the provided tree hasher `no_op256`")] + fn tree_tags_mismatch() { + let Harness { mut db, dir: _dir } = Harness::new(); + let tree = MerkleTree::new(&db); + let (_, patch) = tree.extend(vec![(U256::zero(), H256::zero())]); + db.apply_patch(patch); + + MerkleTree::with_hasher(&db, &()); + } + + #[test] + #[should_panic(expected = "Mismatch between the provided tree hasher `no_op256`")] + fn tree_tags_mismatch_with_cold_restart() { + let Harness { mut db, dir } = Harness::new(); + let tree = MerkleTree::new(&db); + let (_, patch) = tree.extend(vec![(U256::zero(), H256::zero())]); + db.apply_patch(patch); + drop(db); + + let db = RocksDBWrapper::new(dir); + MerkleTree::with_hasher(&db, &()); + } +} diff --git a/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-full.json b/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-full.json new file mode 100644 index 000000000000..5df6847551c7 --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-full.json @@ -0,0 +1,267 @@ +{ + "root_hash": "9dfbdaad554b5816807c1e1c051c3ad8e40b4f4d7695bdcc9c534a26c4e584c9", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "94bb15542026f4f607416f019dffe21bb39bbb32cc92085ab615660a6b5fbef4", + "7952661ab5d63534c5ea72f81887d8dd6bf514b14c8e9fb714b6feb02efb96a0", + "3d75808db532e9685bcc7969ad0f5f0872086b24e02b28cdc7df6e3cc1bd2371", + "29463426092df4c7af14bff4977d825e35d93d5b2c7555997ae0bc5da503b1b3", + "55bdf8e79ad5207ea317d92c4009cea29fe9bb66e0d6cfd5b50cfe202bb7c17b", + "7b269aa0a2836697c2a36aa7e91d61a4e8f3eec4fddda4f0e28f2c00d895f2d9", + "e7d36ccea1ac417362907ff9752ae7beaf3380bc77c554f4c0189915c6e2f156", + "d2d9e3c2060d41b9ef514403cbdf1ed473acf7d6b10518ab0ae119ac3813ad64", + "769e4e5aa4e1968762b761d7213bc1ec97486a83b6d9d10a821dbed37e619ab1", + "01081746734e83cb0afcc9e884dc71e2dc893b74190ae67233b10a333818aaba", + "dcc6d61671642128e20c498ad0ea7e5f02178699a2a3fa4b6d341f78ec1b6f27", + "90aa631a0a952345a6c37faf9f9615bcb79bf3c3e482fb90e7e2943682e95cd6", + "351c803e4a5f9ccdb38e88c63a7f1e6d419d7784da4da58a4ddbba22bb947b88", + "6ff95284d7a2af72fa9bfaf68175bfc726d44d6de42cd55e0956cd3c5a54e789", + "3305aeb5575f473f2bc6561a8a2aa6699de7a9bc0dd1db31843577ede186d025", + "b52a7e7f9609d0f7d208055602c4ccca8436034dcabe8f2dff33148f3beb170e", + "3b21dff05c1fd42478a06e331e850399ab3bcac1ca96f778b11e4f9195c5f3eb", + "5ff91c6907e8ec45ac5dcbc8614d6bd6bc4e87b9612615e4a11492260ace6861", + "f3ca9084d4a47253a68d42ea65f1ff30e14dd9cde00196702f72812934a9c807", + "5371c89c3ba241f45231cbbb2fd462b0db55f8c6385c795a538f704f5b25a8d0", + "070e2ea800ceefac5daa980cb1250c7869deb40128d5d009a7ad762b9598f162", + "5952f729ded9dee4a4ef3c5d9598e2ad950293d8061fa1e53adcf4117c09f9c9", + "15fe4d78d71575df893b49acc33d9113557af357fe3888749b0a210fcf8761ec", + "3d07e28efe5e4e7a7e2f3c557f280f5b9ba24f584fee580357611b9f9365cbe5", + "890017f43434b2a4b995a376e455eb4d6692bc1a9a9bd995f011fdc864dc0ac1", + "2bb086a86d75b83950f62dcbd2be938574b7c7721284009e5a481a0085493d46", + "f486448dfd67d7b5ff80d2ee7df1d05eb6845f4b1bd7e59a2950b048740380ce", + 
"93d401e5c105a345c1090360796ffedc81377468ded6e3b8c5d0f6e0bc9bd232", + "bd59a52d343e78ea0917b3666f048c251c567d4757a7ed6920aee1a0494f34d4", + "b410a350abfe2bff5cfe602dfd4f62f5c178f6fd7905a48a5d57016e748464d2", + "9bf2f7c5ff739af9dcb04ff3f4ddb2daaa2ded109fc37224abbf097e79db883a", + "2b2b011623060371c1287ac5d0b972ea0acc3e0625479c62f58b1dae9a4e4f0e", + "16fddc72c53f0168b0bc75315f71955b5a616dd9e976dc1d70857ef851bcf87f", + "4f6ddfa4e7a78272d476c78a8e20d75e67b34817fea9699e157a4327999aeabd", + "8a48da19397975fa48f382dadf0db0543f8a1429890791f8edf9d85701cc090c", + "efcc3ad578210006391a5bdcc14416df8c665ef18a34a6fbee768c81366170e2", + "dfc2706b3b0046eb44b04dd01f9c68396c69566bd878ef15fa1602af6d0c3271", + "e575e6bcc863d5a637946f9499aa671c1a026553ff44eaeb6bce593ad7be9509", + "fb885db102e10d3c885c8b9ec215c1d3f54414aba4a3725387bd27ca4a3b5788", + "307bad1feb84aacf221f357bd44fd8424dcd9ad91be7b7c75c2c2688dd09a2dc", + "26faa4b413c6184fe577e5c181cc658ab8a6b794497220eb9e870a53cf9c8616", + "97dac09f6227af3c8fa4b84c8ac7f4ca78d0e5a5acd81fa5419263a0b770e7b0", + "ed615f12b68ea966366a3d9cfaae12560ba721dde64ef42db4e3c28eebe9e7dc", + "46fb536a51cb284ce7220e10c61fb7a9581dce2cf33678082b7ccffeb078e86f", + "65be4a061edada9664ed7eabcbb82306dd1dc4d257215dee2cff096683d7ee31", + "553c120a031f25e775c510e9204b85b2d744ac64f6890c0e372f98ae68111b5d", + "913523e30ee062c2f014307dbb77442447cdc6384556ce5385ce30ae99fe3f1d", + "5968fcade071909e6b28fe49443318733f443cfc997908d18cad658f51d563aa", + "0063d4b6834eb27132e3db7dbca5271a1c1be3ec0278e1476238f9567bd2c923", + "27bff7a116ef02256b937f0d6e189954fcda55ebdbd0311add6710f24587acef", + "3d030a55a63d3686af9d9206fc11e989bb754d091e31def3b766a99926d7cc0f", + "bcac725d366e063158fec8b765ef34dbfe4661a16a024d068b7e8b8c69c969e2", + "667b4cc062dbb7ea73ce2d3765852659ba0d9e50feae1c8f0e03d90c63343fcc", + "7d7b6e62b251d75925ae34a3064b6619b1e92bc0d1b08a578bbb69841f101bd0", + "62402e1ce7d21d56d7ab8acca37aef7210e13b48f7ee4e418d4b738a3ea7ed22", + "83ff1784605dce2361c0feb2bd4e19825cf31f381832b10d19aff4b7a505a137", + "a1791b1f9a05aebc8712ca3318786e23245322f1a7a9c6a8a19c456b0d5065d6", + "751b8b069eaa1f8fec4ff9e0f88b85de603f9412e043446d9a36cf5bfccfa386", + "8a06e0ee48a723bcc78357aeb142fbd67253990fff5285233dfa1a9596a2a1af", + "52f8a45813f1cbfa0f1d666813acc882dfa8bdd659ed3921504d3832c55235c2", + "f377ceca19baadcbb37e12850efab50729c0a4398aaa904c36606f8de7475ab6", + "511e6d6a77e31301e6612b256f21e2e6260d825a75795a537b71500966d929db", + "be3ffa66a40a99ac70a97f9548e8dd728d30e7c8dff5fc914408324343e0f08d", + "607319f7dd524f9f0a2be78b462620c8760962b79f957e2218ad41725ee89583", + "58fda4e0001cbc69d76f7607af089bef317f4bfa318f283bb11e9b345e1bdfc9", + "6c98a54803f13fd7476030a83df64f2c1d4bf7b755219ffb738b58cce3061bda", + "403c1833bcfc771436f27308f2513b2c06c42209f263b5c1c2cb25e050173aea", + "5fa220b7bb9dfc2d0fa7f744c2b4b71e8f1133350ddfc2c7fa0c39a2534caba4", + "ac896816663c28007c11f8dec98e39861f9cfd40061292e311ed89aae4426db0", + "c76d7cdea8ad8572209136bc24ee3f2b3c704e179faefc961e2a2c66b0976771", + "5782ce904f9bbf33717f9911a788f29dd82643c16e92cd1b301112b9d2d8c341", + "849a22938ab76a28353d6ff6682777b19b345f48f292868015f4484f1ece8bfc", + "ea4416c46e8e978e4d24bf68f1bbc36a7c6a24e937d67128061ecdd0efc01320", + "abda90ce54b015a1919b04fc7385313a93d650e01abc97d4ea2939a4a8abf357", + "458b081dc3dcffd435d8880848adad8f0f2ac329ad64215763e0a3086228db5a", + "7b45ad842b671d75792baf30a75e45bc97f9b6dc37f86d1e6b30c0902d1bd24e", + "fe02e60c0135882add8584bef8b28564cceb75a3c7602dd00fcd95d73e26d487", + 
"2e40946b9ba6a5cdbeb090bbdd67917de698da5551ccf5d9fbe3a17abe14368b", + "ba45e0f142fa48a7ab346c2dc38450a03b0e8e606f1032218b368e4466c6cda7", + "b2b4f4707aa7b1518159e7928ae8ee061e200a31eda791a6dedce41745a4ac78", + "94ae11cc3b8f36815124a4b0a5e7334fbcb9fce317bb1390e421334bd6cf6fd9", + "9ebbff604be1d295ff131fc099d9dcc073d050fa52c8d5e48f76df65d2645281", + "5e4a188a19954a007601c5864cba12194e52e1e6fb099068d4ef70e94c5d34a6", + "bc3f4808d7fbfcc52be674774332cddcbfc478d038fecc181566a051e636e9a0", + "5ae26f7f062461e4ad9150ba7090b06ccf15e88f780d880900909c0e6651ef0c", + "1ae48f07b56715ca2f319da11bb21e386a58350a725db6ff1a6a0632246130ae", + "a32add3446a75a8aaa1407817fc03fce86dcbb2488ccc52fd1187be7235e480c", + "66bda0fdd030f223d61b3fe7b17edafcfb0a4b866aa65272eedc3939dfbc4df2", + "d13c036805f47f6d024e6864983ea60b99ad6a06ea66b347d87d164c3099456b", + "e87e8e29e310a425b5ee5d75bd0e85fae12987660bbcdb36420831aa877a0240", + "369913efa08788a5b6e658878ba7f4c435f2d74a24d4575616d8f398c4279d30", + "c1fc63a024a455c3b92631e1e2be341a1671c1c69f57f746397002f8e8fd7844", + "6c245e27871a203f13f4dfa0965350aaa954ad29085223d481f077d0b54cfb25", + "b5a4e34bdf9f72712ca3f129a728dd026268647f96806263768d9c327fde0edf", + "1155662f3ba0306ca84578c4d5f0e033e2f32dec05fdf3260dcc5392feee0aef", + "5cee98caad5c7c7b2944d8e6eb191d5be6e58ea21484ec484dd9e60322e60e19", + "727ba92c1966ab8d657ef2cdc02ae30e95fccc8877d529b49c308de9d2f2da62", + "88b03a75bde9d0116ef5451bfc3a39695856b188ddfefa0e4eafb3ee554a269b", + "37e159bb27c8ba678da8a85626c732e666c067e515149afd90954b600a5cac4f", + "3046f54f449775c20052e9a9ac10c9ceb6913a0e19a887d02c423bf30a1ade45", + "2e08022b2967de107f9a1bc230904ced7ab67554257bce6e9ea479a99b0c4e95", + "db4883208ab757afe85f3fb2297cb7305c3d01bcaaccf8d956b8139411244c18", + "2a39fae44e6a832a8fc09ce44c99a6fbcc4189e018e54771d3b230142a915268", + "662ead16353a65f7cafb0363528e7f9776d83628a23ad1559ea8cbeeffee562e", + "34166debf87062b5154a9ec9332f52062c2adf3864d482134c8c6a9818170f7c", + "e44b29e69468bc5f7589c2c2ecf56db5aaefaa38ade46a180c5b99a0515fd86c", + "22385e3ecaf622f2f812d9f096ad752185ee8b8c54f25b4263fa783688817592", + "ec5ee649cb1f99c3cbd2b11ad341b9c0ebb41f26c9046e9fbfc53e890500d54d", + "37a16cf3dd5c6e3d89c541c617a0d0f77d687ccfab1b76b5104ed8f531242b15", + "d7f3ad3ff135f3d8411498a0b3755e03af7b60469b76b35eec9b480cf0127c3e", + "10d6d96785d486e4d96761778700177536372e04a32e1b707eb3e644698c5849", + "f80ea428d7e07005a3f297625ae8a14653f9afd1be5d7275400cfa8d1b67abf5", + "a9383b95636efd30b1f6ff709eea46efe10a76d3e3b3ea5dcc9aced69fb67827", + "828d31fab8708ebe604d60aaad6e8befbf8f7233fabe2af7001ba80851474486", + "fae85a1cdd55662e0cf9c12e19c874b932e1dbd4e334e8b98ca44a7920a3ba29", + "c3bf7e2d44787cb559519141a4a1df6000be4018856188bd606ab5b12e4b0c18", + "e4919631d0f624b9cd70f43f201eb6c7a8a3629e03df57265bae1f7cc07abd38", + "b6af8e47dcff8068508cba35fd2a67be41537c4b800a438ebc7c329dd44b7d7d", + "563627ce3e3d74e5d12beca851b89a6a57b488c89fdd5b6c803de3d07256c091", + "05bb69c64a48298baff04b70f19898ead2a9a22d08d8a37790b0b3e30e726319", + "36d70ddbce6a61dedfe197d77c291be0a8cdfbb849943d6fe9e0187fbca0baeb", + "e17b321345876464ca81d19236912ab467e0bc6671391bc88323fb3edf5ea1bd", + "141d36918359d4a3b785e241ef98807dbd984e00dad24f2781dc87a79ae0171b", + "ad842069a2226ac8a4af5fe449bbda223449199d979441a0353ae4a04feb3d52", + "73cc944dbb843eee36b0cbdb25b268b9940390112d6c5867b12a14fb55cfd38b", + "db20ab3ad2ae2245b1859768404c69140969271808da30e399510d30a68209a6", + "898e46d51e2cd328c3fdd68ab59b3d1251496c151596973056245caf2394a2dc", + 
"c2b50393323af4cc3fdba0be867f76d500e77eccd44c9af7d50fd53a51256f37", + "44feae9f59306da5f3415743c1fa74177ec087a83bf7d9e27e357a5c5101d800", + "99c73ac1b891357d930d40541aad7c32ee01e7506c25a659a2f6eb1091b15393", + "a3ab34f6a0e72911c75d2ebedc0a2110a0f64bdd986c66a5e1ef974fcdec7b22", + "846997cb70d3df079d34c15a0328c07ee0c22cf575c027c240064cefa10aaf4b", + "a766e76df91de995c6d112d65fff3685e863e45786da9f4c5808e1eb6dc2ffc8", + "dbfaad393b49f469cdf4e94fa08dc39ce1b6500ef215078237d303e6cb294b59", + "c62306bac17e5638da4c463336929f6631d641a077f2f169ae10587974fd0b90", + "879df3f243797ef924d4e4430443d1389256a085726fd63ffb2118ab78da8a96", + "95afd8d6299c48500ecd68787b6482cede1d86db5062be284476c962f2432cb1", + "152e243edb060a45e36db5f9a7b441a0816e8cd5dedbdc33eed6a0e7ffade54e", + "652dd32861ca3df609f85071ad2cb2a1e29c8771cd2424df015f3ac257305783", + "d9d166fa543545352f634e4b64f3f406390c45db8b71cebc9b34378eb8447c13", + "4f7e78304c85155bd0d0286705e9f4d9ab0dbba6be657150bace0a7b85ca8d66", + "b56571368d19519fb46dd228bf726a46d80d554bf21077c0c247017be1172a5f", + "6f17b1fae5d675ed9bf1bc56cc1df69e69af7afaad7af59e35f20c78e2ea9d7c", + "62c531c949c6540629a0c8dfc7e3e8c101d703ba4245ee0c7bbe4dd8ea8a3c21", + "1fe1091af9dc87a072c1c6022e3d81b7837d91e3cfbb9325bed1a6a0d47427af", + "55a4f533d6dac9a11cd491c152c384babde2a8a9b26a33d6a986bf46643eb3cb", + "12485a190b3d5d04f7f1fdafa1e93a215b5d58e248ce50c7bb99f78630ac5e1f", + "955660ba9db3d36aba12c604c70beca63a40c71be7c136ccaec702740181c4df", + "a34f1f178ecf9f00bcac16498fa5afadba54394e7d034b0a754e8cbf23686bce", + "cc53b87b50a5e102d2e1c366040f15ae6d9fb918bfae1fe9b93b1ffba789f4d1", + "8828f94f4f8c8f04e0a49df2d0f65f18adec69084de7c074dfcfac9a181916fe", + "2a29dfb789019e2a7cb8954624561e28db729fd7f96f1a9e3d80cddb9a3419b9", + "58352c98ae7f81539d08c0faa55688dd2c4bf1dbbd4c1f9bf097577136853b0e", + "8a974664ccdf5bf982e35fa4a807eb66150c171274d54f47b85ac4dc9549d225", + "3864bb3bf364ad835f35774bcbff61fb1a2c414bc98af4a624fc4173b8828f82", + "b2c80f33312f266cbf386bfdf82a74e71c8e4b50e30828a59df7ca1c6c28ac5b", + "050f9c2e1144bd28e4f09c8de6a5872f625aeb41c12faf0dabf09265018dc4ad", + "c3d65393ed3a849346ea8b0e24c60f2cea676bf4735ffaa1363f97c28dc6e50e", + "5b447234b328dcad7e1ab69312579b43251820db248947f4b99643368ec37338", + "651b4804d24b95de3b375dc8bb20e90a9f7eff785da8d84ab79cf04342f45708", + "adc43c534046af6c70f1c7c366e7bde9cf417fd62516cf274d4accf75273d1d6", + "f279cfb23404b9050675c8e0c8050d98c03f47260d9c37e75b4ad96fca8fbdd4", + "f07f2da83753db9fd6fa4514ebe43e82a7b6a3fe4955f51ac170bb8108a7434a", + "e643d4e12c499e47021c30ee2940689435768e9c2f62c0cec60ca4f83e9892c0", + "8b10fdccf3a4ae8c8f955e4bb6e96bf7e83aa1747faf0f7d1b3f15e3ca299f2b", + "05f218091c86c01ac8550c1aedd16b7d93221a3cfa8e9f14a5bbe9938d7543ff", + "692f3b9ecf7682df49f47a5e4ab30e7e7a623650bbd5cf3fe515b380d606ab01", + "f3c455276c2bb31e2024e0df0a2895d286918659a28136d9d52ed47254b9531c", + "ddbbeb7da840fb7cce492c77c234471b2c4756cfced86527ddbadb35ee378353", + "90156419b5feceafd068406226dd91afb8cf2eb435f1a6a18e54c57efad9b8de", + "9cd4125af6be61fe5e2aa36e0515879d39cb9cb73c0b40fcf8502775136ca05d", + "0a253041b99ce28b86dde27b93187c6cdae856956bcf108f6f8ca7de76202d72", + "15dd747feda5303671a00ac081129033adbde666436e35250b05812ce7225bca", + "c02ae0a01e3069647714226a4d6ffe0f416698bcc4fd4c3f0c1bc0391519d26e", + "e12befa530480449b164522e1f449e36a845132d2fae947fad6c3cd798b93c3b", + "d9ad7840a0b04c36f0639f93d97666c08df9713be41e315bd6341eff2460dc5c", + "eadaa256f8de320fa2ec43bd99c410d190733b30297b50e36b5080345bf288d2", + 
"2888a5034a5782cb1de792c5cab2ea8c95ec393694895929add4aa2d55d25d61", + "95085d567f1c0ab86f7d146d697f390d4f77610177e8a17e016a0efa4da6d1be", + "0c6a2909792a08f0c17d3017d375ad664e2567bdf7935f7dfa03b0aee0dda7d5", + "1a9b629afbf4b183e5011786029348abb367ab062456acec116cd6ff210df9ed", + "12ae4581b7af256b70c040b4329b12214b114e5c7b5c00d48c5e240c0549e7ac", + "eb867ea531c8fa0ee36cad6fff1bed461950b5d6ebed07c5adab76929e591299", + "8212e0d37184b7fd6e4369a638f99e30af798a9c5d4ccfbde51885e042473b1f", + "7ef9e666413b6e346ac0a5e9d80ef1612d6edd04ef14629564faab10b296a72d", + "487d10feda54c78ef4c60f09d1ce4460b8557e2cc86bf1a9d12b63425d98a73f", + "c268b135329d80e827c3988d766e327a14ce181ac633a3622ce86eb9c313c10b", + "cc026cc79a65875f9074a4449426d47ca79a6fdc4697fa9617af1904402de30c", + "919a6ed5d2eb768d312b3cf64f6e1c91235b48c329f41fc91f32fa03d9bda552", + "ed06115c713cd6fe2ad4cb59701453c53dcb6c8841ab725230bbe1cb55d5ffc2", + "efe11a46f29433a69259289b36663e9a2ac2dccfd959339c39894331f2fe953b", + "8f42939d0de45b67b6912e60a2f1ec7eb2b5ee06ee7df22bd74f149b8573daf6", + "efcce2f150331e37a9e8362d2969c0bff63633b0ef9fdbbf466404db2749e3c6", + "c136b465b68f78a83db6d05f639bb37a45fff5567d91b954ec6d0be266c77d49", + "50467238efe4b40ba2245059360401100f78a799696815c7144aa01defaa5ba4", + "3a07dfbba08ddb467e7f55361ed63fd02daa847db2596f199338b9b3223e45e2", + "6f849eb126cb1dc87eb588c34fd482eb8e861395c992b0730b88df792f261b8a", + "b4a82f98b96b64aa4fbdf9ab26db424f9facda8fc11bbbc6195028feb2556743", + "98c3f4dafca9946ddd7fdaf52888222f3455f28275d9073047e94e114e78e181", + "16df93ccda38738f5ad8ca5b3de0e98f62afe088c8519f3ed663dacfef1e977a", + "19ade9290e6230aa34f05bd1dd2f2d864c7837dc4f67efba7766ac3a688fe662", + "752294bae6c061e5e009bd65681ac5df76be0d8bfd2fafebb53ccdb0d7646a76", + "f220f3915c29bca7ed5002a795815a7f835823d4a70cc153d7f267e0817cd118", + "1179d0892a2e5d43761d97552edde68aca58963e040a24a0d3b0b2dd6cef47a8", + "b80310dce1d65c454cf6c407b51a1b510b36cacc81f586e2ba6d33bd835924e3", + "62b70602c5e0639528a780b9536bd9ac7544a040347a9055ba6c94eb4a3eea51", + "fd59914727fecbde053b642e5578d0d2431c8bb45b428b64765b270406444e3b", + "17fab55e7e82131b83c78f3a77294898bf4d5ef0613097bab861306804dd4240", + "b82cf21c37c587cd04246c2fe8e5548efd00c17060ee2871429d81f7755ef110", + "988b3477e88f23909b5c0a09f8c689fedd59eecb868a294a0ca351823146f981", + "9e11ffef69f46a2415e5d3540c61da673488cc730217708e79e8a56fb060e847", + "629128f7e24d93e8c4aace58b4673d9e35f9dec26d0663d16eb823bd17fca7c2", + "2d629cab81e33db7e048dab910c79d72acfb973b17625b96972ce5bc17b2ac4f", + "60f6fc5b612b2fdc3740a4c9ee809777f6bd5bf71eaec7040036db21daa4586d", + "a80e562644ef7524526554f216cd5fdf9add0d598382facca41c56ba4b1f37e9", + "307294a46497c93728359e895e68d115bdfbafdef5ee8061b8d8166bf2af8521", + "698f011e9b2e4b90b091a7caba3ca35dd682745391f606e6cc687e0f2daa22bc", + "7f4ebae60350d098f834b671918984c9ad5c05d893f7de4793dd068006f196f0", + "6bf6efe7ecdd3d42312df60bb708720765d819e58f3f59fb50946ffb6fc4f81b", + "b43fcdf724e2d2c62db5e49a88a2af94067ffe8984e37681e664ff78cccde4c0", + "a0e322a1bb63c7f43ca00136a60f6352a5457c31a66aea20b33754562e4bdd76", + "1cc273760d600d6d76604b2ddb5d9f6b7bdd32aeb231c7507521d85cd4bd3035", + "cf3072ad71bea0ff55900df08eb223f660a32e2d64e692c1bb6a57644e697d7b", + "1e2029a25ae03038be0fa85287b18ecbc3fcd6144f78ac67996dae2f16d79e6f", + "c806d764e94bfbad0ce94201376899ee64ac0e37f10143a20208b6225e120e75", + "c8d3c5ea0e45b01eb88800b3d186e4f054ed967e3aaec0721458a2c8d67f716e", + "b66f590de840a0b68b431ba1c4c7d493d9bb7996b8f92817627ae91ff85f94fa", + 
"69eecc684348c834fdf786a23a2c0d7bf2b6701c951fab54aaba1b4ae870b642", + "a6ae29b62375dbf000c8ae1048e0fcab7b56e03949201682553ed0a548278bf9", + "0a39e3389d2437d160f3d95cdf30f61c1afd52a2f82cafd2ac32a6b6ea823e9b", + "154239aae4f546bc027500eae27c714ce01ad0200462bf47c6807564e0e4a468", + "f09bbb2d19218100f0dd9b29f6b7ac6abd09ba145a1e9a6df989b1e39f097c22", + "e7a7c1f87beafe1895c2bcd421fc77d734f76799d2c0d9c23bcf71b9fffc2699", + "bb4e9c282c174e3d438c63ca40ecba99b6e4cb0fc55c6c8cc940fd3f5c6528c9", + "ed2d5bbf62e6bcdc2e1ee1ba63cf2781eb7ee668fb9eb2fd9ee717d0ce1a6f83", + "6cc625d132fbfa408f6bc106176db4ec6fec3b8fa55565d0615b0aad23193304", + "6697e0fb9053689008e7a063e84ad084bf2ee29fa8c5bc60857ff40c6dd96fe6", + "1e72d341b4f5f75fdb9e881516d6b31725a066a5ed7538a05d9093041e8879e7", + "c6c7af64486c17903cf4e142f46b27aca0a3876476d45ca0f8a4cc91301edec4", + "1ade2393a6111780d70e239968f7d4f48828c7d0d62ad9d58aca5cc5fd6f2cfd", + "a54c07a3fe146ef5bcca630eeedf5f6b3cd9b7d9e90f2b342daf56d6db6df70e", + "9c81aa286afca7471f51b6a9d1a04236ef9def691959d47c2b857fb27d358968", + "b16129d9c3c9b419d2e6dc68cee386498dcde84c0047549322b1f08f2ae57ea4", + "28fd198a2a5e173b9c4ffa110a11a50cde6f2bb1432625082bf9be1224fba75c", + "d1ab6dd5d7e9f7cdeb7cf045e9bedb42820d97082ebe8953eb7168886959077d", + "6bad4ba6df031b27224275eb1e2fe490663f7534d15b08b8f4e5181acb29fb4f", + "53e96f44931cba77b5250d552f8044384ae19a88f75d80efd8963508d3f06dd6", + "7bcd0928e4f3c1db66c2dbaa456bf46aacd892725a9bc4a1c8b87a594c693f18", + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "211eeae933df6319882fc39c862cfe26e231372fcf541f22a65c45963d0f63b7", + "dfbbbc10d452059d6a9571ea6db003aafa91f4d435c55adbee8eee9375a70c2b", + "ddb95f8a83f1b8e01fffb57e08843bf463c1ad7b0ff685d34d2cd5e5e4fb3b08", + "a7af945e4c4969707d25cd0d330723e4c3f1e9311601809d6f786f7c1ead69bb", + "473aa11727e69c7ecf7d2ef63acb8c6510988ed1f009daa89eac1938979fadad" + ], + "leaf_hashed_key": "123563e27ba1384aa3f55eecff054af7ae82013d8aff477a215ce6ed4308f520", + "leaf_enumeration_index": 50, + "value_written": "0000000000000000000000000000000000000000000000000000000000000031", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-list-short.json b/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-list-short.json new file mode 100644 index 000000000000..54e32641b91c --- /dev/null +++ b/core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-list-short.json @@ -0,0 +1,952 @@ +[ + { + "root_hash": "02349928161bc68885c534eda41f7e68d422c1a9f895d1c9246f94e36c7c2ac2", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "6bbb316d292155ad8d2b47a03504033efbf70074141130e9e346a798f5904921", + "395ebe57b2b0ca2592bc9b173eaaedf722c0121cf908386bf2b56d0179fde9c0" + ], + "leaf_hashed_key": "5daa965d9688be0ac629916a9920e45910c13d1fe45d257a9e17217f226dfb4d", + "leaf_enumeration_index": 1, + "value_written": 
"0000000000000000000000000000000000000000000000000000000000000000", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "14bdd92f9367bc7de83ac9b51dd2d79e55d5a4a399c28623804afb11d12fbf36", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "adae70a756c92a92bb18b9f712c806cb809f54241d91a9d869b76b78767e5579", + "395ebe57b2b0ca2592bc9b173eaaedf722c0121cf908386bf2b56d0179fde9c0" + ], + "leaf_hashed_key": "0059914596ed2e70745c44ef315747ea29aff72bce6757aca30a434d9bb70781", + "leaf_enumeration_index": 2, + "value_written": "0000000000000000000000000000000000000000000000000000000000000001", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "8940a1e87e61e134015706516b8f3dff9a1689bd05a35743b78669da719f10cd", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7a58997adaae63400b6a69072164594e387e02de898bed83f964e454f7f961cf", + "73163df8aaa2b8db7c6da1f8d12ac8930cf81f79d6540957ce62cb056282ff57", + "395ebe57b2b0ca2592bc9b173eaaedf722c0121cf908386bf2b56d0179fde9c0" + ], + "leaf_hashed_key": "7ed4dea78574266e019059e5b5fd6f94ed1632bd4a643d1c51aa02974def5684", + "leaf_enumeration_index": 3, + "value_written": "0000000000000000000000000000000000000000000000000000000000000002", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "9a51692cb3802cc766713d4f7e1c57e4a2635ffdb6f3bbf9d6700adaae071d51", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "6bbb316d292155ad8d2b47a03504033efbf70074141130e9e346a798f5904921", + "99467fd320e24a771da659e5f8a797f4123fcc9559b01ba91bd7b80c700548d3" + ], + "leaf_hashed_key": "b96e7e15bcbf96c67b1f26fa5ba80089388fbefad39968132c0791cb313d0157", + "leaf_enumeration_index": 4, + "value_written": "0000000000000000000000000000000000000000000000000000000000000003", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "49e8f3ebbda46b74151fb3dd128f3310856b6c8c541068f9b89fe65c6536c184", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "3cb3d66c68162e7db4fd0487ffaa8548734b9654f3dd8415652ad40e5f8d8be2", + "99467fd320e24a771da659e5f8a797f4123fcc9559b01ba91bd7b80c700548d3" + ], + "leaf_hashed_key": "e8b8b981f358516ba6e7b76e0007bdecf3e97873abe468fbef40110d8206c8d8", + "leaf_enumeration_index": 5, + "value_written": "0000000000000000000000000000000000000000000000000000000000000004", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "1772e009bb8e22d434bd2deb60fb301edc1ccd84bb07fa2d8ee1cf8f7ee031eb", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "b8a53934f6d66e2c1b2b06cc0f1f4cfb87fb25bf6169755c1569384e05ad477c", + "3cb3d66c68162e7db4fd0487ffaa8548734b9654f3dd8415652ad40e5f8d8be2", + "99467fd320e24a771da659e5f8a797f4123fcc9559b01ba91bd7b80c700548d3" + ], + "leaf_hashed_key": "dd55470824e0db2b94ea10ae29afd473f265bbe758854354b926846821fef91a", + "leaf_enumeration_index": 6, + "value_written": "0000000000000000000000000000000000000000000000000000000000000005", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "41eb746d4675ccc737851b092973d6b9ddb645f5ca033069528d3147f03b1424", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "f37b8aac1ed99fd0a8de518de2b6a347a7a180cdc08f054a34e1fbfc9ad6f91d", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "c7e943920baa90556abd094830d0b8272cf1c0167018fb8fbbd5b19e49760902", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "049dfddb9f03237fed8d902e350f0b0e9b85af93f49fe02f0d189c47cd7b122b", + "leaf_enumeration_index": 7, + "value_written": "0000000000000000000000000000000000000000000000000000000000000006", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "b16548768a4b4f93f746db658a56f4cc66b3f11e1599aa3ff428d375c704c2a3", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "e3a9a31f6c0f3520ca75f92e0bbc29b620703380cd101322cd83754a3d58b98e", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "c7e943920baa90556abd094830d0b8272cf1c0167018fb8fbbd5b19e49760902", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "121a1293f56198bb1639951b344a3ae26f5bca796569bb07d53ce98f4085bcbc", + "leaf_enumeration_index": 8, + "value_written": 
"0000000000000000000000000000000000000000000000000000000000000007", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "6b258633cb55d427b079aa79386aba964a43d2d6ae4ca3f25c3f692be270c05d", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "d3dfea032d16110c644935fb745d6da058fc3c488800fe510414b081e1f28131", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7a58997adaae63400b6a69072164594e387e02de898bed83f964e454f7f961cf", + "d26d5bc29ea8e0f305d4913395c3bdddd04f2b2189a84897cec50400af56fccb", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "7cabd5f57ff72670052939485c4533b01b26fd0e490c31bd48b5ac5002ff1f83", + "leaf_enumeration_index": 9, + "value_written": "0000000000000000000000000000000000000000000000000000000000000008", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "3c46318923db4821701adbb3d47c0e33e42ff2090ff10de17d15f46a9f804ad5", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "4ed92b3f10d3c8ee42c0d83b6bd3c308263fc041f4fb7ab22c1e49e530803db9", + "f9a1928e73b72081bdccdda1b784af27b68ea61ee1a1320504b485b0e2839bd7", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "dcd51db21d69330fb3cbfa38e066bfab8301ddab72a503dcbc9ba48b19593034", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "0edeb726b4588b4b373d38b7b43e3083c6e105c991e1812bdcce6c373ed98dd5", + "leaf_enumeration_index": 10, + "value_written": "0000000000000000000000000000000000000000000000000000000000000009", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "3143826021b0b1b0a73e36243097f6b4fb04d4c8d81e2341e5aba7d1b98f29cc", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "e6b57936667bde981f1bafa222b90ad36f55ef30e465db35c94a114d59db2d0b", + "dcd51db21d69330fb3cbfa38e066bfab8301ddab72a503dcbc9ba48b19593034", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "2b5ffd54bff018009f93f7cd782e8922b24520eb99a100efe2c1a4ae6c1ca3f5", + "leaf_enumeration_index": 11, + "value_written": "000000000000000000000000000000000000000000000000000000000000000a", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "03a09b3221a565fb0040e46183f3394f5f09aa2585e99fc08de5457aaaaac76f", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "7cc44e345d64f1edaf3ef08a93e3b776b783cf2abc8281908626e292c1468b6d", + "833d36a8fff9d9b0b37ef4bee9c088fff1c9d9b27f7108b140eda20f013392c1", + "abebe10113e17a42c267c455b11e9a4c5db03a799c61a7880ec107ba7fc741d2", + "4516a563b3546b3fc70af19daa4d083c0e0b5ed04b1cccb50c2d17fe508a31cd" + ], + "leaf_hashed_key": "49ac53849b70d666cc840b4add0baff56fa9ce7e27be2acb275d109a5994ff8d", + "leaf_enumeration_index": 12, + "value_written": "000000000000000000000000000000000000000000000000000000000000000b", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "0a3733f29f76a9e301c90eb42e9e5e5f8ad548aebecfdb955589ca2964d26f8e", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "5310757b1ce0388fcfc60bf62e00e805c4f31afb1a6b9971ed426da9727fa063", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "174ea19f0134fa609698fa2bb4c6c90cff7c273f5fc3953d5702694c96b3ddb6", + "2e0899812d5483fdc4339834160fd8dcef2509e732bdfabf436fd892fcf4e3cd" + ], + "leaf_hashed_key": "bbd2fb6ed132cf2780a90802acaaa371de119dc89f636dbb647ccdff8b0dc056", + "leaf_enumeration_index": 13, + "value_written": "000000000000000000000000000000000000000000000000000000000000000c", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "904587d535ed0b3c71bf56953e1f12a84f14b0a63405ee934030be2964dc4d87", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "edc4f3323474e27116bc4a00199bdec20f57d5ec0fccf1ed72180a55be849545", + "e6b57936667bde981f1bafa222b90ad36f55ef30e465db35c94a114d59db2d0b", + "40de9e3d6bd1b5478bd38cfdd78793fb618b9e2c1b8281d45295c84d241c9c15", + "b32cada3e9aff4c907c164d4a039c58dfd7c0bb5877f484b201fc6f540bed88e" + ], + "leaf_hashed_key": "38ec53adc1cd8bc9f788a5986f73d4e29e2d98945c0aa1d6727be9f8baba4337", + "leaf_enumeration_index": 14, + "value_written": "000000000000000000000000000000000000000000000000000000000000000d", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "35eed18b33a4af4f1713169bb6fadce0f070395ac87607e94b4666050d9ccb26", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "81fa93f2948268ca97e6483b870cf6d6c045cd1c3fa295830952b19463769705", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "174ea19f0134fa609698fa2bb4c6c90cff7c273f5fc3953d5702694c96b3ddb6", + "1371b14baa1c42ecdaabac4cc82dbd0e445fe676c066507ed388ca458d242afe" + ], + "leaf_hashed_key": "ab314b8d202e718d011f9f90df81dd0a00dc4f2279da45121f6dec7257622776", + "leaf_enumeration_index": 15, + "value_written": 
"000000000000000000000000000000000000000000000000000000000000000e", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "c0d4517de36bec1bafab307feb66bf6ec0cd410ef74f791a24d65265983ade29", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "6dc347eca2998454df358aa5dbcd5a8ece556639ecfe758e85c9c8ff685842a5", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "eb1a2d5584eb178be1294774c6ec99e8036bfee6a8cf246b5376991061d1c7e9", + "c601aa282da8fcdf68afc6a9e047bc9a9cff77fac7d1bafe06d6e1be2951ba9b", + "1371b14baa1c42ecdaabac4cc82dbd0e445fe676c066507ed388ca458d242afe" + ], + "leaf_hashed_key": "ebdfe46967031db428c3b807c7f8d78f6a51e9ca5f0500ca6099d3d26b1d312a", + "leaf_enumeration_index": 16, + "value_written": "000000000000000000000000000000000000000000000000000000000000000f", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "109978c91ada694de25a0df93a262213e166ac1f3e66bb0dc3b04e1ed387119f", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "8e566a1d7d20bc08abfc750a54e16bc5ef01b43863466b9b27d01c99af432b78", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "eb1a2d5584eb178be1294774c6ec99e8036bfee6a8cf246b5376991061d1c7e9", + "c601aa282da8fcdf68afc6a9e047bc9a9cff77fac7d1bafe06d6e1be2951ba9b", + "1371b14baa1c42ecdaabac4cc82dbd0e445fe676c066507ed388ca458d242afe" + ], + "leaf_hashed_key": "ef783cc720dbf74c747a155a8975b324d2f8fa80672969dc78fe6f12ea59d03f", + "leaf_enumeration_index": 17, + "value_written": "0000000000000000000000000000000000000000000000000000000000000010", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "b820b3dfb575619ff52c1bb7a37821eaf31ec3f95df378f073521f7f7f8b361b", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "e0d938f15da1d93af39fd8f617041c86bed13c145ef3bcb36a88d19eeff4e882", + "6dc347eca2998454df358aa5dbcd5a8ece556639ecfe758e85c9c8ff685842a5", + "77c99a232aa74e1fdf1b375fa46ddcb3cdc89dd41fc70df86c3bf58e7955a496", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "eb1a2d5584eb178be1294774c6ec99e8036bfee6a8cf246b5376991061d1c7e9", + "c601aa282da8fcdf68afc6a9e047bc9a9cff77fac7d1bafe06d6e1be2951ba9b", + "1371b14baa1c42ecdaabac4cc82dbd0e445fe676c066507ed388ca458d242afe" + ], + "leaf_hashed_key": "eaa40f6cdd316711961300a0f242fdc226f42d4740c381f05200092eaf70b841", + "leaf_enumeration_index": 18, + "value_written": "0000000000000000000000000000000000000000000000000000000000000011", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "689bf7d86b01cae8db19a625901f2daa76d0883879ef642c1ea24212db7eb576", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"86af0a1cf88b68e4a393b42cf7ec7257c29310737377e92c3bf671bb432b6811", + "81fa93f2948268ca97e6483b870cf6d6c045cd1c3fa295830952b19463769705", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "4782923188c70ca4f1f316067fa25e0f5489eaefb84a3f5818d0b12cdaf79473", + "1371b14baa1c42ecdaabac4cc82dbd0e445fe676c066507ed388ca458d242afe" + ], + "leaf_hashed_key": "a58e3a77937b9b747f70d4aee9e84992d7955e52e2de71dc9615df3d16b2b816", + "leaf_enumeration_index": 19, + "value_written": "0000000000000000000000000000000000000000000000000000000000000012", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "18bf39fddf0e3db24bbc5a1457f1eebb96ff9afdd1161e3fc4bea9cff2741731", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "2d0e97688928fc238b726af269dd79cf05b3a231faf3d4fa3f3a5c2a752571e2", + "d22fc1947831a0d8db21a7b4fbc78ceab9e3eb04d136d774ebcf7efa8ea729e2", + "387d507a092847bcb00df365df3803d934c521eb01db2dec5de09e215aef8588", + "04d79e118f0e8fde38490eb1128915e452c7ca22e5da3b4b08edf8fff94b3606" + ], + "leaf_hashed_key": "6dfea072e8e999ba0adb48fc1284af16cc1351d53e52b175f9dfe153602b4362", + "leaf_enumeration_index": 20, + "value_written": "0000000000000000000000000000000000000000000000000000000000000013", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "a478953da260e72065065f5721e8d918e530a51dd93f6bd31b1c0ae7dbf6416f", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "48a8a2c0a6f7acbc814d78ebfb1e0bf6680a2ad89f8b34918065ba8ae28a0b30", + "cc6fb8d932be1ababb95b345e99ebb4385a74093c76fbe92f1dc596678df9527", + "7f391690461b8e3468e2f6ba0fcba50df0195bd6d1bb187180650b00b2a13d5a", + "4782923188c70ca4f1f316067fa25e0f5489eaefb84a3f5818d0b12cdaf79473", + "f87d26628f8403c906f78d2a280f00175967a63f8b9795567fb0be0552502d21" + ], + "leaf_hashed_key": "b79bd507b4ba92ee5e910bfbd28c2f5b88a48e619f51a004c1ecf857c2a18b10", + "leaf_enumeration_index": 21, + "value_written": "0000000000000000000000000000000000000000000000000000000000000014", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "1e7b31fb5db73bc9eee6d6ef6d56d229514daabe68d16fea318218de7a9f7924", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "d82b5d8d327ea5bb7b583c75e4d7219a8398958dccb9a3706ad3d8cf4e1f455f", + "2d0e97688928fc238b726af269dd79cf05b3a231faf3d4fa3f3a5c2a752571e2", + "d22fc1947831a0d8db21a7b4fbc78ceab9e3eb04d136d774ebcf7efa8ea729e2", + "387d507a092847bcb00df365df3803d934c521eb01db2dec5de09e215aef8588", + "d0ef13522195337c847df191500477184ec6c4dfdf4028bdc96a598e39832304" + ], + "leaf_hashed_key": "667d2e8cd4a0af2e6da3c72bfcbfe7f5fabc7d55b3ceaf4e711768c50e8815f3", + "leaf_enumeration_index": 22, + "value_written": 
"0000000000000000000000000000000000000000000000000000000000000015", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "85fe20d8896581600bbe44c6fd460563b25d35bf56c5f8edbe2938ac9d7e3531", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "edc4f3323474e27116bc4a00199bdec20f57d5ec0fccf1ed72180a55be849545", + "e6b57936667bde981f1bafa222b90ad36f55ef30e465db35c94a114d59db2d0b", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "d0ef13522195337c847df191500477184ec6c4dfdf4028bdc96a598e39832304" + ], + "leaf_hashed_key": "38c51d81ef66cc228f31843f3037c67c22ab9e8146ef3cf2d9700aad39af977a", + "leaf_enumeration_index": 23, + "value_written": "0000000000000000000000000000000000000000000000000000000000000016", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "c609f0b713a7aea84001b0f64a07d013c48f7f191844ce0249af64637c4dd76b", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "efa137bbb520ff6d08b47a5a8835205ceb2120b1de48357127a9be8d67686c99", + "edc4f3323474e27116bc4a00199bdec20f57d5ec0fccf1ed72180a55be849545", + "e6b57936667bde981f1bafa222b90ad36f55ef30e465db35c94a114d59db2d0b", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "d0ef13522195337c847df191500477184ec6c4dfdf4028bdc96a598e39832304" + ], + "leaf_hashed_key": "3243e0b4c20b7932b88d7ad622b751aaf4d72e8e3f77c15bc498a4907fe0f2dc", + "leaf_enumeration_index": 24, + "value_written": "0000000000000000000000000000000000000000000000000000000000000017", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "92d24bed58287fcbfda7b5ad559957a9d46c212bc6e838d94cff4f09ee70a410", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6bdb40b77bdc25aaaf540b694b6ec2068a6d2a6b283be0791128d16a7c72d72e", + "3dd89869142d6214be48baa5581f1b8c1f69ec800e77f14420fd36418ad94ecd", + "ff16b6ea7a50387e5a1c6228c81eb8e0cafac856a52a3acff85ee51c6bb99c2c", + "ede964ba904c29980a21cc828096b6beb6a36008986a24a1c1d43ab28a6608ff" + ], + "leaf_hashed_key": "cd50b1e1910f7ea71bed1ad5188f527e1d76f3d82c513078ef22aaa2d7abd721", + "leaf_enumeration_index": 25, + "value_written": "0000000000000000000000000000000000000000000000000000000000000018", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "943d3111053c6cf05cbbae4482dd0b37960806008e7929921fdbcf3a09e8aed9", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "2608dbfd00f81d155d7bf71c1b6ba27ad9b27d9ae5e4cbe6cc34e51542017e0f", + "f37b8aac1ed99fd0a8de518de2b6a347a7a180cdc08f054a34e1fbfc9ad6f91d", + 
"e29f830d96852ce45b61d7addf46f8fa9cda180cf8ef6270e14cefae52d39e8f", + "f9a1928e73b72081bdccdda1b784af27b68ea61ee1a1320504b485b0e2839bd7", + "fdbb1849fa2e9613e8a0f0cc51a18214ef2db4e4554a8b077955d0ab368ea2c4", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "e463038bf6607399fed282763b60060d9dbd9b6e10b01e680ccde4e14589e7a7" + ], + "leaf_hashed_key": "073abaa816fdba2d396d529cbabf1c42eaffee32cae656f1ce322b8b3f17c3b0", + "leaf_enumeration_index": 26, + "value_written": "0000000000000000000000000000000000000000000000000000000000000019", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "a46190f8f9d11ecf7e75d3a6a2f1c94ea2f91246383927885f818c1b689b1e8b", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "6ff1a8f2e1ba2de7cc48ca8f6006a9401699a07032cc1168e2e7f06dd0d0fc22", + "21d2134b05b31eb4b8a7b832a10ea9bf9f6f2a4a4bbef496aa604756de717d57", + "eafc19966a0d30cd948786a15f52fa04ae46239b95afdd4fe5b2b468df10ccb0", + "3775e1f7c018c5fb53b6ef0ec1e7838de74fe2874cbfccd0c95fc3d20d82cd9e" + ], + "leaf_hashed_key": "862de3989830d86223147fb4bb36a6ee24c746ba2c21df655a9a13bdb225f3d3", + "leaf_enumeration_index": 27, + "value_written": "000000000000000000000000000000000000000000000000000000000000001a", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "f5c25148647fb38cbd02ca2ab729c22c03a28885ba5cd315e31a33ef67f1fc6a", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "4555787efd3bde28f1cc20b300a9bec3273e0a9c177d74061986ef7765fe3d62", + "3dbbe481619314bf3bd7abdd794a10bbdb91e8e5f769e0773f91d86d80e95421", + "fdbb1849fa2e9613e8a0f0cc51a18214ef2db4e4554a8b077955d0ab368ea2c4", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "d004cd0858fc359da84d9bca960482549526825c73efc1ba6ffd84dd63449441" + ], + "leaf_hashed_key": "1bdacb94eb5ce7192a6e70de5a2ca66636afe84156c8674662ad2d3e15d934c3", + "leaf_enumeration_index": 28, + "value_written": "000000000000000000000000000000000000000000000000000000000000001b", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "8dcff9b5ad3bd5f18bd77e092a1c2a057768a59389336b6427e54c9b0ba70ef5", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "80f801c9b76645191dfba4cf54874c900e4ba596deae91126b65337ebe8c889e", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "e6c3bad3e9de3f132d85e5c2434d717141ed64b3d5ff9599b9169b3b3aa47cc2", + "1b17eb3a67fc1fafa0f243763c641719b6e04d91ed2cb435e6563b8be9362739", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "d004cd0858fc359da84d9bca960482549526825c73efc1ba6ffd84dd63449441" + ], + "leaf_hashed_key": "2aa43a69ae8fec00f5cfd76d5bd96925ecc74cc2f10ab073917f9180ab7c08aa", + "leaf_enumeration_index": 29, + "value_written": 
"000000000000000000000000000000000000000000000000000000000000001c", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "ad589e5a7f35e5a1dc8fa32a025fa04279cdafaaf30552c5eaf649c984128891", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "8c40cc8cd334f62f8143c8272897ef4cb2dab703f13ab612b9fd2352f8b0e9dc", + "da1970ae6c151c374f5949f619888a0a953202d5692dd051ea7b07da19e83236", + "e29f830d96852ce45b61d7addf46f8fa9cda180cf8ef6270e14cefae52d39e8f", + "5f826ac7667be4226b9243fef6ac5d1e17dee1586359ed070495d834b1c50a3a", + "aeffd85d428c3130fca2195f13f2657350bcce1b7a3c827eefad367ab114fc96", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "d004cd0858fc359da84d9bca960482549526825c73efc1ba6ffd84dd63449441" + ], + "leaf_hashed_key": "03092a9e63ffbabed81e8c70fc6b440760eb7c97e2ef6c7ff2adeee35cde3376", + "leaf_enumeration_index": 30, + "value_written": "000000000000000000000000000000000000000000000000000000000000001d", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "e2ad2c217524f163868b6283980cc271ca0ac13bba9cb16e6fda4e26363662fa", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "98f81d1a67e5ccff4db4a2c52773d705cf19c37d646d65cd9c3e1c1ee17e34a5", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "e8e61afcff287368538d5192101436a0cfc1c876c9960a58a2800174051bfc71", + "2e16a6a06246502867c70ffea89ef9a3a7614ce4896d61c540e6b110ebc3129a", + "2fbd97144d45e005a0d1410a4184275c1b08c30d2ead381e9bc426a65c758748", + "eafc19966a0d30cd948786a15f52fa04ae46239b95afdd4fe5b2b468df10ccb0", + "d7e4d3b4a41aa1a1d504d56bb238e1b681b3e0b0b6e39c986e0bdc60b0689b23" + ], + "leaf_hashed_key": "aaa8a54f5723158280ea3827ccd4a64c0d0ab07fb3abc52475d27ad316348148", + "leaf_enumeration_index": 31, + "value_written": "000000000000000000000000000000000000000000000000000000000000001e", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "f4feae92f895f718bfe80a1c97c4d52da60a5cb4a446604f9d5147f23b514b68", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "9d63fd0aec57819eea5046c633201c7fb405b768796e76939c27d261bf97e78e", + "b1d7451bc0cae000f8822145da99707c5f1c060452b3ef2f1002d4ff9d4b3bd4", + "5f826ac7667be4226b9243fef6ac5d1e17dee1586359ed070495d834b1c50a3a", + "aeffd85d428c3130fca2195f13f2657350bcce1b7a3c827eefad367ab114fc96", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "9f5f454ce840f5915eb2c9c6a6bb25e0841627cc7c3599e08627ded763018f8b" + ], + "leaf_hashed_key": "085e9bf51deaeae5f5ae18c2154008e11b6c4558b55c00a5ceefd2231a349227", + "leaf_enumeration_index": 32, + "value_written": "000000000000000000000000000000000000000000000000000000000000001f", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "1c03ee40d3b64f0d29484aa895ca6b23bff258fc316ccb0c8c35b298803ee035", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "fd19e2ecc297635e667233da98dfabe303232a5d3fbf41c982f1345130955d69", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "e6c3bad3e9de3f132d85e5c2434d717141ed64b3d5ff9599b9169b3b3aa47cc2", + "916c87eb16d2c5f404369475762edb6f0a0513ac2dc979ae33bd2540d754aeb9", + "6d52e4a3f91d432442da947d4c997d2b1681b924f1f4162cab742f69444d4c4d", + "9f5f454ce840f5915eb2c9c6a6bb25e0841627cc7c3599e08627ded763018f8b" + ], + "leaf_hashed_key": "2b0340f963d83c228b9b42e15cb4f996e3df448f12fc09d571664a837c26288f", + "leaf_enumeration_index": 33, + "value_written": "0000000000000000000000000000000000000000000000000000000000000020", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "1cefe494c0d3a6e021a391ebbee9965577545e4e4f2a56a09f1576c620b8a759", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "1b3144cf9025c6c45eaadfb6fcf1d8286f958b4c3023f85ef05fc8152f2b4a84", + "7cc44e345d64f1edaf3ef08a93e3b776b783cf2abc8281908626e292c1468b6d", + "5dac9109b2aa3c1cf19a3b39d44ddf6324b79e0acd4f6f5e5e9e97b090200f28", + "69090c2228b44261f478a8746bc77b1a27745b68e0b1b3125e774ae6c1ed60b9", + "9f5f454ce840f5915eb2c9c6a6bb25e0841627cc7c3599e08627ded763018f8b" + ], + "leaf_hashed_key": "4536e10e20cce9fc0d256fbd253319c2ea2644f51daad3eecc5cba05cd6c6a21", + "leaf_enumeration_index": 34, + "value_written": "0000000000000000000000000000000000000000000000000000000000000021", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "cd206ad284481a6bc292d191a38ab27db4e5b2f5a973e967b47875d0db4e8d57", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "753072c96d64b5e11bfc00e7b13ed63601f47e7dc913ee14dba8c8507491ced5", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "4733066b0a6140d40cd79418259c50e040a80fd8658612a8a61431976bfddc09", + "05e19eaf73a2b85f8084b9a48a72981be954c94c1298d80d7186c54df4e6d7cb", + "69090c2228b44261f478a8746bc77b1a27745b68e0b1b3125e774ae6c1ed60b9", + "9f5f454ce840f5915eb2c9c6a6bb25e0841627cc7c3599e08627ded763018f8b" + ], + "leaf_hashed_key": "7bc2334292a3193e2c609e55142bf1e6754c7e46f1f62bd0cfde915e4c738046", + "leaf_enumeration_index": 35, + "value_written": "0000000000000000000000000000000000000000000000000000000000000022", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "f82e1f83f60860616c88ce89cb1b1605734074d92324785317cd8fad9d845685", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "4fa324013a6a98c25450b4226767ad71d65a7e01c7bef3c0d4b30f636f4adf2f", + "48ce6e684e2a2c6803cd4e41fb31a290b908a95a5c51d9508bb55b23bf7dc4ce", + "99f3812a7fb4b1c02d46d48a23818a7cd9420cdb96ae05ff3a45e6d15b3a952c", + "05e19eaf73a2b85f8084b9a48a72981be954c94c1298d80d7186c54df4e6d7cb", + "69090c2228b44261f478a8746bc77b1a27745b68e0b1b3125e774ae6c1ed60b9", + "9f5f454ce840f5915eb2c9c6a6bb25e0841627cc7c3599e08627ded763018f8b" + ], + "leaf_hashed_key": "6b61e415eddec631832ce8c8460e309d8c82a83bec459e4f7071927278504f91", + "leaf_enumeration_index": 36, + "value_written": 
"0000000000000000000000000000000000000000000000000000000000000023", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "b8413efa3175b6d6448828aed47aa53c9fe8c8c4fcc20db4ccf900caa1cbf75f", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "da4bfe6756fb310bff63796fa8f604d2d74822fc113ecbf92491d0013b4f83db", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "e56357aad6ea138183988e39af483d2a69d9bb371fdc6d54a20b29189fe14a73", + "2e16a6a06246502867c70ffea89ef9a3a7614ce4896d61c540e6b110ebc3129a", + "2fbd97144d45e005a0d1410a4184275c1b08c30d2ead381e9bc426a65c758748", + "eafc19966a0d30cd948786a15f52fa04ae46239b95afdd4fe5b2b468df10ccb0", + "68e8522ca75c6e9f8d456a2839618ec28512d9ff25a201017c4065dae382b106" + ], + "leaf_hashed_key": "a7583b82003b6105f50c3b7f828ea086fd3c52687dcb0f896c3b000673f56821", + "leaf_enumeration_index": 37, + "value_written": "0000000000000000000000000000000000000000000000000000000000000024", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "dc602a4a1cd25bccb872f11b487e0991a90829ae1fa06b3ba838628c49bda3e9", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "3b6aefbce8ac29c50624d3a46d082d3e5ca0c16751eeed78c8675426c3af3e0a", + "cd068fe4cfa405447057d9441c98f42b29bf94008c6b26062578fe49577703c1", + "99f3812a7fb4b1c02d46d48a23818a7cd9420cdb96ae05ff3a45e6d15b3a952c", + "05e19eaf73a2b85f8084b9a48a72981be954c94c1298d80d7186c54df4e6d7cb", + "69090c2228b44261f478a8746bc77b1a27745b68e0b1b3125e774ae6c1ed60b9", + "04ebaca89336b679f1d0e7c408f5e2aed11c117f237f907e366d2632f493a703" + ], + "leaf_hashed_key": "6150d649f2f6bc1f982370f9e499f78820072f67d21587dcdea0aa4bdc5d9014", + "leaf_enumeration_index": 38, + "value_written": "0000000000000000000000000000000000000000000000000000000000000025", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "bf0585a3ee3b5a3c70dd3965c03c897cfe88b7c00d60b3f76c913b141c4672ff", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "3b6aefbce8ac29c50624d3a46d082d3e5ca0c16751eeed78c8675426c3af3e0a", + "cd068fe4cfa405447057d9441c98f42b29bf94008c6b26062578fe49577703c1", + "99f3812a7fb4b1c02d46d48a23818a7cd9420cdb96ae05ff3a45e6d15b3a952c", + "05e19eaf73a2b85f8084b9a48a72981be954c94c1298d80d7186c54df4e6d7cb", + "69090c2228b44261f478a8746bc77b1a27745b68e0b1b3125e774ae6c1ed60b9", + "04ebaca89336b679f1d0e7c408f5e2aed11c117f237f907e366d2632f493a703" + ], + "leaf_hashed_key": "61da14f1b382b075a9b4a2a0af5e0dc14f9c03dbb1703ebb90bab219cfe06380", + "leaf_enumeration_index": 39, + "value_written": "0000000000000000000000000000000000000000000000000000000000000026", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "7c21cf530f55ded1d3892dcf4edd94a6a7d642173ca49554a9e78e41b8f192ce", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "3f55fd31275798166af433479869a898c420d5c78f831c0728056b456d497576", + "b50aa4b064646f83aa9599d5c54fbda33f4401c7f3f0bf7b25ebd139d4afd2cc", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"1b36b4fd2754631a42e6830c0a9648e6f87c4ed530fc01026423a80bb05f2e51", + "f38fc0f925a4301da7af00f80f638eec5ac6218a42589543ee6b826f218ae950", + "2fbd97144d45e005a0d1410a4184275c1b08c30d2ead381e9bc426a65c758748", + "eafc19966a0d30cd948786a15f52fa04ae46239b95afdd4fe5b2b468df10ccb0", + "3d3bd06051bee97b5774c52fb37ed29d0e7a52e66a84635be17814a56dd6c7c3" + ], + "leaf_hashed_key": "b868f09b21686a0fbe7bcbcf6ce49a5a9e9858bfbb289a5ca20545a8b7cfc7c0", + "leaf_enumeration_index": 40, + "value_written": "0000000000000000000000000000000000000000000000000000000000000027", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "5e77478b24cfa22a75519f96d4e374a0d3e8515c355ff0b48c4a11485e76fea4", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "0be565d858204b89d8e0b42c8c33310db59b74ef9c0f3a81894678a197608bf0", + "3e2becbb325780f436936b4c13e47cd971fc5f507c48ac62e5bfbfa5b16681f3", + "e5cce92681938b4ff9af35a4825d6d8bb997b9e8c0412c98c3717ea6ba334d59", + "3d3bd06051bee97b5774c52fb37ed29d0e7a52e66a84635be17814a56dd6c7c3" + ], + "leaf_hashed_key": "f671a19743e44614a75595515fe6f6a91e11110fd42d02f79db1081731a84cee", + "leaf_enumeration_index": 41, + "value_written": "0000000000000000000000000000000000000000000000000000000000000028", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "56861b32d5334d3532709ed763257862a532547d36b3684d14e75a7cbc2ca8e3", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "a6c1536d47516a9a2c1bf70121aabed5b48fa25253f7844ff294cdcbbc17ac0a", + "ba99901e18e90cd8af7ef5a688d743a279bd2ce177cebff3702338d880c09394", + "3e2becbb325780f436936b4c13e47cd971fc5f507c48ac62e5bfbfa5b16681f3", + "e5cce92681938b4ff9af35a4825d6d8bb997b9e8c0412c98c3717ea6ba334d59", + "3d3bd06051bee97b5774c52fb37ed29d0e7a52e66a84635be17814a56dd6c7c3" + ], + "leaf_hashed_key": "e0a775c0a3b4d8497be52ae5ba91ee5ee737377a8e5d39a996fe955d760fa0b9", + "leaf_enumeration_index": 42, + "value_written": "0000000000000000000000000000000000000000000000000000000000000029", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "621e85f9aa7b9a47b0f712b11a7423cf5822bb0c0599a12ab07256e5b968c172", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "47ca26a71d59d7d233e0cf813b1cbada2b51a588bdaa78e5f08964176c7e4166", + "82d5a88ccd489c535e32771b29856aae549de5048ff4ea9eaf412468ca1ce4eb", + "b0e4a80125a0b35cd9405adf1c2a5016e4fd91422259bf138fbedfc495c23963", + "e5cce92681938b4ff9af35a4825d6d8bb997b9e8c0412c98c3717ea6ba334d59", + "3d3bd06051bee97b5774c52fb37ed29d0e7a52e66a84635be17814a56dd6c7c3" + ], + "leaf_hashed_key": "d3420a358b3ded40f46594f783bee907d2e7109ca6f1f1abe5f77141f29ac351", + "leaf_enumeration_index": 43, + "value_written": 
"000000000000000000000000000000000000000000000000000000000000002a", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "d3ff65493f07617db8c2ef337b11a9bb64b3fcc0cd91c8616eb11822660ce121", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "b7129459a33d67a3c8344dacb862098b7072d61848705851aa89fd2d412ea22f", + "e6c3bad3e9de3f132d85e5c2434d717141ed64b3d5ff9599b9169b3b3aa47cc2", + "916c87eb16d2c5f404369475762edb6f0a0513ac2dc979ae33bd2540d754aeb9", + "5d6854aec2876e126fc0f16a2eafe5e7ab0295201cc2c4fff60fb67ca417f47d", + "6edea7e02b1ff7434d5ada6c5f4853be10b4d1e3de3fd7f608cfc773c7bb406f" + ], + "leaf_hashed_key": "239cefb312123f2d3a712267910d572320e09cb9e26c873fbbe7205ae0945c2b", + "leaf_enumeration_index": 44, + "value_written": "000000000000000000000000000000000000000000000000000000000000002b", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "a7844577478a5cc5dedfea11bfa3dbe489e2132ce5cd1f65e55269a761854f24", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "0c74277d38bd980de32defe2ea4b0dcd463d0fcf1d654d5de8c0f272db50fe9a", + "a106163f126ecb33532b26422a7d89dad356e32b1281793433d2c0c345f08489", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "1b36b4fd2754631a42e6830c0a9648e6f87c4ed530fc01026423a80bb05f2e51", + "f38fc0f925a4301da7af00f80f638eec5ac6218a42589543ee6b826f218ae950", + "2fbd97144d45e005a0d1410a4184275c1b08c30d2ead381e9bc426a65c758748", + "b83ff6f9ce12683e8f4fa736e6461d6c558411e05b47270288c1f9e562e73651", + "11f81d46fbbb0eedd66e553ff00d04e62cb6eb0405ff38a433b99245bc21b849" + ], + "leaf_hashed_key": "ba13bf69cc1ee96567ecd3a1242a15630082afa2b372cac60a745cbb1f5b8e33", + "leaf_enumeration_index": 45, + "value_written": "000000000000000000000000000000000000000000000000000000000000002c", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "613d995e43834c95405e28ee5768e83307382130246faa4a689b447c6c586471", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "d3946f58c04823d1f0c65f47a191e3b7e53dadf2ebd408827f441d20fa52616d", + "79ba8e622367bfadda105325b5677f116d8ad8adc6d5ff47595755758e103950", + "b99b8d866e25e5bcf59ee1016dd9c0db7a57dc0cf60ec75491560f1828ded609", + "88eabb74a8d73fb29524a667a64e8f2da7b30619bf2f9ec939820bc97fe1d628", + "fae7a6800fefaf8e1c0f8cdf33297640e3d5add34d9cb53b710f603c9aa4d142" + ], + "leaf_hashed_key": "50b7c6dd203adc5355bfe5d060ababdd39460456558e339601b9c9a737784362", + "leaf_enumeration_index": 46, + "value_written": "000000000000000000000000000000000000000000000000000000000000002d", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "8184c99283a34fa9844437cc77e42047dbc986cd9a466e940c5ad0c670678648", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + 
"32000514e1d34a85f7b732c6a0f16d27cc25ffb238de1c5c46326fde1a58e3a5", + "9320f411b5fed220994331f5605b5255124cd28ba67cf72c2f8b9be72e2d1dd4", + "b9e02e9445fd42b49e551bd95ea551c5dc8a266e5f67bc44493db9540453aedc", + "b83ff6f9ce12683e8f4fa736e6461d6c558411e05b47270288c1f9e562e73651", + "6f3252030e59c9bb62731c649de3a82d30a534b39c9a7e8efc12ce1553e8255c" + ], + "leaf_hashed_key": "993854c01b44c406823a701db21d4ee0062304d166079a5f5ec3fa7f21bf3f4c", + "leaf_enumeration_index": 47, + "value_written": "000000000000000000000000000000000000000000000000000000000000002e", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "959f7f4e416a159f10e1758562fab1f21bd7d15416c1efa33de272f2258e2b64", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "310d0b8899c64046f46f1efae9af9aefa75b181efee2190b31ed6dd153f64326", + "3b2323bb5b4c7b2e47210eedb567f3b2369a5dd46d96a689d0f2d26de55102a5", + "f38fc0f925a4301da7af00f80f638eec5ac6218a42589543ee6b826f218ae950", + "7466f7fe84da88a21b910b786e430264a2eb66fa7b78075337a62df1f36daee4", + "b83ff6f9ce12683e8f4fa736e6461d6c558411e05b47270288c1f9e562e73651", + "6f3252030e59c9bb62731c649de3a82d30a534b39c9a7e8efc12ce1553e8255c" + ], + "leaf_hashed_key": "b02a8338c2894881e5569427059a3c618686ef5f98c73ec1822450fdf7612375", + "leaf_enumeration_index": 48, + "value_written": "000000000000000000000000000000000000000000000000000000000000002f", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "010d2844fc1075f7642aff1edec6e758e00fb4ae3521f09391ca96fd132d6424", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "2bfce47833a6d7f2eddee63973cd4e9d0e5dcac9d094e5d83d0b75672340d002", + "4555787efd3bde28f1cc20b300a9bec3273e0a9c177d74061986ef7765fe3d62", + "dfbbbc10d452059d6a9571ea6db003aafa91f4d435c55adbee8eee9375a70c2b", + "ddb95f8a83f1b8e01fffb57e08843bf463c1ad7b0ff685d34d2cd5e5e4fb3b08", + "a7af945e4c4969707d25cd0d330723e4c3f1e9311601809d6f786f7c1ead69bb", + "473aa11727e69c7ecf7d2ef63acb8c6510988ed1f009daa89eac1938979fadad" + ], + "leaf_hashed_key": "1f19d716458bf630bf22fbb8dbe3c3f51ea969c78f5f32e00ebabfb4fc53967a", + "leaf_enumeration_index": 49, + "value_written": "0000000000000000000000000000000000000000000000000000000000000030", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "root_hash": "9dfbdaad554b5816807c1e1c051c3ad8e40b4f4d7695bdcc9c534a26c4e584c9", + "is_write": true, + "first_write": true, + "merkle_paths": [ + "a595e686f1fb8123bf00b67d220b5853f8ce470d942387392b06f06a6749d0de", + "61b34855bfdc9b8ec97bf582bd407e50cd6e1247cde484a4daacf29dcce8abba", + "de70d6d799ae74d61c2282c904b790b4d8ea4ae9cfc6f8981e16a9874846e4ec", + "211eeae933df6319882fc39c862cfe26e231372fcf541f22a65c45963d0f63b7", + "dfbbbc10d452059d6a9571ea6db003aafa91f4d435c55adbee8eee9375a70c2b", + "ddb95f8a83f1b8e01fffb57e08843bf463c1ad7b0ff685d34d2cd5e5e4fb3b08", + "a7af945e4c4969707d25cd0d330723e4c3f1e9311601809d6f786f7c1ead69bb", + "473aa11727e69c7ecf7d2ef63acb8c6510988ed1f009daa89eac1938979fadad" + ], + "leaf_hashed_key": "123563e27ba1384aa3f55eecff054af7ae82013d8aff477a215ce6ed4308f520", + "leaf_enumeration_index": 50, + "value_written": 
"0000000000000000000000000000000000000000000000000000000000000031", + "value_read": "0000000000000000000000000000000000000000000000000000000000000000" + } +] diff --git a/core/lib/mini_merkle_tree/Cargo.toml b/core/lib/mini_merkle_tree/Cargo.toml index d9287a26eaf3..ccb7b9b78f6b 100644 --- a/core/lib/mini_merkle_tree/Cargo.toml +++ b/core/lib/mini_merkle_tree/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index d8cf4481064d..51d7c1b3a824 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -1,24 +1,26 @@ [package] name = "zksync_object_store" version = "1.0.0" -edition = "2018" +edition = "2021" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -google-cloud-storage = "0.9.0" -google-cloud-auth = "0.9.0" -google-cloud-default = { version = "0.1.0", features = ["storage", "google-cloud-metadata"] } -vlog = { path = "../vlog", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_types = { path = "../types", version = "1.0" } + +bincode = "1" +google-cloud-storage = { git = "https://github.com/yoshidan/google-cloud-rust", branch = "main" } +google-cloud-auth = { git = "https://github.com/yoshidan/google-cloud-rust", branch = "main" } +google-cloud-default = { git = "https://github.com/yoshidan/google-cloud-rust", branch = "main", features = ["storage", "google-cloud-metadata"] } +http = "0.2.9" metrics = "0.20" tokio = { version = "1.21.2", features = ["full"] } -http = "0.2.9" +vlog = { path = "../vlog", version = "1.0" } [dev-dependencies] tempdir = "0.3.7" diff --git a/core/lib/object_store/README.md b/core/lib/object_store/README.md new file mode 100644 index 000000000000..f7d004e3d2c7 --- /dev/null +++ b/core/lib/object_store/README.md @@ -0,0 +1,15 @@ +# Object Store + +This crate provides the object storage abstraction that allows to get, put and remove binary blobs. The following +implementations are available: + +- File-based storage saving blobs as separate files in the local filesystem +- GCS-based storage + +These implementations are not exposed externally. Instead, a store trait object can be constructed based on the +[configuration], which can be provided explicitly or constructed from the environment. + +Besides the lower-level storage abstraction, the crate provides high-level typesafe methods to store (de)serializable +objects. Prefer using these methods whenever possible. 
+ +[configuration]: ../config diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs new file mode 100644 index 000000000000..ab265f5adc8e --- /dev/null +++ b/core/lib/object_store/src/file.rs @@ -0,0 +1,95 @@ +use std::{fmt::Debug, fs, io}; + +use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; + +impl From for ObjectStoreError { + fn from(err: io::Error) -> Self { + match err.kind() { + io::ErrorKind::NotFound => ObjectStoreError::KeyNotFound(err.into()), + _ => ObjectStoreError::Other(err.into()), + } + } +} + +#[derive(Debug)] +pub(crate) struct FileBackedObjectStore { + base_dir: String, +} + +impl FileBackedObjectStore { + pub fn new(base_dir: String) -> Self { + for bucket in &[ + Bucket::ProverJobs, + Bucket::WitnessInput, + Bucket::LeafAggregationWitnessJobs, + Bucket::NodeAggregationWitnessJobs, + Bucket::SchedulerWitnessJobs, + ] { + fs::create_dir_all(format!("{base_dir}/{bucket}")).expect("failed creating bucket"); + } + FileBackedObjectStore { base_dir } + } + + fn filename(&self, bucket: Bucket, key: &str) -> String { + format!("{}/{bucket}/{key}", self.base_dir) + } +} + +impl ObjectStore for FileBackedObjectStore { + fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { + let filename = self.filename(bucket, key); + fs::read(filename).map_err(From::from) + } + + fn put_raw(&self, bucket: Bucket, key: &str, value: Vec) -> Result<(), ObjectStoreError> { + let filename = self.filename(bucket, key); + fs::write(filename, value).map_err(From::from) + } + + fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + let filename = self.filename(bucket, key); + fs::remove_file(filename).map_err(From::from) + } +} + +#[cfg(test)] +mod test { + use tempdir::TempDir; + + use super::*; + + #[test] + fn test_get() { + let dir = TempDir::new("test-data").unwrap(); + let path = dir.into_path().into_os_string().into_string().unwrap(); + let object_store = FileBackedObjectStore::new(path); + let expected = vec![9, 0, 8, 9, 0, 7]; + let result = object_store.put_raw(Bucket::ProverJobs, "test-key.bin", expected.clone()); + assert!(result.is_ok(), "result must be OK"); + let bytes = object_store + .get_raw(Bucket::ProverJobs, "test-key.bin") + .unwrap(); + assert_eq!(expected, bytes, "expected didn't match"); + } + + #[test] + fn test_put() { + let dir = TempDir::new("test-data").unwrap(); + let path = dir.into_path().into_os_string().into_string().unwrap(); + let object_store = FileBackedObjectStore::new(path); + let bytes = vec![9, 0, 8, 9, 0, 7]; + let result = object_store.put_raw(Bucket::ProverJobs, "test-key.bin", bytes); + assert!(result.is_ok(), "result must be OK"); + } + + #[test] + fn test_remove() { + let dir = TempDir::new("test-data").unwrap(); + let path = dir.into_path().into_os_string().into_string().unwrap(); + let object_store = FileBackedObjectStore::new(path); + let result = object_store.put_raw(Bucket::ProverJobs, "test-key.bin", vec![0, 1]); + assert!(result.is_ok(), "result must be OK"); + let result = object_store.remove_raw(Bucket::ProverJobs, "test-key.bin"); + assert!(result.is_ok(), "result must be OK"); + } +} diff --git a/core/lib/object_store/src/file_backed_object_store.rs b/core/lib/object_store/src/file_backed_object_store.rs deleted file mode 100644 index d864cad68dc7..000000000000 --- a/core/lib/object_store/src/file_backed_object_store.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::fmt::Debug; -use std::fs; -use std::fs::File; -use std::io::{ErrorKind, Read, Write}; - -use 
crate::object_store::{ - ObjectStore, ObjectStoreError, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, PROVER_JOBS_BUCKET_PATH, - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, WITNESS_INPUT_BUCKET_PATH, -}; - -impl From for ObjectStoreError { - fn from(err: std::io::Error) -> Self { - match err.kind() { - ErrorKind::NotFound => ObjectStoreError::KeyNotFound(err.to_string()), - _ => ObjectStoreError::Other(err.to_string()), - } - } -} - -#[derive(Debug)] -pub struct FileBackedObjectStore { - base_dir: String, -} - -impl FileBackedObjectStore { - pub fn new(base_dir: String) -> Self { - for bucket in &[ - PROVER_JOBS_BUCKET_PATH, - WITNESS_INPUT_BUCKET_PATH, - LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, - SCHEDULER_WITNESS_JOBS_BUCKET_PATH, - ] { - fs::create_dir_all(format!("{}/{}", base_dir, bucket)).expect("failed creating bucket"); - } - FileBackedObjectStore { base_dir } - } - - fn filename(&self, bucket: &'static str, key: String) -> String { - format!("{}/{}/{}", self.base_dir, bucket, key) - } -} - -impl ObjectStore for FileBackedObjectStore { - type Bucket = &'static str; - type Key = String; - type Value = Vec; - - fn get_store_type(&self) -> &'static str { - "FileBackedStore" - } - - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { - let filename = self.filename(bucket, key); - let mut file = File::open(filename)?; - let mut buffer = Vec::::new(); - file.read_to_end(&mut buffer)?; - Ok(buffer) - } - - fn put( - &mut self, - bucket: Self::Bucket, - key: Self::Key, - value: Self::Value, - ) -> Result<(), ObjectStoreError> { - let filename = self.filename(bucket, key); - let mut file = File::create(filename)?; - file.write_all(&value)?; - Ok(()) - } - - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), ObjectStoreError> { - let filename = self.filename(bucket, key); - fs::remove_file(filename)?; - Ok(()) - } -} - -#[cfg(test)] -mod test { - use tempdir::TempDir; - - use super::*; - - #[test] - fn test_get() { - let dir = TempDir::new("test-data").unwrap(); - let path = dir.into_path().into_os_string().into_string().unwrap(); - let mut object_store = FileBackedObjectStore::new(path); - let expected = vec![9, 0, 8, 9, 0, 7]; - let result = object_store.put( - PROVER_JOBS_BUCKET_PATH, - "test-key.bin".to_string(), - expected.clone(), - ); - assert!(result.is_ok(), "result must be OK"); - let bytes = object_store - .get(PROVER_JOBS_BUCKET_PATH, "test-key.bin".to_string()) - .unwrap(); - assert_eq!(expected, bytes, "expected didn't match"); - } - - #[test] - fn test_put() { - let dir = TempDir::new("test-data").unwrap(); - let path = dir.into_path().into_os_string().into_string().unwrap(); - let mut object_store = FileBackedObjectStore::new(path); - let bytes = vec![9, 0, 8, 9, 0, 7]; - let result = object_store.put(PROVER_JOBS_BUCKET_PATH, "test-key.bin".to_string(), bytes); - assert!(result.is_ok(), "result must be OK"); - } - - #[test] - fn test_remove() { - let dir = TempDir::new("test-data").unwrap(); - let path = dir.into_path().into_os_string().into_string().unwrap(); - let mut object_store = FileBackedObjectStore::new(path); - let result = object_store.put( - PROVER_JOBS_BUCKET_PATH, - "test-key.bin".to_string(), - vec![0, 1], - ); - assert!(result.is_ok(), "result must be OK"); - let result = object_store.remove(PROVER_JOBS_BUCKET_PATH, "test-key.bin".to_string()); - assert!(result.is_ok(), "result must be OK"); - } -} diff --git a/core/lib/object_store/src/gcs.rs 
b/core/lib/object_store/src/gcs.rs new file mode 100644 index 000000000000..3197adb1e33a --- /dev/null +++ b/core/lib/object_store/src/gcs.rs @@ -0,0 +1,337 @@ +//! GCS-based [`ObjectStore`] implementation. + +use google_cloud_auth::{credentials::CredentialsFile, error::Error}; +use google_cloud_default::WithAuthExt; +use google_cloud_storage::{ + client::{Client, ClientConfig}, + http::objects::{ + delete::DeleteObjectRequest, + download::Range, + get::GetObjectRequest, + upload::{Media, UploadObjectRequest, UploadType}, + }, + http::Error as HttpError, +}; +use http::StatusCode; +use tokio::runtime::{Handle, RuntimeFlavor}; + +use std::{ + fmt, + future::Future, + thread, + time::{Duration, Instant}, +}; + +use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; + +async fn retry(max_retries: u16, mut f: F) -> Result +where + Fut: Future>, + F: FnMut() -> Fut, +{ + let mut retries = 1; + let mut backoff = 1; + loop { + match f().await { + Ok(result) => return Ok(result), + Err(err) => { + vlog::warn!("Failed gcs request {retries}/{max_retries}, retrying."); + if retries > max_retries { + return Err(err); + } + retries += 1; + tokio::time::sleep(Duration::from_secs(backoff)).await; + backoff *= 2; + } + } + } +} + +pub struct AsyncGoogleCloudStorage { + bucket_prefix: String, + max_retries: u16, + client: Client, +} + +impl fmt::Debug for AsyncGoogleCloudStorage { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("GoogleCloudStorageAsync") + .field("bucket_prefix", &self.bucket_prefix) + .field("max_retries", &self.max_retries) + .finish() + } +} + +impl AsyncGoogleCloudStorage { + pub async fn new( + credential_file_path: Option, + bucket_prefix: String, + max_retries: u16, + ) -> Self { + let client_config = retry(max_retries, || { + Self::get_client_config(credential_file_path.clone()) + }) + .await + .expect("failed fetching GCS client config after retries"); + + Self { + client: Client::new(client_config), + bucket_prefix, + max_retries, + } + } + + async fn get_client_config( + credential_file_path: Option, + ) -> Result { + if let Some(path) = credential_file_path { + let cred_file = CredentialsFile::new_from_file(path) + .await + .expect("failed loading GCS credential file"); + ClientConfig::default().with_credentials(cred_file).await + } else { + ClientConfig::default().with_auth().await + } + } + + fn filename(bucket: &str, filename: &str) -> String { + format!("{bucket}/{filename}") + } + + pub(crate) async fn get_async( + &self, + bucket: &'static str, + key: &str, + ) -> Result, ObjectStoreError> { + let started_at = Instant::now(); + let filename = Self::filename(bucket, key); + vlog::trace!( + "Fetching data from GCS for key {filename} from bucket {}", + self.bucket_prefix + ); + + let request = GetObjectRequest { + bucket: self.bucket_prefix.clone(), + object: filename, + ..GetObjectRequest::default() + }; + let range = Range::default(); + let blob = retry(self.max_retries, || { + self.client.download_object(&request, &range) + }) + .await; + + vlog::trace!( + "Fetched data from GCS for key {key} from bucket {bucket} and it took: {:?}", + started_at.elapsed() + ); + metrics::histogram!( + "server.object_store.fetching_time", + started_at.elapsed(), + "bucket" => bucket + ); + blob.map_err(ObjectStoreError::from) + } + + pub(crate) async fn put_async( + &self, + bucket: &'static str, + key: &str, + value: Vec, + ) -> Result<(), ObjectStoreError> { + let started_at = Instant::now(); + let filename = Self::filename(bucket, key); 
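The private `retry` helper defined near the top of the new `gcs.rs` wraps every GCS call in a bounded number of attempts with exponential backoff (sleep 1 s, 2 s, 4 s, ... between failures). A self-contained sketch of the same pattern is shown below; the helper name and the flaky operation are illustrative, not part of the crate, and a Tokio runtime with default features is assumed.

```rust
use std::{future::Future, time::Duration};

// Sketch of the pattern used by `retry` above: on failure, sleep 1 s, 2 s, 4 s, ...
// between attempts and give up once `max_retries` extra attempts have been spent.
async fn retry_with_backoff<T, E, Fut, F>(max_retries: u16, mut f: F) -> Result<T, E>
where
    Fut: Future<Output = Result<T, E>>,
    F: FnMut() -> Fut,
{
    let mut attempt: u16 = 1;
    let mut backoff_secs = 1;
    loop {
        match f().await {
            Ok(value) => return Ok(value),
            Err(err) if attempt > max_retries => return Err(err),
            Err(_) => {
                attempt += 1;
                tokio::time::sleep(Duration::from_secs(backoff_secs)).await;
                backoff_secs *= 2;
            }
        }
    }
}

#[tokio::main]
async fn main() {
    // Hypothetical flaky operation: fails on the first two calls, then succeeds.
    // (The example really sleeps 1 s + 2 s between the attempts.)
    let mut calls = 0_u32;
    let result = retry_with_backoff(5, || {
        calls += 1;
        let current = calls;
        async move {
            if current < 3 {
                Err("transient error")
            } else {
                Ok(current)
            }
        }
    })
    .await;
    assert_eq!(result, Ok(3));
}
```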
+ vlog::trace!( + "Storing data to GCS for key {filename} from bucket {}", + self.bucket_prefix + ); + + let upload_type = UploadType::Simple(Media::new(filename)); + let request = UploadObjectRequest { + bucket: self.bucket_prefix.clone(), + ..Default::default() + }; + let object = retry(self.max_retries, || { + self.client + .upload_object(&request, value.clone(), &upload_type) + }) + .await; + + vlog::trace!( + "Stored data to GCS for key {key} from bucket {bucket} and it took: {:?}", + started_at.elapsed() + ); + metrics::histogram!( + "server.object_store.storing_time", + started_at.elapsed(), + "bucket" => bucket + ); + object.map(drop).map_err(ObjectStoreError::from) + } + + // For some bizzare reason, `async fn` doesn't work here, failing with the following error: + // + // > hidden type for `impl std::future::Future>` + // > captures lifetime that does not appear in bounds + pub(crate) fn remove_async( + &self, + bucket: &'static str, + key: &str, + ) -> impl Future> + '_ { + let filename = Self::filename(bucket, key); + vlog::trace!( + "Removing data from GCS for key {filename} from bucket {}", + self.bucket_prefix + ); + + let request = DeleteObjectRequest { + bucket: self.bucket_prefix.clone(), + object: filename, + ..DeleteObjectRequest::default() + }; + async move { + retry(self.max_retries, || self.client.delete_object(&request)) + .await + .map_err(ObjectStoreError::from) + } + } +} + +impl From for ObjectStoreError { + fn from(err: HttpError) -> Self { + let is_not_found = match &err { + HttpError::HttpClient(err) => err + .status() + .map_or(false, |status| matches!(status, StatusCode::NOT_FOUND)), + HttpError::Response(response) => response.code == StatusCode::NOT_FOUND.as_u16(), + HttpError::TokenSource(_) => false, + }; + + if is_not_found { + ObjectStoreError::KeyNotFound(err.into()) + } else { + ObjectStoreError::Other(err.into()) + } + } +} + +#[derive(Debug)] +pub(crate) struct GoogleCloudStorage { + inner: AsyncGoogleCloudStorage, + handle: Handle, +} + +impl GoogleCloudStorage { + pub fn new( + credential_file_path: Option, + bucket_prefix: String, + max_retries: u16, + ) -> Self { + let handle = Handle::try_current().unwrap_or_else(|_| { + panic!( + "No Tokio runtime detected. Make sure that `dyn ObjectStore` is created \ + on a Tokio thread, either in a task run by Tokio, or in the blocking context \ + run with `tokio::task::spawn_blocking()`." + ); + }); + let inner = AsyncGoogleCloudStorage::new(credential_file_path, bucket_prefix, max_retries); + Self { + inner: Self::block_on(&handle, inner), + handle, + } + } + + fn block_on(handle: &Handle, future: impl Future + Send) -> T { + if handle.runtime_flavor() == RuntimeFlavor::CurrentThread { + // We would like to just call `handle.block_on(future)`, but this panics + // if called in an async context. As such, we have this ugly hack, spawning + // a new thread just to block on a future. + thread::scope(|scope| { + scope.spawn(|| handle.block_on(future)).join().unwrap() + // ^ `unwrap()` propagates panics to the calling thread, which is what we want + }) + } else { + // In multi-threaded runtimes, we have `block_in_place` to the rescue. 
+ tokio::task::block_in_place(|| handle.block_on(future)) + } + } +} + +impl ObjectStore for GoogleCloudStorage { + fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { + let task = self.inner.get_async(bucket.as_str(), key); + Self::block_on(&self.handle, task) + } + + fn put_raw(&self, bucket: Bucket, key: &str, value: Vec) -> Result<(), ObjectStoreError> { + let task = self.inner.put_async(bucket.as_str(), key, value); + Self::block_on(&self.handle, task) + } + + fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + let task = self.inner.remove_async(bucket.as_str(), key); + Self::block_on(&self.handle, task) + } +} + +#[cfg(test)] +mod test { + use std::sync::atomic::{AtomicU16, Ordering}; + + use super::*; + + async fn test_blocking() { + let handle = Handle::current(); + let result = GoogleCloudStorage::block_on(&handle, async { 42 }); + assert_eq!(result, 42); + + let result = tokio::task::spawn_blocking(move || { + GoogleCloudStorage::block_on(&handle, async { 42 }) + }); + assert_eq!(result.await.unwrap(), 42); + } + + #[tokio::test] + async fn blocking_in_sync_and_async_context() { + test_blocking().await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn blocking_in_sync_and_async_context_in_multithreaded_rt() { + test_blocking().await; + } + + #[tokio::test] + async fn test_retry_success_immediate() { + let result = retry(2, || async { Ok::<_, ()>(42) }).await; + assert_eq!(result, Ok(42)); + } + + #[tokio::test] + async fn test_retry_failure_exhausted() { + let result = retry(2, || async { Err::(()) }).await; + assert_eq!(result, Err(())); + } + + async fn retry_success_after_n_retries(n: u16) -> Result { + let retries = AtomicU16::new(0); + let result = retry(n, || async { + let retries = retries.fetch_add(1, Ordering::Relaxed); + if retries + 1 == n { + Ok(42) + } else { + Err(()) + } + }) + .await; + + result.map_err(|_| "Retry failed".to_string()) + } + + #[tokio::test] + async fn test_retry_success_after_retry() { + let result = retry(2, || retry_success_after_n_retries(2)).await; + assert_eq!(result, Ok(42)); + } +} diff --git a/core/lib/object_store/src/gcs_object_store.rs b/core/lib/object_store/src/gcs_object_store.rs deleted file mode 100644 index 3d690a7d1fed..000000000000 --- a/core/lib/object_store/src/gcs_object_store.rs +++ /dev/null @@ -1,217 +0,0 @@ -use std::fmt; -use std::sync::mpsc::channel; -use std::time::Instant; - -use google_cloud_default::WithAuthExt; -use google_cloud_storage::client::{Client, ClientConfig}; -use google_cloud_storage::http::{ - objects::{ - delete::DeleteObjectRequest, - download::Range, - get::GetObjectRequest, - upload::{Media, UploadObjectRequest, UploadType}, - }, - Error::{self, HttpClient}, -}; -use http::StatusCode; -use tokio; - -use zksync_config::ObjectStoreConfig; - -use crate::object_store::{ObjectStore, ObjectStoreError}; - -pub struct GoogleCloudStorage { - client: Client, - bucket_prefix: String, -} - -// we need to implement custom Debug for GoogleCloudStorage because -// `google_cloud_storage::client::Client` type does not implements debug. 
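The `block_on` helper in the new `GoogleCloudStorage` above is the subtle part of this file: the store exposes a synchronous `ObjectStore` interface while the GCS client is async, so the code has to bridge the two without panicking on either runtime flavor. A distilled sketch of the same bridging pattern (not the crate's API, just an illustration; requires a Tokio version that provides `Handle::runtime_flavor`, as the new code itself does):

```rust
use std::{future::Future, thread};
use tokio::runtime::{Handle, RuntimeFlavor};

// Run a future to completion from synchronous code. On a current-thread runtime,
// `Handle::block_on` would panic, so the future is parked on a scoped helper
// thread instead; on a multi-threaded runtime, `block_in_place` suffices.
fn bridge_block_on<T: Send>(handle: &Handle, future: impl Future<Output = T> + Send) -> T {
    if handle.runtime_flavor() == RuntimeFlavor::CurrentThread {
        // `join().unwrap()` propagates panics from the helper thread.
        thread::scope(|scope| scope.spawn(|| handle.block_on(future)).join().unwrap())
    } else {
        tokio::task::block_in_place(|| handle.block_on(future))
    }
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let handle = Handle::current();
    // Called directly on a runtime worker thread, so the `block_in_place` branch runs.
    let answer = bridge_block_on(&handle, async { 2 + 2 });
    assert_eq!(answer, 4);
}
```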
-impl fmt::Debug for GoogleCloudStorage { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("GoogleCloudStorage") - .field("bucket_prefix", &self.bucket_prefix) - .finish() - } -} - -impl From for ObjectStoreError { - fn from(error: Error) -> Self { - match error { - HttpClient(reqwest_error) => { - if let Some(status) = reqwest_error.status() { - match status { - StatusCode::NOT_FOUND => { - ObjectStoreError::KeyNotFound(reqwest_error.to_string()) - } - _ => ObjectStoreError::Other(reqwest_error.to_string()), - } - } else { - ObjectStoreError::Other(reqwest_error.to_string()) - } - } - _ => ObjectStoreError::Other(error.to_string()), - } - } -} - -pub const GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE: &str = "GoogleCloudStorage"; - -impl GoogleCloudStorage { - pub fn new(client: Client) -> Self { - let object_store_config = ObjectStoreConfig::from_env(); - GoogleCloudStorage { - client, - bucket_prefix: object_store_config.bucket_base_url, - } - } - - fn filename(&self, bucket: &str, filename: &str) -> String { - format!("{}/{}", bucket, filename) - } - - async fn get_async( - self, - bucket: &'static str, - key: String, - ) -> Result, ObjectStoreError> { - let started_at = Instant::now(); - vlog::info!( - "Fetching data from GCS for key {} from bucket {}", - &self.filename(bucket, &key), - self.bucket_prefix - ); - let blob = self - .client - .download_object( - &GetObjectRequest { - bucket: self.bucket_prefix.clone(), - object: self.filename(bucket, &key), - ..Default::default() - }, - &Range::default(), - None, - ) - .await; - vlog::info!( - "Fetched data from GCS for key {} from bucket {} and it took: {:?}", - key, - bucket, - started_at.elapsed() - ); - metrics::histogram!( - "server.object_store.fetching_time", - started_at.elapsed(), - "bucket" => bucket - ); - blob.map_err(ObjectStoreError::from) - } - - async fn put_async( - self, - bucket: &'static str, - key: String, - value: Vec, - ) -> Result<(), ObjectStoreError> { - let started_at = Instant::now(); - vlog::info!( - "Storing data to GCS for key {} from bucket {}", - &self.filename(bucket, &key), - self.bucket_prefix - ); - let upload_type = UploadType::Simple(Media::new(self.filename(bucket, &key))); - let object = self - .client - .upload_object( - &UploadObjectRequest { - bucket: self.bucket_prefix.clone(), - ..Default::default() - }, - value, - &upload_type, - None, - ) - .await; - vlog::info!( - "Stored data to GCS for key {} from bucket {} and it took: {:?}", - key, - bucket, - started_at.elapsed() - ); - metrics::histogram!( - "server.object_store.storing_time", - started_at.elapsed(), - "bucket" => bucket - ); - object.map(drop).map_err(ObjectStoreError::from) - } - - async fn remove_async(self, bucket: &'static str, key: String) -> Result<(), ObjectStoreError> { - vlog::info!( - "Removing data from GCS for key {} from bucket {}", - &self.filename(bucket, &key), - self.bucket_prefix - ); - self.client - .delete_object( - &DeleteObjectRequest { - bucket: self.bucket_prefix.clone(), - object: self.filename(bucket, &key), - ..Default::default() - }, - None, - ) - .await - .map_err(ObjectStoreError::from) - } -} - -fn gcs_query(query: F) -> OUT -where - OUT: Send + 'static, - FUT: std::future::Future, - F: FnOnce(GoogleCloudStorage) -> FUT + Send + 'static, -{ - let (tx, rx) = channel(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_current_thread() - .enable_io() - .enable_time() - .build() - .unwrap(); - let result = runtime.block_on(async move { - let gcs_config = 
ClientConfig::default().with_auth().await.unwrap(); - let gcs = GoogleCloudStorage::new(Client::new(gcs_config)); - query(gcs).await - }); - tx.send(result).unwrap(); - }); - rx.recv().unwrap() -} - -impl ObjectStore for GoogleCloudStorage { - type Bucket = &'static str; - type Key = String; - type Value = Vec; - - fn get_store_type(&self) -> &'static str { - GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE - } - - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { - gcs_query(move |gcs| gcs.get_async(bucket, key)) - } - - fn put( - &mut self, - bucket: Self::Bucket, - key: Self::Key, - value: Self::Value, - ) -> Result<(), ObjectStoreError> { - gcs_query(move |gcs| gcs.put_async(bucket, key, value)) - } - - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), ObjectStoreError> { - gcs_query(move |gcs| gcs.remove_async(bucket, key)) - } -} diff --git a/core/lib/object_store/src/gcs_utils.rs b/core/lib/object_store/src/gcs_utils.rs deleted file mode 100644 index eb7de48a3f77..000000000000 --- a/core/lib/object_store/src/gcs_utils.rs +++ /dev/null @@ -1,42 +0,0 @@ -use zksync_types::proofs::AggregationRound; -use zksync_types::L1BatchNumber; - -pub fn prover_circuit_input_blob_url( - block_number: L1BatchNumber, - sequence_number: usize, - circuit_type: String, - aggregation_round: AggregationRound, -) -> String { - format!( - "{}_{}_{}_{:?}.bin", - block_number, sequence_number, circuit_type, aggregation_round - ) -} - -pub fn merkle_tree_paths_blob_url(block_number: L1BatchNumber) -> String { - format!("merkel_tree_paths_{}.bin", block_number) -} - -pub fn basic_circuits_blob_url(block_number: L1BatchNumber) -> String { - format!("basic_circuits_{}.bin", block_number) -} - -pub fn basic_circuits_inputs_blob_url(block_number: L1BatchNumber) -> String { - format!("basic_circuits_inputs_{}.bin", block_number) -} - -pub fn leaf_layer_subqueues_blob_url(block_number: L1BatchNumber) -> String { - format!("leaf_layer_subqueues_{}.bin", block_number) -} - -pub fn aggregation_outputs_blob_url(block_number: L1BatchNumber) -> String { - format!("aggregation_outputs_{}.bin", block_number) -} - -pub fn scheduler_witness_blob_url(block_number: L1BatchNumber) -> String { - format!("scheduler_witness_{}.bin", block_number) -} - -pub fn final_node_aggregations_blob_url(block_number: L1BatchNumber) -> String { - format!("final_node_aggregations_{}.bin", block_number) -} diff --git a/core/lib/object_store/src/lib.rs b/core/lib/object_store/src/lib.rs index 15b71013d1db..c0bed0e87bc3 100644 --- a/core/lib/object_store/src/lib.rs +++ b/core/lib/object_store/src/lib.rs @@ -1,9 +1,41 @@ -extern crate core; +//! This crate provides the [object storage abstraction](ObjectStore) that allows to get, +//! put and remove binary blobs. The following implementations are available: +//! +//! - File-based storage saving blobs as separate files in the local filesystem +//! - GCS-based storage +//! +//! These implementations are not exposed externally. Instead, a store trait object +//! can be constructed using an [`ObjectStoreFactory`] based on the configuration. +//! The configuration can be provided explicitly (see [`ObjectStoreFactory::new()`]) +//! or obtained from the environment (see [`ObjectStoreFactory::from_env()`]). +//! +//! Besides the lower-level storage abstraction, the crate provides high-level +//! typesafe `::get()` and `::put()` methods +//! to store [(de)serializable objects](StoredObject). Prefer using these methods +//! whenever possible. 
-pub mod file_backed_object_store; -pub mod gcs_object_store; -pub mod object_store; +// Linter settings. +#![warn(missing_debug_implementations, bare_trait_objects)] +#![warn(clippy::all, clippy::pedantic)] +#![allow( + clippy::must_use_candidate, + clippy::module_name_repetitions, + clippy::doc_markdown +)] -pub mod gcs_utils; -#[cfg(test)] -mod tests; +mod file; +mod gcs; +mod mock; +mod objects; +mod raw; + +#[doc(hidden)] // used by the `serialize_using_bincode!` macro +pub mod _reexports { + pub use crate::raw::BoxedError; + pub use bincode; +} + +pub use self::{ + objects::{CircuitKey, StoredObject}, + raw::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}, +}; diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs new file mode 100644 index 000000000000..ba9e0c94ac90 --- /dev/null +++ b/core/lib/object_store/src/mock.rs @@ -0,0 +1,39 @@ +//! Mock implementation of [`ObjectStore`]. + +use std::{collections::HashMap, sync::Mutex}; + +use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; + +type BucketMap = HashMap>; + +#[derive(Debug, Default)] +pub(crate) struct MockStore { + inner: Mutex>, +} + +impl ObjectStore for MockStore { + fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { + let lock = self.inner.lock().expect("mock object store poisoned"); + let maybe_bytes = lock.get(&bucket).and_then(|bucket_map| bucket_map.get(key)); + maybe_bytes.cloned().ok_or_else(|| { + let error_message = format!("missing key: {key} in bucket {bucket}"); + ObjectStoreError::KeyNotFound(error_message.into()) + }) + } + + fn put_raw(&self, bucket: Bucket, key: &str, value: Vec) -> Result<(), ObjectStoreError> { + let mut lock = self.inner.lock().expect("mock object store poisoned"); + let bucket_map = lock.entry(bucket).or_default(); + bucket_map.insert(key.to_owned(), value); + Ok(()) + } + + fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + let mut lock = self.inner.lock().expect("mock object store poisoned"); + let Some(bucket_map) = lock.get_mut(&bucket) else { + return Ok(()) + }; + bucket_map.remove(key); + Ok(()) + } +} diff --git a/core/lib/object_store/src/object_store.rs b/core/lib/object_store/src/object_store.rs deleted file mode 100644 index 2ff4c80057f0..000000000000 --- a/core/lib/object_store/src/object_store.rs +++ /dev/null @@ -1,118 +0,0 @@ -use google_cloud_default::WithAuthExt; -use google_cloud_storage::client::{Client, ClientConfig}; -use std::fmt::{Debug, Display, Formatter}; -use std::str::FromStr; -use std::sync::mpsc::channel; -use std::{error, thread}; -use tokio::runtime::Builder; - -use zksync_config::ObjectStoreConfig; - -use crate::file_backed_object_store::FileBackedObjectStore; -use crate::gcs_object_store::GoogleCloudStorage; - -pub const PROVER_JOBS_BUCKET_PATH: &str = "prover_jobs"; -pub const WITNESS_INPUT_BUCKET_PATH: &str = "witness_inputs"; -pub const LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH: &str = "leaf_aggregation_witness_jobs"; -pub const NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH: &str = "node_aggregation_witness_jobs"; -pub const SCHEDULER_WITNESS_JOBS_BUCKET_PATH: &str = "scheduler_witness_jobs"; - -#[derive(Debug)] -pub enum ObjectStoreError { - KeyNotFound(String), - Other(String), -} - -impl Display for ObjectStoreError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - ObjectStoreError::KeyNotFound(e) => write!(f, "Key Notfound error: {}", e), - ObjectStoreError::Other(s) => write!(f, "Other error: {}", s), - } - } -} - 
-impl error::Error for ObjectStoreError {} - -/// Trait to fetch and store BLOB's from an object store(S3, Google Cloud Storage, Azure Blobstore etc). -pub trait ObjectStore: Debug + Send + Sync { - type Bucket: Debug; - type Key: Debug; - type Value; - - fn get_store_type(&self) -> &'static str; - - /// Fetches the value for the given key from the given bucket if it exists otherwise returns Error. - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result; - - /// Stores the value associating it with the key into the given bucket, if the key already exist then the value is replaced. - fn put( - &mut self, - bucket: Self::Bucket, - key: Self::Key, - value: Self::Value, - ) -> Result<(), ObjectStoreError>; - - /// Removes the value associated with the key from the given bucket if it exist. - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), ObjectStoreError>; -} - -pub type DynamicObjectStore = - Box>>; - -#[derive(Debug, Eq, PartialEq)] -pub enum ObjectStoreMode { - GCS, - FileBacked, -} - -impl FromStr for ObjectStoreMode { - type Err = String; - - fn from_str(input: &str) -> Result { - match input { - "GCS" => Ok(ObjectStoreMode::GCS), - "FileBacked" => Ok(ObjectStoreMode::FileBacked), - _ => Err(format!("Unknown ObjectStoreMode type: {}", input)), - } - } -} - -pub fn create_object_store( - mode: ObjectStoreMode, - file_backed_base_path: String, -) -> DynamicObjectStore { - match mode { - ObjectStoreMode::GCS => { - vlog::trace!("Initialized GoogleCloudStorage Object store"); - let gcs_config = fetch_gcs_config(); - Box::new(GoogleCloudStorage::new(Client::new(gcs_config))) - } - ObjectStoreMode::FileBacked => { - vlog::trace!("Initialized FileBacked Object store"); - Box::new(FileBackedObjectStore::new(file_backed_base_path)) - } - } -} - -pub fn create_object_store_from_env() -> DynamicObjectStore { - let config = ObjectStoreConfig::from_env(); - let mode = ObjectStoreMode::from_str(&config.mode).unwrap(); - create_object_store(mode, config.file_backed_base_path) -} - -fn fetch_gcs_config() -> ClientConfig { - let (tx, rx) = channel(); - thread::spawn(move || { - let runtime = Builder::new_current_thread() - .enable_io() - .enable_time() - .build() - .unwrap(); - let result = runtime - .block_on(ClientConfig::default().with_auth()) - .expect("Failed build GCS client config"); - tx.send(result).unwrap(); - }); - rx.recv().unwrap() -} diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs new file mode 100644 index 000000000000..20a0056380dc --- /dev/null +++ b/core/lib/object_store/src/objects.rs @@ -0,0 +1,196 @@ +//! Stored objects. + +use zksync_types::{ + proofs::{AggregationRound, PrepareBasicCircuitsJob}, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::bn256::Bn256, + encodings::{recursion_request::RecursionRequest, QueueSimulator}, + witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, + witness::oracle::VmWitnessOracle, + LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, + SchedulerCircuitInstanceWitness, + }, + L1BatchNumber, +}; + +use crate::raw::{BoxedError, Bucket, ObjectStore, ObjectStoreError}; + +/// Object that can be stored in an [`ObjectStore`]. +pub trait StoredObject: Sized { + /// Bucket in which values are stored. + const BUCKET: Bucket; + /// Logical unique key for the object. The lifetime param allows defining keys + /// that borrow data; see [`CircuitKey`] for an example. 
+ type Key<'a>: Copy; + + /// Encodes the object key to a string. + fn encode_key(key: Self::Key<'_>) -> String; + + /// Serializes a value to a blob. + /// + /// # Errors + /// + /// Returns an error if serialization fails. + fn serialize(&self) -> Result, BoxedError>; + + /// Deserializes a value from the blob. + /// + /// # Errors + /// + /// Returns an error if deserialization fails. + fn deserialize(bytes: Vec) -> Result; +} + +/// Derives [`StoredObject::serialize()`] and [`StoredObject::deserialize()`] using +/// the `bincode` (de)serializer. Should be used in `impl StoredObject` blocks. +#[macro_export] +macro_rules! serialize_using_bincode { + () => { + fn serialize( + &self, + ) -> std::result::Result, $crate::_reexports::BoxedError> { + $crate::_reexports::bincode::serialize(self).map_err(std::convert::From::from) + } + + fn deserialize( + bytes: std::vec::Vec, + ) -> std::result::Result { + $crate::_reexports::bincode::deserialize(&bytes).map_err(std::convert::From::from) + } + }; +} + +impl StoredObject for PrepareBasicCircuitsJob { + const BUCKET: Bucket = Bucket::WitnessInput; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("merkel_tree_paths_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for BlockBasicCircuits { + const BUCKET: Bucket = Bucket::LeafAggregationWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("basic_circuits_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for BlockBasicCircuitsPublicInputs { + const BUCKET: Bucket = Bucket::LeafAggregationWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("basic_circuits_inputs_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for SchedulerCircuitInstanceWitness { + const BUCKET: Bucket = Bucket::SchedulerWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("scheduler_witness_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for NodeAggregationOutputDataWitness { + const BUCKET: Bucket = Bucket::SchedulerWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("final_node_aggregations_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for Vec> { + const BUCKET: Bucket = Bucket::NodeAggregationWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("aggregation_outputs_{key}.bin") + } + + serialize_using_bincode!(); +} + +impl StoredObject for Vec, 2, 2>> { + const BUCKET: Bucket = Bucket::NodeAggregationWitnessJobs; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("leaf_layer_subqueues_{key}.bin") + } + + serialize_using_bincode!(); +} + +/// Storage key for a [`ZkSyncCircuit`]. 
+#[derive(Debug, Clone, Copy)] +pub struct CircuitKey<'a> { + pub block_number: L1BatchNumber, + pub sequence_number: usize, + pub circuit_type: &'a str, + pub aggregation_round: AggregationRound, +} + +impl StoredObject for ZkSyncCircuit> { + const BUCKET: Bucket = Bucket::ProverJobs; + type Key<'a> = CircuitKey<'a>; + + fn encode_key(key: Self::Key<'_>) -> String { + let CircuitKey { + block_number, + sequence_number, + circuit_type, + aggregation_round, + } = key; + format!("{block_number}_{sequence_number}_{circuit_type}_{aggregation_round:?}.bin") + } + + serialize_using_bincode!(); +} + +impl dyn ObjectStore + '_ { + /// Fetches the value for the given key if it exists. + /// + /// # Errors + /// + /// Returns an error if an object with the `key` does not exist, cannot be accessed, + /// or cannot be deserialized. + pub fn get(&self, key: V::Key<'_>) -> Result { + let key = V::encode_key(key); + let bytes = self.get_raw(V::BUCKET, &key)?; + V::deserialize(bytes).map_err(ObjectStoreError::Serialization) + } + + /// Stores the value associating it with the key. If the key already exists, + /// the value is replaced. + /// + /// # Errors + /// + /// Returns an error if serialization or the insertion / replacement operation fails. + pub fn put( + &self, + key: V::Key<'_>, + value: &V, + ) -> Result { + let key = V::encode_key(key); + let bytes = value.serialize().map_err(ObjectStoreError::Serialization)?; + self.put_raw(V::BUCKET, &key, bytes)?; + Ok(key) + } +} diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs new file mode 100644 index 000000000000..86734f1b9fb0 --- /dev/null +++ b/core/lib/object_store/src/raw.rs @@ -0,0 +1,193 @@ +use std::{error, fmt, sync::Arc}; + +use crate::{file::FileBackedObjectStore, gcs::GoogleCloudStorage, mock::MockStore}; +use zksync_config::configs::object_store::ObjectStoreMode; +use zksync_config::ObjectStoreConfig; + +/// Bucket for [`ObjectStore`] in which objects can be placed. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum Bucket { + ProverJobs, + WitnessInput, + LeafAggregationWitnessJobs, + NodeAggregationWitnessJobs, + SchedulerWitnessJobs, +} + +impl Bucket { + pub(crate) fn as_str(self) -> &'static str { + match self { + Self::ProverJobs => "prover_jobs", + Self::WitnessInput => "witness_inputs", + Self::LeafAggregationWitnessJobs => "leaf_aggregation_witness_jobs", + Self::NodeAggregationWitnessJobs => "node_aggregation_witness_jobs", + Self::SchedulerWitnessJobs => "scheduler_witness_jobs", + } + } +} + +impl fmt::Display for Bucket { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(self.as_str()) + } +} + +/// Thread-safe boxed error. +pub type BoxedError = Box; + +/// Errors during [`ObjectStore`] operations. +#[derive(Debug)] +pub enum ObjectStoreError { + /// An object with the specified key is not found. + KeyNotFound(BoxedError), + /// Object (de)serialization failed. + Serialization(BoxedError), + /// Other error has occurred when accessing the store (e.g., a network error). 
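To tie the pieces together, here is a hedged sketch of how a downstream crate might define its own `StoredObject` and use the typed `get()` / `put()` methods on `dyn ObjectStore` shown above. The `WitnessStats` type, its bucket choice and key format are invented for the example; only the trait, the `serialize_using_bincode!` macro and the factory come from this diff, plus `serde`/`bincode` as dependencies.

```rust
use serde::{Deserialize, Serialize};
use zksync_object_store::{serialize_using_bincode, Bucket, ObjectStoreFactory, StoredObject};

// Hypothetical (de)serializable payload; not part of the crate.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct WitnessStats {
    batch: u32,
    leaf_count: u64,
}

impl StoredObject for WitnessStats {
    const BUCKET: Bucket = Bucket::WitnessInput;
    type Key<'a> = u32;

    fn encode_key(key: Self::Key<'_>) -> String {
        format!("witness_stats_{key}.bin")
    }

    serialize_using_bincode!();
}

fn main() {
    let store = ObjectStoreFactory::mock().create_store();
    let stats = WitnessStats { batch: 1, leaf_count: 42 };

    let key = store.put(1_u32, &stats).expect("put failed");
    let restored: WitnessStats = store.get(1_u32).expect("get failed");
    assert_eq!(restored, stats);
    println!("stored under object key {key}");
}
```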
+ Other(BoxedError), +} + +impl fmt::Display for ObjectStoreError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::KeyNotFound(err) => write!(formatter, "key not found: {err}"), + Self::Serialization(err) => write!(formatter, "serialization error: {err}"), + Self::Other(err) => write!(formatter, "other error: {err}"), + } + } +} + +impl error::Error for ObjectStoreError { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + Self::KeyNotFound(err) | Self::Serialization(err) | Self::Other(err) => { + Some(err.as_ref()) + } + } + } +} + +/// Functionality to fetch and store byte blobs from an object store (AWS S3, Google Cloud Storage, +/// Azure Blobstore etc). +/// +/// The methods of this trait are low-level. Prefer implementing [`StoredObject`] for the store +/// object and using `get()` / `put()` methods in `dyn ObjectStore`. +/// +/// [`StoredObject`]: crate::StoredObject +pub trait ObjectStore: fmt::Debug + Send + Sync { + /// Fetches the value for the given key from the given bucket if it exists. + /// + /// # Errors + /// + /// Returns an error if an object with the `key` does not exist or cannot be accessed. + fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError>; + + /// Stores the value associating it with the key into the given bucket. + /// If the key already exists, the value is replaced. + /// + /// # Errors + /// + /// Returns an error if the insertion / replacement operation fails. + fn put_raw(&self, bucket: Bucket, key: &str, value: Vec) -> Result<(), ObjectStoreError>; + + /// Removes the value associated with the key from the given bucket if it exists. + /// + /// # Errors + /// + /// Returns an error if removal fails. + fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError>; +} + +impl ObjectStore for Arc { + fn get_raw(&self, bucket: Bucket, key: &str) -> Result, ObjectStoreError> { + (**self).get_raw(bucket, key) + } + + fn put_raw(&self, bucket: Bucket, key: &str, value: Vec) -> Result<(), ObjectStoreError> { + (**self).put_raw(bucket, key, value) + } + + fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { + (**self).remove_raw(bucket, key) + } +} + +#[derive(Debug)] +enum ObjectStoreOrigin { + Config(ObjectStoreConfig), + Mock(Arc), +} + +/// Factory of [`ObjectStore`]s. +#[derive(Debug)] +pub struct ObjectStoreFactory { + origin: ObjectStoreOrigin, +} + +impl ObjectStoreFactory { + /// Creates an object store factory based on the provided `config`. + /// + /// # Panics + /// + /// If the GCS-backed implementation is configured, this constructor will panic if called + /// outside the Tokio runtime. + pub fn new(config: ObjectStoreConfig) -> Self { + Self { + origin: ObjectStoreOrigin::Config(config), + } + } + + /// Creates an object store factory with the configuration taken from the environment. + pub fn from_env() -> Self { + let config = ObjectStoreConfig::from_env(); + Self::new(config) + } + + /// Creates an object store factory with a mock in-memory store. + /// All calls to [`Self::create_store()`] will return the same store; thus, the testing code + /// can use [`ObjectStore`] methods for assertions. + pub fn mock() -> Self { + Self { + origin: ObjectStoreOrigin::Mock(Arc::new(MockStore::default())), + } + } + + /// Creates an [`ObjectStore`]. 
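Because `ObjectStoreError` distinguishes a missing key from other failures, callers can treat absence as a normal outcome rather than an error. A small sketch under that assumption (the helper name is hypothetical):

```rust
use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError};

/// Hypothetical helper: treat a missing key as `None` instead of an error.
fn try_get_raw(
    store: &dyn ObjectStore,
    bucket: Bucket,
    key: &str,
) -> Result<Option<Vec<u8>>, ObjectStoreError> {
    match store.get_raw(bucket, key) {
        Ok(bytes) => Ok(Some(bytes)),
        Err(ObjectStoreError::KeyNotFound(_)) => Ok(None),
        Err(err) => Err(err),
    }
}
```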
+ pub fn create_store(&self) -> Box { + match &self.origin { + ObjectStoreOrigin::Config(config) => Self::create_from_config(config), + ObjectStoreOrigin::Mock(store) => Box::new(Arc::clone(store)), + } + } + + fn create_from_config(config: &ObjectStoreConfig) -> Box { + let gcs_credential_file_path = match config.mode { + ObjectStoreMode::GCSWithCredentialFile => Some(config.gcs_credential_file_path.clone()), + _ => None, + }; + match config.mode { + ObjectStoreMode::GCS => { + vlog::trace!("Initialized GoogleCloudStorage Object store without credential file"); + Box::new(GoogleCloudStorage::new( + gcs_credential_file_path, + config.bucket_base_url.clone(), + config.max_retries, + )) + } + ObjectStoreMode::GCSWithCredentialFile => { + vlog::trace!("Initialized GoogleCloudStorage Object store with credential file"); + Box::new(GoogleCloudStorage::new( + gcs_credential_file_path, + config.bucket_base_url.clone(), + config.max_retries, + )) + } + ObjectStoreMode::FileBacked => { + vlog::trace!("Initialized FileBacked Object store"); + Box::new(FileBackedObjectStore::new( + config.file_backed_base_path.clone(), + )) + } + } + } +} diff --git a/core/lib/object_store/src/tests.rs b/core/lib/object_store/src/tests.rs deleted file mode 100644 index 1b590a64d92d..000000000000 --- a/core/lib/object_store/src/tests.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::object_store::{create_object_store, ObjectStoreMode}; -use std::env; - -#[test] -fn test_object_store_in_memory_creation() { - let object_store = create_object_store(ObjectStoreMode::FileBacked, "artifacts".to_string()); - assert_eq!("FileBackedStore", object_store.get_store_type()); -} - -#[test] -fn test_object_store_gcs_creation() { - set_object_store_environment_variable(); - let object_store = create_object_store(ObjectStoreMode::GCS, "".to_string()); - assert_eq!("GoogleCloudStorage", object_store.get_store_type()); -} - -fn set_object_store_environment_variable() { - env::set_var("OBJECT_STORE_BUCKET_BASE_URL", "zksync_unit_test"); - env::set_var("OBJECT_STORE_MODE", "GCS"); - env::set_var("OBJECT_STORE_FILE_BACKED_BASE_PATH", "/base/url"); -} diff --git a/core/lib/object_store/tests/integration.rs b/core/lib/object_store/tests/integration.rs new file mode 100644 index 000000000000..42f3171ad19c --- /dev/null +++ b/core/lib/object_store/tests/integration.rs @@ -0,0 +1,62 @@ +//! Integration tests for object store. + +use std::fs; + +use zksync_object_store::{Bucket, ObjectStoreFactory}; +use zksync_types::{ + proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + L1BatchNumber, +}; + +/// Tests compatibility of the `PrepareBasicCircuitsJob` serialization to the previously used +/// one. +#[test] +fn prepare_basic_circuits_job_serialization() { + // The working dir for integration tests is set to the crate dir, so specifying relative paths + // should be OK. 
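The integration tests that follow rely on `bincode`'s positional encoding: a struct and a tuple with the same field types in the same order serialize to identical bytes, which is what keeps `PrepareBasicCircuitsJob` compatible with the previously stored `(Vec<StorageLogMetadata>, u64)` snapshots. A standalone illustration with a made-up `Pair` struct (requires `serde` with derives and `bincode` 1.x):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Pair {
    items: Vec<u32>,
    index: u64,
}

fn main() {
    let as_struct = bincode::serialize(&Pair { items: vec![1, 2, 3], index: 7 }).unwrap();
    let as_tuple = bincode::serialize(&(vec![1_u32, 2, 3], 7_u64)).unwrap();
    // bincode 1.x encodes struct fields positionally, without field names,
    // so the struct and the tuple produce the same bytes.
    assert_eq!(as_struct, as_tuple);
}
```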
+ let snapshot = fs::read("./tests/snapshots/prepare-basic-circuits-job-full.bin").unwrap(); + let store = ObjectStoreFactory::mock().create_store(); + store + .put_raw( + Bucket::WitnessInput, + "merkel_tree_paths_1.bin", + snapshot.clone(), + ) + .unwrap(); + + let job: PrepareBasicCircuitsJob = store.get(L1BatchNumber(1)).unwrap(); + + let key = store.put(L1BatchNumber(2), &job).unwrap(); + let serialized_job = store.get_raw(Bucket::WitnessInput, &key).unwrap(); + assert_eq!(serialized_job, snapshot); + assert_job_integrity( + job.next_enumeration_index(), + job.into_merkle_paths().collect(), + ); +} + +fn assert_job_integrity(next_enumeration_index: u64, merkle_paths: Vec) { + assert_eq!(next_enumeration_index, 1); + assert_eq!(merkle_paths.len(), 3); + assert!(merkle_paths + .iter() + .all(|log| log.is_write && log.first_write)); + assert!(merkle_paths.iter().all(|log| log.merkle_paths.len() == 256)); +} + +/// Test that serialization works the same as with a tuple of the job fields. +#[test] +fn prepare_basic_circuits_job_compatibility() { + let snapshot = fs::read("./tests/snapshots/prepare-basic-circuits-job-full.bin").unwrap(); + let job_tuple: (Vec, u64) = bincode::deserialize(&snapshot).unwrap(); + + let serialized = bincode::serialize(&job_tuple).unwrap(); + assert_eq!(serialized, snapshot); + + let job: PrepareBasicCircuitsJob = bincode::deserialize(&snapshot).unwrap(); + assert_eq!(job.next_enumeration_index(), job_tuple.1); + let job_merkle_paths: Vec<_> = job.into_merkle_paths().collect(); + assert_eq!(job_merkle_paths, job_tuple.0); + + assert_job_integrity(job_tuple.1, job_tuple.0); +} diff --git a/core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin b/core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin new file mode 100644 index 000000000000..cfaefb0967f7 Binary files /dev/null and b/core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin differ diff --git a/core/lib/prometheus_exporter/Cargo.toml b/core/lib/prometheus_exporter/Cargo.toml index e6e4ee4835bf..03a5042ddcea 100644 --- a/core/lib/prometheus_exporter/Cargo.toml +++ b/core/lib/prometheus_exporter/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs index 85f1e884268c..81d130418dd6 100644 --- a/core/lib/prometheus_exporter/src/lib.rs +++ b/core/lib/prometheus_exporter/src/lib.rs @@ -31,6 +31,7 @@ pub fn run_prometheus_exporter(config: PrometheusConfig, use_pushgateway: bool) let percents_buckets = [ 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 120.0, ]; + let zero_to_one_buckets = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]; let builder = if use_pushgateway { let job_id = "zksync-pushgateway"; @@ -58,10 +59,15 @@ pub fn run_prometheus_exporter(config: PrometheusConfig, use_pushgateway: bool) .set_buckets(&default_latency_buckets) .unwrap() .set_buckets_for_metric( - Matcher::Full("runtime_context.storage_interaction".to_owned()), + Matcher::Full("runtime_context.storage_interaction.amount".to_owned()), &storage_interactions_per_call_buckets, ) .unwrap() + .set_buckets_for_metric( + Matcher::Full("runtime_context.storage_interaction.ratio".to_owned()), + 
&zero_to_one_buckets, + ) + .unwrap() .set_buckets_for_metric( Matcher::Prefix("runtime_context.memory".to_owned()), &vm_memory_per_call_buckets, diff --git a/core/lib/prover_utils/Cargo.toml b/core/lib/prover_utils/Cargo.toml index b1c0e442bcf2..e523d0563fba 100644 --- a/core/lib/prover_utils/Cargo.toml +++ b/core/lib/prover_utils/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -16,3 +16,7 @@ zksync_utils = { path = "../../lib/utils", version = "1.0" } metrics = "0.20" reqwest = { version = "0.11", features = ["blocking"] } +regex = "1.7.2" +tokio = "1.27.0" +futures = { version = "0.3", features = ["compat"] } +ctrlc = { version = "3.1", features = ["termination"] } diff --git a/core/lib/prover_utils/src/lib.rs b/core/lib/prover_utils/src/lib.rs index 194f0fcff66d..7c6b4ded2901 100644 --- a/core/lib/prover_utils/src/lib.rs +++ b/core/lib/prover_utils/src/lib.rs @@ -1,11 +1,18 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] +extern crate core; + use std::fs::create_dir_all; use std::io::Cursor; use std::path::Path; use std::time::Duration; use std::time::Instant; +use futures::channel::mpsc; +use futures::executor::block_on; +use futures::{future, SinkExt}; +use tokio::task::JoinHandle; + pub mod region_fetcher; fn download_bytes(key_download_url: &str) -> reqwest::Result> { @@ -114,3 +121,26 @@ pub fn circuit_name_to_numeric_index(circuit_name: &str) -> Option { _ => None, } } + +pub async fn wait_for_tasks(task_futures: Vec>) { + match future::select_all(task_futures).await.0 { + Ok(_) => { + vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); + } + Err(error) => { + vlog::info!( + "One of the tokio actors unexpectedly finished with error: {:?}", + error + ); + } + } +} + +pub fn get_stop_signal_receiver() -> mpsc::Receiver { + let (mut stop_signal_sender, stop_signal_receiver) = mpsc::channel(256); + ctrlc::set_handler(move || { + block_on(stop_signal_sender.send(true)).expect("Ctrl+C signal send"); + }) + .expect("Error setting Ctrl+C handler"); + stop_signal_receiver +} diff --git a/core/lib/prover_utils/src/region_fetcher.rs b/core/lib/prover_utils/src/region_fetcher.rs index 882f96e08628..8d9159b64f23 100644 --- a/core/lib/prover_utils/src/region_fetcher.rs +++ b/core/lib/prover_utils/src/region_fetcher.rs @@ -1,3 +1,4 @@ +use regex::Regex; use reqwest::header::{HeaderMap, HeaderValue}; use reqwest::Method; @@ -6,24 +7,72 @@ use zksync_utils::http_with_retries::send_request_with_retries; pub async fn get_region() -> String { let prover_group_config = ProverGroupConfig::from_env(); + match prover_group_config.region_override { + Some(region) => region, + None => { + let url = prover_group_config.region_read_url; + fetch_from_url(url).await + } + } +} + +pub async fn get_zone() -> String { + let prover_group_config = ProverGroupConfig::from_env(); + match prover_group_config.zone_override { + Some(zone) => zone, + None => { + let url = prover_group_config.zone_read_url; + let data = fetch_from_url(url).await; + parse_zone(&data) + } + } +} + +async fn fetch_from_url(url: String) -> String { let mut headers = HeaderMap::new(); headers.insert("Metadata-Flavor", HeaderValue::from_static("Google")); - let response = 
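The new `wait_for_tasks` and `get_stop_signal_receiver` helpers added to `prover_utils` above are intended for binary entry points: block until any spawned actor exits or a Ctrl+C arrives. A hypothetical wiring sketch is shown below; the crate import path, a `tokio` build with the `macros` feature, and `wait_for_tasks` taking `Vec<tokio::task::JoinHandle<()>>` (the generics are not visible in this diff) are all assumptions, and the spawned task is a stand-in.

```rust
use futures::StreamExt;
use zksync_prover_utils::{get_stop_signal_receiver, wait_for_tasks};

#[tokio::main]
async fn main() {
    // Hypothetical long-running component of a prover binary.
    let tasks = vec![tokio::spawn(async {
        loop {
            tokio::time::sleep(std::time::Duration::from_secs(60)).await;
        }
    })];
    let mut stop_signal_receiver = get_stop_signal_receiver();

    tokio::select! {
        _ = wait_for_tasks(tasks) => {}
        _ = stop_signal_receiver.next() => {
            println!("Stop signal received, shutting down");
        }
    }
}
```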
send_request_with_retries( - &prover_group_config.region_read_url, - 5, - Method::GET, - Some(headers), - None, - ) - .await; + let response = send_request_with_retries(&url, 5, Method::GET, Some(headers), None).await; response - .unwrap_or_else(|_| { - panic!( - "Failed fetching response from url: {}", - prover_group_config.region_read_url - ) - }) + .unwrap_or_else(|_| panic!("Failed fetching response from url: {}", url)) .text() .await .expect("Failed to read response as text") } + +fn parse_zone(data: &str) -> String { + let re = Regex::new(r"^projects/\d+/zones/(\w+-\w+-\w+)$").unwrap(); + if let Some(caps) = re.captures(data) { + let zone = &caps[1]; + return zone.to_string(); + } + panic!("failed to extract zone from: {}", data) +} + +#[cfg(test)] +mod tests { + use crate::region_fetcher::{get_region, get_zone, parse_zone}; + + #[test] + fn test_parse_zone() { + let data = "projects/295056426491/zones/us-central1-a"; + let zone = parse_zone(data); + assert_eq!(zone, "us-central1-a"); + } + + #[test] + #[should_panic(expected = "failed to extract zone from: invalid data")] + fn test_parse_zone_panic() { + let data = "invalid data"; + let _ = parse_zone(data); + } + + #[tokio::test] + async fn test_get_region_with_override() { + assert_eq!("us-central-1", get_region().await); + } + + #[tokio::test] + async fn test_get_zone_with_override() { + assert_eq!("us-central-1-b", get_zone().await); + } +} diff --git a/core/lib/queued_job_processor/Cargo.toml b/core/lib/queued_job_processor/Cargo.toml index c46fe2ddf8c0..c31dc9976f30 100644 --- a/core/lib/queued_job_processor/Cargo.toml +++ b/core/lib/queued_job_processor/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/queued_job_processor/src/lib.rs b/core/lib/queued_job_processor/src/lib.rs index 1867d6607223..730af71c9ef4 100644 --- a/core/lib/queued_job_processor/src/lib.rs +++ b/core/lib/queued_job_processor/src/lib.rs @@ -1,20 +1,23 @@ use std::fmt::Debug; use std::time::{Duration, Instant}; + +pub use async_trait::async_trait; use tokio::sync::watch; use tokio::task::JoinHandle; use tokio::time::sleep; + use zksync_dal::ConnectionPool; use zksync_utils::panic_extractor::try_extract_panic_message; -pub use async_trait::async_trait; - #[async_trait] pub trait JobProcessor: Sync + Send { type Job: Send + 'static; type JobId: Send + Debug + 'static; type JobArtifacts: Send + 'static; - const POLLING_INTERVAL_MS: u64 = 250; + const POLLING_INTERVAL_MS: u64 = 1000; + const MAX_BACKOFF_MS: u64 = 60_000; + const BACKOFF_MULTIPLIER: u64 = 2; const SERVICE_NAME: &'static str; /// Returns None when there is no pending job @@ -28,14 +31,16 @@ pub trait JobProcessor: Sync + Send { /// Invoked when `process_job` panics /// Should mark the job as failed async fn save_failure( + &self, connection_pool: ConnectionPool, job_id: Self::JobId, started_at: Instant, error: String, - ) -> (); + ); /// Function that processes a job async fn process_job( + &self, connection_pool: ConnectionPool, job: Self::Job, started_at: Instant, @@ -53,6 +58,7 @@ pub trait JobProcessor: Sync + Send { ) where Self: Sized, { + let mut backoff: u64 = Self::POLLING_INTERVAL_MS; while iterations_left.map_or(true, |i| i > 0) { if *stop_receiver.borrow() { vlog::warn!( @@ -63,6 
+69,7 @@ pub trait JobProcessor: Sync + Send { } if let Some((job_id, job)) = Self::get_next_job(&self, connection_pool.clone()).await { let started_at = Instant::now(); + backoff = Self::POLLING_INTERVAL_MS; iterations_left = iterations_left.map(|i| i - 1); let connection_pool_for_task = connection_pool.clone(); @@ -71,20 +78,26 @@ pub trait JobProcessor: Sync + Send { Self::SERVICE_NAME, job_id ); - let task = Self::process_job(connection_pool_for_task, job, started_at).await; + let task = self + .process_job(connection_pool_for_task, job, started_at) + .await; - Self::wait_for_task(connection_pool.clone(), job_id, started_at, task).await + self.wait_for_task(connection_pool.clone(), job_id, started_at, task) + .await } else if iterations_left.is_some() { vlog::info!("No more jobs to process. Server can stop now."); return; } else { - sleep(Duration::from_millis(Self::POLLING_INTERVAL_MS)).await; + vlog::trace!("Backing off for {} ms", backoff); + sleep(Duration::from_millis(backoff)).await; + backoff = (backoff * Self::BACKOFF_MULTIPLIER).min(Self::MAX_BACKOFF_MS); } } vlog::info!("Requested number of jobs is processed. Server can stop now.") } async fn wait_for_task( + &self, connection_pool: ConnectionPool, job_id: Self::JobId, started_at: Instant, @@ -106,7 +119,8 @@ pub trait JobProcessor: Sync + Send { Self::SERVICE_NAME, job_id ); - Self::save_result(connection_pool.clone(), job_id, started_at, data).await; + self.save_result(connection_pool.clone(), job_id, started_at, data) + .await; } Err(error) => { let error_message = try_extract_panic_message(error); @@ -116,7 +130,7 @@ pub trait JobProcessor: Sync + Send { job_id, error_message ); - Self::save_failure( + self.save_failure( connection_pool.clone(), job_id, started_at, @@ -132,6 +146,7 @@ pub trait JobProcessor: Sync + Send { } async fn save_result( + &self, connection_pool: ConnectionPool, job_id: Self::JobId, started_at: Instant, diff --git a/core/lib/state/Cargo.toml b/core/lib/state/Cargo.toml index f7902aa60ad8..7552bd9f75f6 100644 --- a/core/lib/state/Cargo.toml +++ b/core/lib/state/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -15,6 +15,7 @@ zksync_types = { path = "../types", version = "1.0" } zksync_storage = { path = "../storage", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } +metrics = "0.20" [dev-dependencies] tempfile = "3.0.2" diff --git a/core/lib/state/README.md b/core/lib/state/README.md new file mode 100644 index 000000000000..b0113a3d8475 --- /dev/null +++ b/core/lib/state/README.md @@ -0,0 +1,9 @@ +# State crate + +This crate is implementing the SecondaryStorage and StorageView. + +While most of the ZKSync data is currently stored in postgres - we also keep a secondary copy for part of it in RocksDB +for performance reasons. + +Currently we only keep the data that is needed by the VM (which is why we implement ZkSyncReadStorage for this +SecondaryStorage class). 
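The `JobProcessor` changes above replace the old fixed 250 ms polling sleep with exponential backoff: the delay starts at `POLLING_INTERVAL_MS`, doubles after every empty poll, is capped at `MAX_BACKOFF_MS`, and resets as soon as a job is found. The resulting schedule for consecutive empty polls can be checked with a tiny sketch that mirrors the trait's default constants:

```rust
// Illustration of the polling backoff introduced in `JobProcessor`.
fn main() {
    const POLLING_INTERVAL_MS: u64 = 1_000;
    const BACKOFF_MULTIPLIER: u64 = 2;
    const MAX_BACKOFF_MS: u64 = 60_000;

    let mut backoff = POLLING_INTERVAL_MS;
    let mut schedule = Vec::new();
    for _ in 0..8 {
        schedule.push(backoff);
        backoff = (backoff * BACKOFF_MULTIPLIER).min(MAX_BACKOFF_MS);
    }
    // 1 s, 2 s, 4 s, 8 s, 16 s, 32 s, then capped at 60 s.
    assert_eq!(
        schedule,
        [1_000, 2_000, 4_000, 8_000, 16_000, 32_000, 60_000, 60_000]
    );
}
```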
diff --git a/core/lib/state/src/secondary_storage.rs b/core/lib/state/src/secondary_storage.rs index 3982e035eadf..b0fce0ba78da 100644 --- a/core/lib/state/src/secondary_storage.rs +++ b/core/lib/state/src/secondary_storage.rs @@ -5,8 +5,7 @@ use zksync_storage::rocksdb::WriteBatch; use zksync_storage::util::{deserialize_block_number, serialize_block_number}; use zksync_storage::RocksDB; use zksync_types::{ - Address, L1BatchNumber, StorageKey, StorageLog, StorageLogKind, StorageValue, - ZkSyncReadStorage, H256, + L1BatchNumber, StorageKey, StorageLog, StorageLogKind, StorageValue, ZkSyncReadStorage, H256, }; const BLOCK_NUMBER_KEY: &[u8; 12] = b"block_number"; @@ -21,7 +20,6 @@ pub struct SecondaryStateStorage { #[derive(Default, Debug)] struct PendingPatch { state: HashMap, - contracts: HashMap>, factory_deps: HashMap>, } @@ -34,18 +32,6 @@ impl ZkSyncReadStorage for &SecondaryStateStorage { self.read_value_inner(key).is_none() } - fn load_contract(&mut self, address: Address) -> Option> { - if let Some(value) = self.pending_patch.contracts.get(&address) { - return Some(value.clone()); - } - let cf = self - .db - .cf_state_keeper_handle(StateKeeperColumnFamily::Contracts); - self.db - .get_cf(cf, address.to_fixed_bytes()) - .expect("failed to read rocksdb state value") - } - fn load_factory_dep(&mut self, hash: H256) -> Option> { self.load_factory_dependency(hash) } @@ -95,10 +81,6 @@ impl SecondaryStateStorage { } } - pub fn store_contract(&mut self, address: Address, bytecode: Vec) { - self.pending_patch.contracts.insert(address, bytecode); - } - pub fn store_factory_dep(&mut self, hash: H256, bytecode: Vec) { self.pending_patch.factory_deps.insert(hash, bytecode); } @@ -106,7 +88,6 @@ impl SecondaryStateStorage { pub fn rollback( &mut self, logs: Vec<(H256, Option)>, - contracts: Vec
, factory_deps: Vec, l1_batch_number: L1BatchNumber, ) { @@ -127,13 +108,6 @@ impl SecondaryStateStorage { serialize_block_number(l1_batch_number.0 + 1), ); - let cf = self - .db - .cf_state_keeper_handle(StateKeeperColumnFamily::Contracts); - for contract_address in contracts { - batch.delete_cf(cf, contract_address.to_fixed_bytes()); - } - let cf = self .db .cf_state_keeper_handle(StateKeeperColumnFamily::FactoryDeps); @@ -161,13 +135,6 @@ impl SecondaryStateStorage { batch.put_cf(cf, Self::serialize_state_key(key), value); } - let cf = self - .db - .cf_state_keeper_handle(StateKeeperColumnFamily::Contracts); - for (address, value) in self.pending_patch.contracts.iter() { - batch.put_cf(cf, address.to_fixed_bytes(), value); - } - let cf = self .db .cf_state_keeper_handle(StateKeeperColumnFamily::FactoryDeps); diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 771e3f039c3b..e82ba6d3e5b8 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::fmt::Debug; +use std::time::Duration; use zksync_types::{StorageKey, StorageValue, ZkSyncReadStorage, H256}; /// `StorageView` is buffer for `StorageLog`s between storage and transaction execution code. @@ -18,10 +19,13 @@ pub struct StorageView { // Cache for initial/repeated writes. It's only valid within one L1 batch execution. read_initial_writes: HashMap, - pub storage_invocations: usize, - pub new_storage_invocations: usize, + pub storage_invocations_missed: usize, pub get_value_storage_invocations: usize, pub set_value_storage_invocations: usize, + + pub time_spent_on_storage_missed: Duration, + pub time_spent_on_get_value: Duration, + pub time_spent_on_set_value: Duration, } impl StorageView { @@ -31,14 +35,35 @@ impl StorageView { modified_storage_keys: HashMap::new(), read_storage_keys: HashMap::new(), read_initial_writes: HashMap::new(), - storage_invocations: 0, + storage_invocations_missed: 0, get_value_storage_invocations: 0, set_value_storage_invocations: 0, - new_storage_invocations: 0, + time_spent_on_storage_missed: Default::default(), + time_spent_on_get_value: Default::default(), + time_spent_on_set_value: Default::default(), + } + } + + pub fn new_with_read_keys( + storage_handle: S, + read_storage_keys: HashMap, + ) -> Self { + Self { + storage_handle, + modified_storage_keys: HashMap::new(), + read_storage_keys, + read_initial_writes: HashMap::new(), + storage_invocations_missed: 0, + get_value_storage_invocations: 0, + set_value_storage_invocations: 0, + time_spent_on_storage_missed: Default::default(), + time_spent_on_get_value: Default::default(), + time_spent_on_set_value: Default::default(), } } pub fn get_value(&mut self, key: &StorageKey) -> StorageValue { + let started_at = std::time::Instant::now(); self.get_value_storage_invocations += 1; let value = self.get_value_no_log(key); @@ -50,13 +75,16 @@ impl StorageView { key.key() ); + self.time_spent_on_get_value += started_at.elapsed(); value } // returns the value before write. Doesn't generate read logs. 
// `None` for value is only possible for rolling back the transaction pub fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue { + let started_at = std::time::Instant::now(); self.set_value_storage_invocations += 1; + let original = self.get_value_no_log(key); vlog::trace!( @@ -69,19 +97,25 @@ impl StorageView { ); self.modified_storage_keys.insert(*key, value); + self.time_spent_on_set_value += started_at.elapsed(); + original } fn get_value_no_log(&mut self, key: &StorageKey) -> StorageValue { - self.storage_invocations += 1; + let started_at = std::time::Instant::now(); + if let Some(value) = self.modified_storage_keys.get(key) { *value } else if let Some(value) = self.read_storage_keys.get(key) { *value } else { - self.new_storage_invocations += 1; let value = self.storage_handle.read_value(key); self.read_storage_keys.insert(*key, value); + + self.time_spent_on_storage_missed += started_at.elapsed(); + self.storage_invocations_missed += 1; + value } } @@ -109,6 +143,10 @@ impl StorageView { + self.read_initial_writes.len() * std::mem::size_of::<(StorageKey, bool)>() + self.read_storage_keys.len() * std::mem::size_of::<(StorageKey, StorageValue)>() } + + pub fn take_read_storage_keys(self) -> HashMap { + self.read_storage_keys + } } #[cfg(test)] diff --git a/core/lib/storage/Cargo.toml b/core/lib/storage/Cargo.toml index 4c811cc15716..446e21fa8447 100644 --- a/core/lib/storage/Cargo.toml +++ b/core/lib/storage/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/storage/README.md b/core/lib/storage/README.md new file mode 100644 index 000000000000..bbc9392abfe9 --- /dev/null +++ b/core/lib/storage/README.md @@ -0,0 +1,30 @@ +# ZKSync Storage + +This crate adds the support for RocksDB storage - where we keep the information about the State and Merkle Tree. + +## MerkleTree + +This database is covering 2 column families: + +- Tree +- LeafIndices + +| Column | Key | Value | Description | +| ----------- | -------------------- | ------------------------------ | ----------- | +| LeafIndices | 'leaf_index' | u64 serialized | +| LeafIndices | tree leaf (32 bytes) | TODO: is it index of the tree? 
| TODO | + +## StateKeeper + +This database has 3 columns: + +- State +- Contracts +- FactoryDeps + +| Column | Key | Value | Description | +| ----------- | ------------------------------- | ----------------------- | ------------------------------------ | +| State | 'block_number' | serialized block number | Last processed L1 batch number (u32) | +| State | hash StorageKey (account + key) | 32 bytes value | State for the given key | +| Contracts | address (20 bytes) | `Vec` | Contract contents | +| FactoryDeps | hash (32 bytes) | `Vec` | TODO | diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index 5e908ecea89d..2fd5989b2591 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -1,7 +1,7 @@ use once_cell::sync::Lazy; use rocksdb::{ - AsColumnFamilyRef, BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Options, - WriteBatch, DB, + AsColumnFamilyRef, BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, DBIterator, + Options, WriteBatch, WriteOptions, DB, }; use std::path::Path; use std::sync::{Condvar, Mutex}; @@ -17,6 +17,7 @@ pub(crate) static ROCKSDB_INSTANCE_COUNTER: Lazy<(Mutex, Condvar)> = #[derive(Debug)] pub struct RocksDB { db: DB, + sync_writes: bool, _registry_entry: RegistryEntry, } @@ -92,10 +93,19 @@ impl RocksDB { Self { db, + sync_writes: false, _registry_entry: RegistryEntry::new(), } } + /// Switches on sync writes in [`Self::write()`] and [`Self::put()`]. This has a performance + /// penalty and is mostly useful for tests. + #[must_use] + pub fn with_sync_writes(mut self) -> Self { + self.sync_writes = true; + self + } + fn rocksdb_options(tune_options: bool) -> Options { let mut options = Options::default(); options.create_missing_column_families(true); @@ -139,7 +149,13 @@ impl RocksDB { } pub fn write(&self, batch: WriteBatch) -> Result<(), rocksdb::Error> { - self.db.write(batch) + if self.sync_writes { + let mut options = WriteOptions::new(); + options.set_sync(true); + self.db.write_opt(batch, &options) + } else { + self.db.write(batch) + } } pub fn put(&self, key: K, value: V) -> Result<(), rocksdb::Error> @@ -147,21 +163,27 @@ impl RocksDB { K: AsRef<[u8]>, V: AsRef<[u8]>, { - self.db.put(key, value) + if self.sync_writes { + let mut options = WriteOptions::new(); + options.set_sync(true); + self.db.put_opt(key, value, &options) + } else { + self.db.put(key, value) + } } pub fn get>(&self, key: K) -> Result>, rocksdb::Error> { self.db.get(key) } - /// Returns column family handle for State Keeper database + /// Returns column family handle for State Keeper database. pub fn cf_state_keeper_handle(&self, cf: StateKeeperColumnFamily) -> &ColumnFamily { self.db .cf_handle(&cf.to_string()) .unwrap_or_else(|| panic!("Column family '{}' doesn't exist", cf)) } - /// Returns column family handle for Merkle Tree database + /// Returns column family handle for Merkle Tree database. pub fn cf_merkle_tree_handle(&self, cf: MerkleTreeColumnFamily) -> &ColumnFamily { self.db .cf_handle(&cf.to_string()) @@ -176,7 +198,17 @@ impl RocksDB { self.db.get_cf(cf, key) } - /// awaits termination of all running rocksdb instances + /// Iterates over key-value pairs in the specified column family `cf` in the lexical + /// key order. The keys are filtered so that they start from the specified `prefix`. + pub fn prefix_iterator_cf>( + &self, + cf: &impl AsColumnFamilyRef, + prefix: P, + ) -> DBIterator<'_> { + self.db.prefix_iterator_cf(cf, prefix) + } + + /// Awaits termination of all running rocksdb instances. 
pub fn await_rocksdb_termination() { let (lock, cvar) = &*ROCKSDB_INSTANCE_COUNTER; let mut num_instances = lock.lock().unwrap(); diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 7545a325956f..3855d2332cc0 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -18,17 +18,10 @@ zksync_contracts = { path = "../contracts", version = "1.0" } zksync_mini_merkle_tree = { path = "../mini_merkle_tree", version = "1.0"} # We need this import because we wanat DAL to be responsible for (de)serialization codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" } +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2" } +zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.2"} +zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "v1.3.2" } -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "main" } -#zkevm_test_harness = { path = "../../../../zkevm_test_harness" } - - -#codegen = { path = "../../../../solidity_plonk_verifier/codegen" } - -zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "main"} -#zk_evm = { path = "../../../../zk_evm" } - -zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "main" } rlp = "0.5" metrics = "0.20" @@ -55,5 +48,4 @@ blake2 = "0.10" [dev-dependencies] secp256k1 = {version = "0.21", features = ["recovery"] } tokio = { version = "1", features = ["rt", "macros"] } -serde_with = {version="1", features=["hex"]} - +serde_with = { version = "1", features = ["hex"] } diff --git a/core/lib/types/src/api.rs b/core/lib/types/src/api.rs index 4f2c5a8b03f7..f506631534dd 100644 --- a/core/lib/types/src/api.rs +++ b/core/lib/types/src/api.rs @@ -4,6 +4,7 @@ use crate::explorer_api::TransactionStatus; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; +use crate::vm_trace::{Call, CallType}; use crate::web3::types::{AccessList, Index, H2048}; use crate::{Address, MiniblockNumber}; use chrono::{DateTime, Utc}; @@ -173,7 +174,7 @@ pub struct L2ToL1LogProof { } /// A struct with the two default bridge contracts. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BridgeAddresses { pub l1_erc20_default_bridge: Address, @@ -491,3 +492,57 @@ pub struct GetLogsFilter { pub addresses: Vec
, pub topics: Vec<(u32, Vec)>, } + +/// Result of debugging block +/// For some reasons geth returns result as {result: DebugCall} +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCall { + pub result: DebugCall, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub enum DebugCallType { + Call, + Create, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DebugCall { + pub r#type: DebugCallType, + pub from: Address, + pub to: Address, + pub gas: U256, + pub gas_used: U256, + pub value: U256, + pub output: Bytes, + pub input: Bytes, + pub error: Option, + pub revert_reason: Option, + pub calls: Vec, +} + +impl From for DebugCall { + fn from(value: Call) -> Self { + let calls = value.calls.into_iter().map(DebugCall::from).collect(); + let debug_type = match value.r#type { + CallType::Call(_) => DebugCallType::Call, + CallType::Create => DebugCallType::Create, + CallType::NearCall => unreachable!("We have to filter our near calls before"), + }; + Self { + r#type: debug_type, + from: value.from, + to: value.to, + gas: U256::from(value.gas), + gas_used: U256::from(value.gas_used), + value: value.value, + output: Bytes::from(value.output.clone()), + input: Bytes::from(value.input.clone()), + error: value.error.clone(), + revert_reason: value.revert_reason, + calls, + } + } +} diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs index 72f2527f30b6..eaf8fdd0953e 100644 --- a/core/lib/types/src/circuit.rs +++ b/core/lib/types/src/circuit.rs @@ -3,6 +3,8 @@ use zkevm_test_harness::toolset::GeometryConfig; pub const LEAF_SPLITTING_FACTOR: usize = 50; pub const NODE_SPLITTING_FACTOR: usize = 48; + +/// Max number of basic circuits per L1 batch. pub const SCHEDULER_UPPER_BOUND: u32 = (LEAF_SPLITTING_FACTOR * NODE_SPLITTING_FACTOR) as u32; pub const LEAF_CIRCUIT_INDEX: u8 = 2; diff --git a/core/lib/types/src/explorer_api.rs b/core/lib/types/src/explorer_api.rs index 9ed08b3e112e..95c1dc680dd9 100644 --- a/core/lib/types/src/explorer_api.rs +++ b/core/lib/types/src/explorer_api.rs @@ -157,6 +157,7 @@ pub struct TransactionDetails { pub index_in_block: Option, pub initiator_address: Address, pub received_at: DateTime, + pub miniblock_timestamp: Option, pub eth_commit_tx_hash: Option, pub eth_prove_tx_hash: Option, pub eth_execute_tx_hash: Option, @@ -285,6 +286,7 @@ pub struct BlockDetails { pub l1_gas_price: u64, pub l2_fair_gas_price: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, + pub operator_address: Address, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -322,9 +324,11 @@ pub struct L1BatchPageItem { #[serde(tag = "codeFormat", content = "sourceCode")] pub enum SourceCodeData { #[serde(rename = "solidity-single-file")] - SingleFile(String), + SolSingleFile(String), #[serde(rename = "solidity-standard-json-input")] StandardJsonInput(serde_json::Map), + #[serde(rename = "yul-single-file")] + YulSingleFile(String), } // Implementing Custom deserializer which deserializes `SourceCodeData` @@ -362,7 +366,18 @@ impl<'de> Visitor<'de> for SourceCodeVisitor { let result = match r#type.as_deref() { Some("solidity-single-file") | None => { let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; - SourceCodeData::SingleFile( + SourceCodeData::SolSingleFile( + value + .as_str() + .ok_or_else(|| { + A::Error::invalid_type(Unexpected::Other(&value.to_string()), &self) + })? 
+ .to_string(), + ) + } + Some("yul-single-file") => { + let value = source_code.ok_or_else(|| A::Error::missing_field("source_code"))?; + SourceCodeData::YulSingleFile( value .as_str() .ok_or_else(|| { @@ -385,7 +400,11 @@ impl<'de> Visitor<'de> for SourceCodeVisitor { Some(x) => { return Err(A::Error::unknown_variant( x, - &["solidity-single-file", "solidity-standard-json-input"], + &[ + "solidity-single-file", + "solidity-standard-json-input", + "yul-single-file", + ], )) } }; @@ -458,7 +477,7 @@ mod tests { let single_file_result = serde_json::from_str::(single_file_str); assert!(matches!( single_file_result, - Ok(SourceCodeData::SingleFile(_)) + Ok(SourceCodeData::SolSingleFile(_)) )); let stand_json_input_str = @@ -474,7 +493,7 @@ mod tests { serde_json::from_str::(type_not_specified_str); assert!(matches!( type_not_specified_result, - Ok(SourceCodeData::SingleFile(_)) + Ok(SourceCodeData::SolSingleFile(_)) )); let type_not_specified_object_str = r#"{"sourceCode": {}}"#; diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index 62fa203300fa..bb2d74c4bd77 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -21,6 +21,7 @@ pub struct TransactionExecutionMetrics { // and the number of precompile calls pub total_log_queries: usize, pub cycles_used: u32, + pub computational_gas_used: u32, } #[derive(Default, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 20fc17fdee14..6aff38b5e520 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rlp::{Rlp, RlpStream}; use self::error::SignError; @@ -368,6 +370,26 @@ impl From for api::Transaction { } } +impl TryFrom for L2Tx { + type Error = (); + + fn try_from(value: Transaction) -> Result { + let Transaction { + common_data, + execute, + received_timestamp_ms, + } = value; + match common_data { + ExecuteTransactionCommon::L1(_) => Err(()), + ExecuteTransactionCommon::L2(common_data) => Ok(L2Tx { + execute, + common_data, + received_timestamp_ms, + }), + } + } +} + impl EIP712TypedStructure for L2Tx { const TYPE_NAME: &'static str = "Transaction"; diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs index ab328031f3b5..3a5e5718bcb7 100644 --- a/core/lib/types/src/proofs.rs +++ b/core/lib/types/src/proofs.rs @@ -1,11 +1,15 @@ -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; use std::convert::{TryFrom, TryInto}; use std::fmt::Debug; use std::ops::Add; +use std::str::FromStr; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, Bytes}; use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; use zkevm_test_harness::bellman::bn256::Bn256; use zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; +use zkevm_test_harness::encodings::{recursion_request::RecursionRequest, QueueSimulator}; use zkevm_test_harness::witness::full_block_artifact::{ BlockBasicCircuits, BlockBasicCircuitsPublicInputs, }; @@ -14,37 +18,43 @@ use zkevm_test_harness::{ LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, SchedulerCircuitInstanceWitness, }; + use zksync_basic_types::{L1BatchNumber, H256, U256}; -/// Metadata emitted by merkle tree after processing single storage log -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +const HASH_LEN: usize = H256::len_bytes(); + +/// Metadata emitted by a Merkle tree after processing single 
storage log. +#[serde_as] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StorageLogMetadata { - pub root_hash: Vec, + #[serde_as(as = "Bytes")] + pub root_hash: [u8; HASH_LEN], pub is_write: bool, pub first_write: bool, - pub merkle_paths: Vec>, + #[serde_as(as = "Vec")] + pub merkle_paths: Vec<[u8; HASH_LEN]>, pub leaf_hashed_key: U256, pub leaf_enumeration_index: u64, - pub value_written: [u8; 32], - pub value_read: [u8; 32], + // **NB.** For compatibility reasons, `#[serde_as(as = "Bytes")]` attrs are not added below. + pub value_written: [u8; HASH_LEN], + pub value_read: [u8; HASH_LEN], } impl StorageLogMetadata { pub fn leaf_hashed_key_array(&self) -> [u8; 32] { - let mut result = [0u8; 32]; + let mut result = [0_u8; 32]; self.leaf_hashed_key.to_little_endian(&mut result); result } - pub fn merkle_paths_array(&self) -> Box<[[u8; 32]; 256]> { - let vec_of_arrays = self - .merkle_paths - .clone() - .into_iter() - .map(|vec| TryInto::<[u8; 32]>::try_into(vec).unwrap()) - .collect::>(); - - Box::new(TryInto::<[[u8; 32]; 256]>::try_into(vec_of_arrays).unwrap()) + pub fn into_merkle_paths_array(self) -> Box<[[u8; HASH_LEN]; PATH_LEN]> { + let actual_len = self.merkle_paths.len(); + self.merkle_paths.try_into().unwrap_or_else(|_| { + panic!( + "Unexpected length of Merkle paths in `StorageLogMetadata`: expected {}, got {}", + PATH_LEN, actual_len + ); + }) } } @@ -54,12 +64,6 @@ pub struct WitnessGeneratorJobMetadata { pub proofs: Vec>>>, } -#[derive(Clone)] -pub struct WitnessGeneratorJob { - pub block_number: L1BatchNumber, - pub job: WitnessGeneratorJobInput, -} - /// Represents the sequential number of the proof aggregation round. /// Mostly used to be stored in `aggregation_round` column in `prover_jobs` table #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -81,6 +85,23 @@ impl AggregationRound { } } +impl FromStr for AggregationRound { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "basic_circuits" => Ok(AggregationRound::BasicCircuits), + "leaf_aggregation" => Ok(AggregationRound::LeafAggregation), + "node_aggregation" => Ok(AggregationRound::NodeAggregation), + "scheduler" => Ok(AggregationRound::Scheduler), + other => Err(format!( + "{} is not a valid round name for witness generation", + other + )), + } + } +} + impl TryFrom for AggregationRound { type Error = (); @@ -99,35 +120,87 @@ impl TryFrom for AggregationRound { } } -/// Represents a job for one of the aggregation rounds. -/// `Box` is required by the linter, as the enum variants have vastly different memory footprints. -#[derive(Clone)] -pub enum WitnessGeneratorJobInput { - BasicCircuits(Box), - LeafAggregation(Box), - NodeAggregation(Box), - Scheduler(Box), +/// Witness data produced by the Merkle tree as a result of processing a single block. Used +/// as an input to the witness generator. +/// +/// # Stability +/// +/// This type is serialized using `bincode` to be passed from the metadata calculator +/// to the witness generator. As such, changes in its `serde` serialization +/// must be backwards-compatible. +/// +/// # Compact form +/// +/// In order to reduce storage space, this job supports a compact format. In this format, +/// only the first item in `merkle_paths` is guaranteed to have the full Merkle path (i.e., +/// 256 items with the current Merkle tree). The following items may have less hashes in their +/// Merkle paths; if this is the case, the starting hashes are skipped and are the same +/// as in the first path. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PrepareBasicCircuitsJob { + // Merkle paths and some auxiliary information for each read / write operation in a block. + merkle_paths: Vec, + next_enumeration_index: u64, } -impl WitnessGeneratorJobInput { - pub fn aggregation_round(&self) -> AggregationRound { - match self { - WitnessGeneratorJobInput::BasicCircuits(_) => AggregationRound::BasicCircuits, - WitnessGeneratorJobInput::LeafAggregation(_) => AggregationRound::LeafAggregation, - WitnessGeneratorJobInput::NodeAggregation(_) => AggregationRound::NodeAggregation, - WitnessGeneratorJobInput::Scheduler(_) => AggregationRound::Scheduler, +impl PrepareBasicCircuitsJob { + /// Creates a new job with the specified leaf index and no included paths. + pub fn new(next_enumeration_index: u64) -> Self { + Self { + merkle_paths: vec![], + next_enumeration_index, } } -} -#[derive(Clone)] -pub struct PrepareBasicCircuitsJob { - pub merkle_paths: Vec, - pub next_enumeration_index: u64, + /// Returns the next leaf index at the beginning of the block. + pub fn next_enumeration_index(&self) -> u64 { + self.next_enumeration_index + } + + /// Reserves additional capacity for Merkle paths. + pub fn reserve(&mut self, additional_capacity: usize) { + self.merkle_paths.reserve(additional_capacity); + } + + /// Pushes an additional Merkle path. + pub fn push_merkle_path(&mut self, mut path: StorageLogMetadata) { + let Some(first_path) = self.merkle_paths.first() else { + self.merkle_paths.push(path); + return; + }; + assert_eq!(first_path.merkle_paths.len(), path.merkle_paths.len()); + + let mut hash_pairs = path.merkle_paths.iter().zip(&first_path.merkle_paths); + let first_unique_idx = + hash_pairs.position(|(hash, first_path_hash)| hash != first_path_hash); + let first_unique_idx = first_unique_idx.unwrap_or(path.merkle_paths.len()); + path.merkle_paths = path.merkle_paths.split_off(first_unique_idx); + self.merkle_paths.push(path); + } + + /// Converts this job into an iterator over the contained Merkle paths. + pub fn into_merkle_paths(self) -> impl ExactSizeIterator { + let mut merkle_paths = self.merkle_paths; + if let [first, rest @ ..] = merkle_paths.as_mut_slice() { + for path in rest { + assert!( + path.merkle_paths.len() <= first.merkle_paths.len(), + "Merkle paths in `PrepareBasicCircuitsJob` are malformed; the first path is not \ + the longest one" + ); + let spliced_len = first.merkle_paths.len() - path.merkle_paths.len(); + let spliced_hashes = &first.merkle_paths[0..spliced_len]; + path.merkle_paths + .splice(0..0, spliced_hashes.iter().cloned()); + debug_assert_eq!(path.merkle_paths.len(), first.merkle_paths.len()); + } + } + merkle_paths.into_iter() + } } /// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct BasicCircuitWitnessGeneratorInput { pub block_number: L1BatchNumber, pub previous_block_hash: H256, @@ -149,14 +222,7 @@ pub struct PrepareLeafAggregationCircuitsJob { pub struct PrepareNodeAggregationCircuitJob { pub previous_level_proofs: Vec>>>, pub previous_level_leafs_aggregations: Vec>, - pub previous_sequence: Vec< - zkevm_test_harness::encodings::QueueSimulator< - Bn256, - zkevm_test_harness::encodings::recursion_request::RecursionRequest, - 2, - 2, - >, - >, + pub previous_sequence: Vec, 2, 2>>, } #[derive(Clone)] @@ -177,22 +243,24 @@ pub struct ProverJobMetadata { pub sequence_number: usize, } -pub struct ProverJob { - pub metadata: ProverJobMetadata, - pub circuit_input: Vec, -} - +#[derive(Debug)] pub struct JobPosition { pub aggregation_round: AggregationRound, pub sequence_number: usize, } -#[derive(Default)] +#[derive(Debug, Default)] pub struct ProverJobStatusFailed { pub started_at: DateTime, pub error: String, } +#[derive(Debug)] +pub struct ProverJobStatusSuccessful { + pub started_at: DateTime, + pub time_taken: chrono::Duration, +} + impl Default for ProverJobStatusSuccessful { fn default() -> Self { ProverJobStatusSuccessful { @@ -202,16 +270,12 @@ impl Default for ProverJobStatusSuccessful { } } -pub struct ProverJobStatusSuccessful { - pub started_at: DateTime, - pub time_taken: chrono::Duration, -} - -#[derive(Default)] +#[derive(Debug, Default)] pub struct ProverJobStatusInProgress { pub started_at: DateTime, } +#[derive(Debug)] pub struct WitnessJobStatusSuccessful { pub started_at: DateTime, pub time_taken: chrono::Duration, @@ -226,13 +290,13 @@ impl Default for WitnessJobStatusSuccessful { } } -#[derive(Default)] +#[derive(Debug, Default)] pub struct WitnessJobStatusFailed { pub started_at: DateTime, pub error: String, } -#[derive(strum::Display, strum::EnumString, strum::AsRefStr)] +#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] pub enum ProverJobStatus { #[strum(serialize = "queued")] Queued, @@ -248,7 +312,7 @@ pub enum ProverJobStatus { Ignored, } -#[derive(strum::Display, strum::EnumString, strum::AsRefStr)] +#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] pub enum WitnessJobStatus { #[strum(serialize = "failed")] Failed(WitnessJobStatusFailed), @@ -266,6 +330,7 @@ pub enum WitnessJobStatus { Queued, } +#[derive(Debug)] pub struct WitnessJobInfo { pub block_number: L1BatchNumber, pub created_at: DateTime, @@ -274,6 +339,7 @@ pub struct WitnessJobInfo { pub position: JobPosition, } +#[derive(Debug)] pub struct ProverJobInfo { pub id: u32, pub block_number: L1BatchNumber, @@ -286,6 +352,7 @@ pub struct ProverJobInfo { pub updated_at: DateTime, } +#[derive(Debug)] pub struct JobExtendedStatistics { pub successful_padding: L1BatchNumber, pub queued_padding: L1BatchNumber, @@ -293,7 +360,7 @@ pub struct JobExtendedStatistics { pub active_area: Vec, } -#[derive(Debug)] +#[derive(Debug, Clone, Copy, Default)] pub struct JobCountStatistics { pub queued: usize, pub in_progress: usize, @@ -313,3 +380,43 @@ impl Add for JobCountStatistics { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn prepare_basic_circuits_job_roundtrip() { + let zero_hash = [0_u8; 32]; + let logs = (0..10).map(|i| { + let mut merkle_paths = vec![zero_hash; 255]; + merkle_paths.push([i as u8; 32]); + StorageLogMetadata { + root_hash: zero_hash, + is_write: i % 2 == 0, + first_write: i % 3 == 0, + merkle_paths, + leaf_hashed_key: U256::from(i), + leaf_enumeration_index: i + 1, + 
value_written: [i as u8; 32], + value_read: [0; 32], + } + }); + let logs: Vec<_> = logs.collect(); + + let mut job = PrepareBasicCircuitsJob::new(4); + job.reserve(logs.len()); + for log in &logs { + job.push_merkle_path(log.clone()); + } + + // Check that Merkle paths are compacted. + for (i, log) in job.merkle_paths.iter().enumerate() { + let expected_merkle_path_len = if i == 0 { 256 } else { 1 }; + assert_eq!(log.merkle_paths.len(), expected_merkle_path_len); + } + + let logs_from_job: Vec<_> = job.into_merkle_paths().collect(); + assert_eq!(logs_from_job, logs); + } +} diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index 2e7202dfdc42..b6e1638bd561 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -2,7 +2,7 @@ use core::fmt::Debug; use blake2::{Blake2s256, Digest}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::web3::signing::keccak256; +use zksync_basic_types::{web3::signing::keccak256, L2ChainId}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -110,18 +110,15 @@ pub trait ZkSyncReadStorage: Debug { /// Returns if the write to the given key is initial. fn is_write_initial(&mut self, key: &StorageKey) -> bool; - /// Load the contract code deployed to the provided address. - fn load_contract(&mut self, address: Address) -> Option>; - /// Load the factory dependency code by its hash. fn load_factory_dep(&mut self, hash: H256) -> Option>; } -pub fn get_system_context_init_logs(chain_id: H256) -> Vec { +pub fn get_system_context_init_logs(chain_id: L2ChainId) -> Vec { vec![ StorageLog::new_write_log( get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION), - chain_id, + H256::from_low_u64_be(chain_id.0 as u64), ), StorageLog::new_write_log( get_system_context_key(SYSTEM_CONTEXT_BLOCK_GAS_LIMIT_POSITION), diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 718473cdc8e5..794e25bd102e 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -6,9 +6,8 @@ use rlp::{DecoderError, Rlp, RlpStream}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tiny_keccak::keccak256; -use zk_evm::abstractions::MAX_MEMORY_BYTES; use zksync_basic_types::H256; -use zksync_config::constants::MAX_GAS_PER_PUBDATA_BYTE; +use zksync_config::constants::{MAX_GAS_PER_PUBDATA_BYTE, USED_BOOTLOADER_MEMORY_BYTES}; use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; use zksync_utils::u256_to_h256; @@ -48,13 +47,15 @@ pub struct CallRequest { pub max_fee_per_gas: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub max_priority_fee_per_gas: Option, - /// Transfered value (None for no transfer) + /// Transferred value (None for no transfer) #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Data (None for empty data) #[serde(default, skip_serializing_if = "Option::is_none")] pub data: Option, - + /// Nonce + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nonce: Option, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub transaction_type: Option, /// Access list @@ -66,7 +67,7 @@ pub struct CallRequest { } impl CallRequest { - /// Funtion to return a builder for a Call Request + /// Function to return a builder for a Call Request pub fn builder() -> CallRequestBuilder { CallRequestBuilder::default() } @@ -186,6 +187,8 @@ pub enum SerializationTransactionError { /// making the 
transaction invalid, rather a DOS protection. #[error("oversized data. max: {0}; actual: {0}")] OversizedData(usize, usize), + #[error("gas per pub data limit is zero")] + GasPerPubDataLimitZero, } /// Description of a Transaction, pending or in the chain. @@ -704,6 +707,8 @@ impl TransactionRequest { return Err(SerializationTransactionError::TooHighGas( "max fee per pubdata byte higher than 2^64-1".to_string(), )); + } else if meta.gas_per_pubdata == U256::zero() { + return Err(SerializationTransactionError::GasPerPubDataLimitZero); } meta.gas_per_pubdata } else { @@ -815,7 +820,7 @@ pub fn tx_req_from_call_req( let calldata = call_request.data.unwrap_or_default(); let transaction_request = TransactionRequest { - nonce: Default::default(), + nonce: call_request.nonce.unwrap_or_default(), from: call_request.from, to: call_request.to, value: call_request.value.unwrap_or_default(), @@ -844,7 +849,7 @@ impl TryFrom for L1Tx { type Error = SerializationTransactionError; fn try_from(tx: CallRequest) -> Result { // L1 transactions have no limitations on the transaction size. - let tx: L2Tx = l2_tx_from_call_req(tx, MAX_MEMORY_BYTES)?; + let tx: L2Tx = l2_tx_from_call_req(tx, USED_BOOTLOADER_MEMORY_BYTES)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1436,6 +1441,7 @@ mod tests { max_priority_fee_per_gas: Some(U256::from(12u32)), value: Some(U256::from(12u32)), data: Some(Bytes(factory_dep)), + nonce: None, transaction_type: Some(U64::from(EIP_712_TX_TYPE)), access_list: None, eip712_meta: None, @@ -1449,4 +1455,35 @@ mod tests { Err(SerializationTransactionError::OversizedData(_, _)) )); } + + #[test] + fn test_tx_req_from_call_req_nonce_pass() { + let call_request_with_nonce = CallRequest { + from: Some(Address::random()), + to: Some(Address::random()), + gas: Some(U256::from(12u32)), + gas_price: Some(U256::from(12u32)), + max_fee_per_gas: Some(U256::from(12u32)), + max_priority_fee_per_gas: Some(U256::from(12u32)), + value: Some(U256::from(12u32)), + data: Some(Bytes(vec![1, 2, 3])), + nonce: Some(U256::from(123u32)), + transaction_type: Some(U64::from(EIP_712_TX_TYPE)), + access_list: None, + eip712_meta: None, + }; + let tx_request = tx_req_from_call_req( + call_request_with_nonce.clone(), + USED_BOOTLOADER_MEMORY_BYTES, + ) + .unwrap(); + assert_eq!(tx_request.nonce, U256::from(123u32)); + + let mut call_request_without_nonce = call_request_with_nonce; + call_request_without_nonce.nonce = None; + + let tx_request = + tx_req_from_call_req(call_request_without_nonce, USED_BOOTLOADER_MEMORY_BYTES).unwrap(); + assert_eq!(tx_request.nonce, U256::from(0u32)); + } } diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index bd8a81f458b4..bd2e6e46694d 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -13,6 +13,7 @@ pub mod primitives; pub mod tx_execution_info; pub use self::execute::Execute; +use crate::vm_trace::Call; use crate::Transaction; pub use tx_execution_info::ExecutionMetrics; use tx_execution_info::TxExecutionStatus; @@ -26,6 +27,26 @@ pub struct TransactionExecutionResult { pub refunded_gas: u32, pub operator_suggested_refund: u32, pub compressed_bytecodes: Vec, + pub call_traces: Vec, + pub revert_reason: Option, +} + +impl TransactionExecutionResult { + pub fn call_trace(&self) -> Option { + if self.call_traces.is_empty() { + None + } else { + Some(Call::new_high_level( + self.transaction.gas_limit().as_u32(), + 
self.transaction.gas_limit().as_u32() - self.refunded_gas, + self.transaction.execute.value, + self.transaction.execute.calldata.clone(), + vec![], + self.revert_reason.clone(), + self.call_traces.clone(), + )) + } + } } #[derive(Debug, Clone)] diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs index 83d9801f8f1a..d76e8a1a536b 100644 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ b/core/lib/types/src/tx/tx_execution_info.rs @@ -56,6 +56,7 @@ pub struct ExecutionMetrics { pub storage_logs: usize, pub total_log_queries: usize, pub cycles_used: u32, + pub computational_gas_used: u32, } impl ExecutionMetrics { @@ -71,6 +72,7 @@ impl ExecutionMetrics { contracts_deployed: u16, contracts_used: usize, cycles_used: u32, + computational_gas_used: u32, ) -> Self { // We published the data as ABI-encoded `bytes`, so the total length is: // - message length in bytes, rounded up to a multiple of 32 @@ -99,6 +101,7 @@ impl ExecutionMetrics { storage_logs: logs.storage_logs.len(), total_log_queries: logs.total_log_queries_count, cycles_used, + computational_gas_used, } } } @@ -119,6 +122,7 @@ impl Add for ExecutionMetrics { storage_logs: self.storage_logs + other.storage_logs, total_log_queries: self.total_log_queries + other.total_log_queries, cycles_used: self.cycles_used + other.cycles_used, + computational_gas_used: self.computational_gas_used + other.computational_gas_used, } } } diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/types/src/vm_trace.rs index 5317e2e303bf..34ac1d77d63f 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/types/src/vm_trace.rs @@ -1,6 +1,15 @@ use crate::{Address, U256}; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; +use std::fmt; +use zk_evm::zkevm_opcode_defs::FarCallOpcode; +use zksync_config::constants::BOOTLOADER_ADDRESS; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub enum VmTrace { + ExecutionTrace(VmExecutionTrace), + CallTrace(Vec), +} #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] pub struct VmExecutionTrace { @@ -48,3 +57,140 @@ pub struct VmDebugTrace { pub steps: Vec, pub sources: HashMap>, } + +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] +pub enum CallType { + #[serde(serialize_with = "far_call_type_to_u8")] + #[serde(deserialize_with = "far_call_type_from_u8")] + Call(FarCallOpcode), + Create, + NearCall, +} + +#[derive(Clone, Serialize, Deserialize)] +/// Represents a call in the VM trace. +pub struct Call { + /// Type of the call. + pub r#type: CallType, + /// Address of the caller. + pub from: Address, + /// Address of the callee. + pub to: Address, + /// Gas from the parent call. + pub parent_gas: u32, + /// Gas provided for the call. + pub gas: u32, + /// Gas used by the call. + pub gas_used: u32, + /// Value transferred. + pub value: U256, + /// Input data. + pub input: Vec, + /// Output data. + pub output: Vec, + /// Error message provided by vm or some unexpected errors. + pub error: Option, + /// Revert reason. + pub revert_reason: Option, + /// Subcalls. 
+ pub calls: Vec, +} + +impl Call { + pub fn new_high_level( + gas: u32, + gas_used: u32, + value: U256, + input: Vec, + output: Vec, + revert_reason: Option, + calls: Vec, + ) -> Self { + Self { + r#type: CallType::Call(FarCallOpcode::Normal), + from: Address::zero(), + to: BOOTLOADER_ADDRESS, + parent_gas: gas, + gas, + gas_used, + value, + input, + output, + error: None, + revert_reason, + calls, + } + } +} + +impl PartialEq for Call { + fn eq(&self, other: &Self) -> bool { + self.revert_reason == other.revert_reason + && self.input == other.input + && self.from == other.from + && self.to == other.to + && self.r#type == other.r#type + && self.value == other.value + && self.error == other.error + && self.output == other.output + && self.calls == other.calls + } +} + +fn far_call_type_from_u8<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let res = u8::deserialize(deserializer)?; + match res { + 0 => Ok(FarCallOpcode::Normal), + 1 => Ok(FarCallOpcode::Delegate), + 2 => Ok(FarCallOpcode::Mimic), + _ => Err(serde::de::Error::custom("Invalid FarCallOpcode")), + } +} + +fn far_call_type_to_u8(far_call_type: &FarCallOpcode, s: S) -> Result +where + S: Serializer, +{ + s.serialize_u8(*far_call_type as u8) +} + +impl Default for Call { + fn default() -> Self { + Self { + r#type: CallType::Call(FarCallOpcode::Normal), + from: Default::default(), + to: Default::default(), + parent_gas: 0, + gas: 0, + gas_used: 0, + value: Default::default(), + input: vec![], + output: vec![], + error: None, + revert_reason: None, + calls: vec![], + } + } +} + +impl fmt::Debug for Call { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Call") + .field("type", &self.r#type) + .field("to", &self.to) + .field("from", &self.from) + .field("parent_gas", &self.parent_gas) + .field("gas_used", &self.gas_used) + .field("gas", &self.gas) + .field("value", &self.value) + .field("input", &format_args!("{:?}", self.input)) + .field("output", &format_args!("{:?}", self.output)) + .field("error", &self.error) + .field("revert_reason", &format_args!("{:?}", self.revert_reason)) + .field("call_traces", &self.calls) + .finish() + } +} diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index c7935d048001..74db646cd72f 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -4,15 +4,15 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" } -zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "main"} -#zk_evm = { path = "../../../../zk_evm" } +vlog = { path = "../../lib/vlog", version = "1.0" } +zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.2"} num = { version = "0.3.1", features = ["serde"] } bigdecimal = { version = "=0.2.0", features = ["serde"]} diff --git a/core/lib/utils/src/http_with_retries.rs b/core/lib/utils/src/http_with_retries.rs index b66ac76bfad7..008d51b96e7b 100644 --- a/core/lib/utils/src/http_with_retries.rs +++ b/core/lib/utils/src/http_with_retries.rs @@ -2,6 +2,12 @@ use reqwest::header::HeaderMap; use reqwest::{Client, Error, Method, Response}; use tokio::time::{sleep, Duration}; +#[derive(Debug)] +pub enum HttpError { + 
ReqwestError(Error), + RetryExhausted(String), +} + /// Method to send HTTP request with fixed number of retires with exponential back-offs. pub async fn send_request_with_retries( url: &str, @@ -9,21 +15,25 @@ pub async fn send_request_with_retries( method: Method, headers: Option, body: Option>, -) -> Result { +) -> Result { let mut retries = 0usize; let mut delay = Duration::from_secs(1); loop { - match send_request(url, method.clone(), headers.clone(), body.clone()).await { - Ok(response) => return Ok(response), - Err(err) => { - if retries >= max_retries { - return Err(err); - } - retries += 1; - sleep(delay).await; - delay = delay.checked_mul(2).unwrap_or(Duration::MAX); - } + let result = send_request(url, method.clone(), headers.clone(), body.clone()).await; + match result { + Ok(response) if response.status().is_success() => return Ok(response), + Ok(response) => vlog::error!("Received non OK http response {:?}", response.status()), + Err(err) => vlog::error!("Error while sending http request {:?}", err), + } + if retries >= max_retries { + return Err(HttpError::RetryExhausted(format!( + "All {} http retires failed", + max_retries + ))); } + retries += 1; + sleep(delay).await; + delay = delay.checked_mul(2).unwrap_or(Duration::MAX); } } diff --git a/core/lib/utils/src/misc.rs b/core/lib/utils/src/misc.rs index baac59b11ba7..da04e90efe06 100644 --- a/core/lib/utils/src/misc.rs +++ b/core/lib/utils/src/misc.rs @@ -6,9 +6,38 @@ pub fn miniblock_hash(miniblock_number: MiniblockNumber) -> H256 { } pub const fn ceil_div(a: u64, b: u64) -> u64 { - (a + b - 1) / b + if a == 0 { + a + } else { + (a - 1) / b + 1 + } } pub fn ceil_div_u256(a: U256, b: U256) -> U256 { (a + b - U256::from(1)) / b } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ceil_div_u64_max() { + assert_eq!(0, ceil_div(u64::MIN, u64::MAX)); + assert_eq!(1, ceil_div(u64::MAX, u64::MAX)); + } + + #[test] + fn test_ceil_div_roundup_required() { + assert_eq!(3, ceil_div(5, 2)); + assert_eq!(4, ceil_div(10, 3)); + assert_eq!(3, ceil_div(15, 7)); + } + + #[test] + fn test_ceil_div_no_roundup_required() { + assert_eq!(2, ceil_div(4, 2)); + assert_eq!(2, ceil_div(6, 3)); + assert_eq!(2, ceil_div(14, 7)); + } +} diff --git a/core/lib/vlog/Cargo.toml b/core/lib/vlog/Cargo.toml index 58c4d3624ad9..f1ffe013fc12 100644 --- a/core/lib/vlog/Cargo.toml +++ b/core/lib/vlog/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/vm/Cargo.toml b/core/lib/vm/Cargo.toml index 7bac5424ab85..8136c49ace39 100644 --- a/core/lib/vm/Cargo.toml +++ b/core/lib/vm/Cargo.toml @@ -4,13 +4,14 @@ version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "main" } +zkevm-assembly = { git = "https://github.com/matter-labs/era-zkEVM-assembly.git", branch = "v1.3.2" } + zksync_crypto = { path = "../crypto", version = "1.0" } zksync_types = { path = "../types", version = "1.0" } zksync_utils = { path 
= "../utils", version = "1.0" } @@ -18,7 +19,8 @@ zksync_config = { path = "../config", version = "1.0" } zksync_state = {path = "../state", version = "1.0" } zksync_storage = {path = "../storage", version = "1.0" } -zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "main"} +zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.2"} + zksync_contracts = { path = "../contracts" } hex = "0.4" @@ -34,4 +36,3 @@ tracing = "0.1" tempfile = "3.0.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" - diff --git a/core/lib/vm/README.md b/core/lib/vm/README.md new file mode 100644 index 000000000000..deb11bb51371 --- /dev/null +++ b/core/lib/vm/README.md @@ -0,0 +1,41 @@ +# VM Crate + +This crate contains code that interacts with the VM - the VM itself is in a separate repo internal: +[zk_evm][zk_evm_repo] or external:[era-zk_evm][zk_evm_repo_ext] + +## VM dependencies + +The VM relies on several subcomponents or traits, such as Memory and Storage. These traits are defined in the `zk_evm` +repo, while their implementations can be found in this crate, like the storage implementation in `oracles/storage.rs` +and the Memory implementation in `memory.rs`. + +Many of these implementations also support easy rollbacks and history, which is useful when creating a block with +multiple transactions and needing to return the VM to a previous state if transaction doesn't fit. + +### Tracers + +The VM implementation allows for the addition of `Tracers`, which are activated before and after each instruction. This +gives a more in-depth look into the VM, collecting detailed debugging information and logs. More details can be found in +the `tracer/` directory. + +## Running the VM + +To interact with the VM, first create it using methods in `vm_with_bootloader.rs`, such as `init_vm()`. Then, inject a +transaction using `push_transaction_to_bootloader_memory()` and execute the VM, for example using +`execute_till_block_end()` from vm.rs. + +### Bootloader + +In the context of zkEVM, we usually think about transactions. However, from the VM's perspective, it runs a single +program called the bootloader, which internally processes multiple transactions. + +### Rollbacks + +The `VMInstance` in `vm.rs` allows for easy rollbacks. You can save the current state at any moment by calling +`save_current_vm_as_snapshot()` and return to that state using `rollback_to_latest_snapshot()`. + +This rollback affects all subcomponents, like memory, storage, and events, and is mainly used if a transaction doesn't +fit in a block. 
+ +[zk_evm_repo]: https://github.com/matter-labs/zk_evm 'internal zk EVM repo' +[zk_evm_repo_ext]: https://github.com/matter-labs/era-zk_evm 'external zk EVM repo' diff --git a/core/lib/vm/fuzz/.gitignore b/core/lib/vm/fuzz/.gitignore new file mode 100644 index 000000000000..1a45eee7760d --- /dev/null +++ b/core/lib/vm/fuzz/.gitignore @@ -0,0 +1,4 @@ +target +corpus +artifacts +coverage diff --git a/core/lib/vm/fuzz/Cargo.toml b/core/lib/vm/fuzz/Cargo.toml new file mode 100644 index 000000000000..fb659b9c6284 --- /dev/null +++ b/core/lib/vm/fuzz/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "vm-fuzz" +version = "0.0.0" +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +vm-benchmark = {path = "../../../tests/vm-benchmark"} +zksync_types = {path = "../../types"} + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "deploy_transaction" +path = "fuzz_targets/deploy_transaction.rs" +test = false +doc = false diff --git a/core/lib/vm/fuzz/compare.sh b/core/lib/vm/fuzz/compare.sh new file mode 100644 index 000000000000..6edf91e091a3 --- /dev/null +++ b/core/lib/vm/fuzz/compare.sh @@ -0,0 +1,3 @@ +gdb ./bad --command=show_cycle.gdb -ex "show_cycle $1" > badout +gdb ./good --command=show_cycle.gdb -ex "show_cycle $1" > goodout +delta goodout badout --max-line-length 100000 diff --git a/core/lib/vm/fuzz/fuzz.sh b/core/lib/vm/fuzz/fuzz.sh new file mode 100644 index 000000000000..3b5b5e89f9cb --- /dev/null +++ b/core/lib/vm/fuzz/fuzz.sh @@ -0,0 +1 @@ +cargo +nightly fuzz run --no-cfg-fuzzing --strip-dead-code --sanitizer none --release deploy_transaction diff --git a/core/lib/vm/fuzz/fuzz_targets/deploy_transaction.rs b/core/lib/vm/fuzz/fuzz_targets/deploy_transaction.rs new file mode 100644 index 000000000000..e116eeecae96 --- /dev/null +++ b/core/lib/vm/fuzz/fuzz_targets/deploy_transaction.rs @@ -0,0 +1,27 @@ +#![no_main] + +use libfuzzer_sys::fuzz_target; +use vm_benchmark::{BenchmarkingVm, get_deploy_tx}; +use zksync_types::tx::tx_execution_info::TxExecutionStatus::Success; + +fuzz_target!(|input: &[u8]| { + if let Some(contract_code) = cut_to_allowed_bytecode_size(input) { + if let Ok(x) = BenchmarkingVm::new().run_transaction(&get_deploy_tx(contract_code)) { + if x.status == Success { + panic!("managed to produce valid code!"); + } + } + } +}); + +fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { + let mut words = bytes.len() / 32; + if words == 0 { + return None; + } + + if words & 1 == 0 { + words -= 1; + } + Some(&bytes[..32 * words]) +} diff --git a/core/lib/vm/fuzz/show_cycle.gdb b/core/lib/vm/fuzz/show_cycle.gdb new file mode 100644 index 000000000000..7069afaa34a8 --- /dev/null +++ b/core/lib/vm/fuzz/show_cycle.gdb @@ -0,0 +1,8 @@ +set pagination off + +define show_cycle + break cycle.rs:395 if self.local_state.monotonic_cycle_counter == $arg0 + run + p self.local_state.registers + quit +end diff --git a/core/lib/vm/src/bootloader_state.rs b/core/lib/vm/src/bootloader_state.rs index 2ecb845dfa64..8aa81b289da8 100644 --- a/core/lib/vm/src/bootloader_state.rs +++ b/core/lib/vm/src/bootloader_state.rs @@ -69,6 +69,7 @@ impl BootloaderState { /// Returns the size of the transaction with given index. /// Panics if there is no such transaction. 
+ /// Use it after #[allow(dead_code)] pub(crate) fn get_tx_size(&self, tx_index: usize) -> usize { self.tx_sizes[tx_index]
diff --git a/core/lib/vm/src/errors/tx_revert_reason.rs b/core/lib/vm/src/errors/tx_revert_reason.rs
index 9259dd87a376..4775d8339f79 100644
--- a/core/lib/vm/src/errors/tx_revert_reason.rs
+++ b/core/lib/vm/src/errors/tx_revert_reason.rs
@@ -35,6 +35,8 @@ pub enum TxRevertReason {
     TooBigGasLimit,
     // The bootloader did not have enough gas to start the transaction in the first place
    NotEnoughGasProvided,
+    // The tx made too many cold storage accesses, exceeding the missing-invocation limit
+    MissingInvocationLimitReached,
 }

 impl TxRevertReason {
@@ -75,7 +77,8 @@ impl TxRevertReason {
            BootloaderErrorCode::FailedToChargeFee => Self::FailedToChargeFee(revert_reason),
            BootloaderErrorCode::FromIsNotAnAccount => Self::FromIsNotAnAccount,
            BootloaderErrorCode::FailedToCheckAccount => Self::ValidationFailed(VmRevertReason::General {
-                msg: "Failed to check if `from` is an account. Most likely not enough gas provided".to_string()
+                msg: "Failed to check if `from` is an account. Most likely not enough gas provided".to_string(),
+                data: vec![],
            }),
            BootloaderErrorCode::UnacceptableGasPrice => Self::UnexpectedVMBehavior(
                "The operator included transaction with an unacceptable gas price".to_owned(),
@@ -110,11 +113,13 @@ impl TxRevertReason {
            BootloaderErrorCode::PaymasterReturnedInvalidContext => {
                Self::PaymasterValidationFailed(VmRevertReason::General {
                    msg: String::from("Paymaster returned invalid context"),
+                    data: vec![],
                })
            }
            BootloaderErrorCode::PaymasterContextIsTooLong => {
                Self::PaymasterValidationFailed(VmRevertReason::General {
                    msg: String::from("Paymaster returned context that is too long"),
+                    data: vec![],
                })
            }
            BootloaderErrorCode::AssertionError => {
@@ -127,26 +132,26 @@ impl TxRevertReason {
                Self::PayForTxFailed(revert_reason)
            },
            BootloaderErrorCode::FailedToMarkFactoryDeps => {
-                let msg = if let VmRevertReason::General { msg } = revert_reason {
-                    msg
+                let (msg, data) = if let VmRevertReason::General { msg , data} = revert_reason {
+                    (msg, data)
                } else {
-                    String::from("Most likely not enough gas provided")
+                    (String::from("Most likely not enough gas provided"), vec![])
                };
                Self::FailedToMarkFactoryDependencies(VmRevertReason::General {
-                    msg
+                    msg, data
                })
            },
            BootloaderErrorCode::TxValidationOutOfGas => {
-                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Not enough gas for transaction validation") })
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Not enough gas for transaction validation"), data: vec![] })
            },
            BootloaderErrorCode::NotEnoughGasProvided => {
                Self::NotEnoughGasProvided
            },
            BootloaderErrorCode::AccountReturnedInvalidMagic => {
-                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Account validation returned invalid magic value. Most often this means that the signature is incorrect") })
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Account validation returned invalid magic value. Most often this means that the signature is incorrect"), data: vec![] })
            },
            BootloaderErrorCode::PaymasterReturnedInvalidMagic => {
-                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Paymaster validation returned invalid magic value. Please refer to the documentation of the paymaster for more details") })
+                Self::ValidationFailed(VmRevertReason::General { msg: String::from("Paymaster validation returned invalid magic value. Please refer to the documentation of the paymaster for more details"), data: vec![] })
            }
            BootloaderErrorCode::Unknown => Self::UnexpectedVMBehavior(format!(
                "Unsupported error code: {}. Revert reason: {}",
@@ -202,6 +207,9 @@ impl Display for TxRevertReason {
                    "Transaction has a too big ergs limit and will not be executed by the server"
                )
            }
+            TxRevertReason::MissingInvocationLimitReached => {
+                write!(f, "Tx produced too many cold storage accesses")
+            }
        }
    }
 }
diff --git a/core/lib/vm/src/errors/vm_revert_reason.rs b/core/lib/vm/src/errors/vm_revert_reason.rs
index a38b99935e92..93d6cd370705 100644
--- a/core/lib/vm/src/errors/vm_revert_reason.rs
+++ b/core/lib/vm/src/errors/vm_revert_reason.rs
@@ -20,6 +20,7 @@ pub enum VmRevertReasonParsingError {
 pub enum VmRevertReason {
    General {
        msg: String,
+        data: Vec,
    },
    InnerTxError,
    VmError,
@@ -31,8 +32,8 @@ pub enum VmRevertReason {
 impl VmRevertReason {
    const GENERAL_ERROR_SELECTOR: &'static [u8] = &[0x08, 0xc3, 0x79, 0xa0];
-
-    fn parse_general_error(bytes: &[u8]) -> Result {
+    fn parse_general_error(original_bytes: &[u8]) -> Result {
+        let bytes = &original_bytes[4..];
        if bytes.len() < 32 {
            return Err(VmRevertReasonParsingError::InputIsTooShort(bytes.to_vec()));
        }
@@ -61,10 +62,29 @@ impl VmRevertReason {
            ));
        };

+        let raw_data = &data[32..32 + string_length];
        Ok(Self::General {
-            msg: String::from_utf8_lossy(&data[32..32 + string_length]).to_string(),
+            msg: String::from_utf8_lossy(raw_data).to_string(),
+            data: original_bytes.to_vec(),
        })
    }
+
+    pub fn to_user_friendly_string(&self) -> String {
+        match self {
+            // In case of `Unknown` reason we suppress it to prevent verbose Error function_selector = 0x{}
+            // message shown to user.
+            VmRevertReason::Unknown { .. } => "".to_owned(),
+            _ => self.to_string(),
+        }
+    }
+
+    pub fn encoded_data(&self) -> Vec {
+        match self {
+            VmRevertReason::Unknown { data, .. } => data.clone(),
+            VmRevertReason::General { data, .. } => data.clone(),
+            _ => vec![],
+        }
+    }
 }

 impl TryFrom<&[u8]> for VmRevertReason {
@@ -91,13 +111,12 @@ impl TryFrom<&[u8]> for VmRevertReason {
        }

        let function_selector = &bytes[0..4];
-        let error_data = &bytes[4..];
        match function_selector {
-            VmRevertReason::GENERAL_ERROR_SELECTOR => Self::parse_general_error(error_data),
+            VmRevertReason::GENERAL_ERROR_SELECTOR => Self::parse_general_error(bytes),
            _ => {
                let result = VmRevertReason::Unknown {
                    function_selector: function_selector.to_vec(),
-                    data: error_data.to_vec(),
+                    data: bytes.to_vec(),
                };
                vlog::warn!("Unsupported error type: {}", result);
                Ok(result)
@@ -111,7 +130,7 @@ impl Display for VmRevertReason {
        use VmRevertReason::{General, InnerTxError, Unknown, VmError};

        match self {
-            General { msg } => write!(f, "{}", msg),
+            General { msg, ..
} => write!(f, "{}", msg), VmError => write!(f, "VM Error",), InnerTxError => write!(f, "Bootloader-based tx failed"), Unknown { @@ -163,7 +182,8 @@ mod tests { assert_eq!( reason, VmRevertReason::General { - msg: "ERC20: transfer amount exceeds balance".to_string() + msg: "ERC20: transfer amount exceeds balance".to_string(), + data: msg } ); } diff --git a/core/lib/vm/src/event_sink.rs b/core/lib/vm/src/event_sink.rs index 5bfd279b22f7..868f06482e48 100644 --- a/core/lib/vm/src/event_sink.rs +++ b/core/lib/vm/src/event_sink.rs @@ -1,54 +1,47 @@ -use crate::{oracles::OracleWithHistory, utils::collect_log_queries_after_timestamp}; +use crate::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, + utils::collect_log_queries_after_timestamp, +}; use std::collections::HashMap; use zk_evm::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, - reference_impls::event_sink::{ApplicationData, EventMessage}, + reference_impls::event_sink::EventMessage, zkevm_opcode_defs::system_params::{ BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, }, }; -use crate::history_recorder::{AppDataFrameManagerWithHistory, FrameManager, WithHistory}; - -#[derive(Debug, Default, Clone, PartialEq)] -pub struct InMemoryEventSink { - pub frames_stack: AppDataFrameManagerWithHistory, +#[derive(Debug, Clone, PartialEq, Default)] +pub struct InMemoryEventSink { + pub frames_stack: AppDataFrameManagerWithHistory, } -impl OracleWithHistory for InMemoryEventSink { +impl OracleWithHistory for InMemoryEventSink { fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { self.frames_stack.rollback_to_timestamp(timestamp); } - - fn delete_history(&mut self) { - self.frames_stack.delete_history(); - } } // as usual, if we rollback the current frame then we apply changes to storage immediately, // otherwise we carry rollbacks to the parent's frames -impl InMemoryEventSink { +impl InMemoryEventSink { pub fn flatten(&self) -> (Vec, Vec, Vec) { assert_eq!( - self.frames_stack.inner().len(), + self.frames_stack.len(), 1, "there must exist an initial keeper frame" ); - let full_history = self.frames_stack.inner().current_frame().clone(); // we forget rollbacks as we have finished the execution and can just apply them - let ApplicationData { - forward, - rollbacks: _, - } = full_history; - let history = forward.clone(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(forward); - (history, events, l1_messages) + let history = self.frames_stack.forward().current_frame(); + let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); + (history.to_vec(), events, l1_messages) } pub fn get_log_queries(&self) -> usize { - let history = &self.frames_stack.inner().current_frame().forward; + let history = &self.frames_stack.forward().current_frame(); history.len() } @@ -57,32 +50,32 @@ impl InMemoryEventSink { from_timestamp: Timestamp, ) -> (Vec, Vec) { let history = collect_log_queries_after_timestamp( - &self.frames_stack.inner().current_frame().forward, + self.frames_stack.forward().current_frame(), from_timestamp, ); - Self::events_and_l1_messages_from_history(history) + Self::events_and_l1_messages_from_history(&history) } fn events_and_l1_messages_from_history( - history: Vec, + history: &[LogQuery], ) -> (Vec, Vec) { let mut tmp = HashMap::::with_capacity(history.len()); // note that we only use "forward" part and discard the rollbacks at the end, // since if rollbacks of parents were not appended anywhere 
we just still keep them - for el in history.into_iter() { + for el in history { // we are time ordered here in terms of rollbacks if tmp.get(&el.timestamp.0).is_some() { assert!(el.rollback); tmp.remove(&el.timestamp.0); } else { assert!(!el.rollback); - tmp.insert(el.timestamp.0, el); + tmp.insert(el.timestamp.0, *el); } } // naturally sorted by timestamp - let mut keys: Vec<_> = tmp.keys().into_iter().cloned().collect(); + let mut keys: Vec<_> = tmp.keys().cloned().collect(); keys.sort_unstable(); let mut events = vec![]; @@ -121,25 +114,19 @@ impl InMemoryEventSink { } pub fn get_size(&self) -> usize { - self.frames_stack - .inner() - .get_frames() - .iter() - .map(|frame| { - (frame.forward.len() + frame.rollbacks.len()) * std::mem::size_of::() - }) - .sum::() + self.frames_stack.get_size() } pub fn get_history_size(&self) -> usize { - self.frames_stack.history().len() - * std::mem::size_of::< - > as WithHistory>::HistoryRecord, - >() + self.frames_stack.get_history_size() + } + + pub fn delete_history(&mut self) { + self.frames_stack.delete_history(); } } -impl EventSink for InMemoryEventSink { +impl EventSink for InMemoryEventSink { // when we enter a new frame we should remember all our current applications and rollbacks // when we exit the current frame then if we did panic we should concatenate all current // forward and rollback cases @@ -163,26 +150,12 @@ impl EventSink for InMemoryEventSink { fn finish_frame(&mut self, panicked: bool, timestamp: Timestamp) { // if we panic then we append forward and rollbacks to the forward of parent, // otherwise we place rollbacks of child before rollbacks of the parent - let ApplicationData { forward, rollbacks } = self.frames_stack.drain_frame(timestamp); if panicked { - for query in forward { - self.frames_stack.push_forward(query, timestamp); - } - for query in rollbacks.into_iter().rev().into_iter().filter(|q| { - // As of now, the bootloader only emits debug logs - // for events, so we keep them here for now. - // They will be cleared on the server level. - q.address != *BOOTLOADER_FORMAL_ADDRESS || q.aux_byte != EVENT_AUX_BYTE - }) { - self.frames_stack.push_forward(query, timestamp); - } - } else { - for query in forward { - self.frames_stack.push_forward(query, timestamp); - } // we need to prepend rollbacks. No reverse here, as we do not care yet! 
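// A self-contained model of the revert payload that `parse_general_error` (in the
// vm_revert_reason.rs hunk above) now receives whole: a Solidity `Error(string)` buffer is the
// 4-byte selector 0x08c379a0 followed by the ABI-encoded offset, length and UTF-8 bytes of the
// message. After this change the parsed `VmRevertReason::General` keeps the decoded `msg` and,
// in `data`, the untouched original bytes. The encoder below is illustrative, not the crate's own.
fn encode_error_string(msg: &str) -> Vec<u8> {
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0]; // GENERAL_ERROR_SELECTOR
    let mut offset = [0u8; 32];
    offset[31] = 0x20; // the string payload starts 32 bytes into the ABI body
    out.extend_from_slice(&offset);
    let mut len = [0u8; 32];
    len[24..].copy_from_slice(&(msg.len() as u64).to_be_bytes()); // big-endian length word
    out.extend_from_slice(&len);
    let mut padded = msg.as_bytes().to_vec();
    padded.resize((msg.len() + 31) / 32 * 32, 0); // right-pad the message to a whole word
    out.extend_from_slice(&padded);
    out
}

fn main() {
    let raw = encode_error_string("ERC20: transfer amount exceeds balance");
    assert!(raw.starts_with(&[0x08, 0xc3, 0x79, 0xa0]));
    // `msg` would decode to the string above, while `data` would keep `raw` verbatim.
    assert_eq!(raw.len(), 4 + 32 + 32 + 64); // 38-byte message padded to 64 bytes
}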
- for query in rollbacks { - self.frames_stack.push_rollback(query, timestamp); - } + self.frames_stack.move_rollback_to_forward( + |q| q.address != *BOOTLOADER_FORMAL_ADDRESS || q.aux_byte != EVENT_AUX_BYTE, + timestamp, + ); } + self.frames_stack.merge_frame(timestamp); } } diff --git a/core/lib/vm/src/history_recorder.rs b/core/lib/vm/src/history_recorder.rs index 2dabb13be173..b3ae1e756765 100644 --- a/core/lib/vm/src/history_recorder.rs +++ b/core/lib/vm/src/history_recorder.rs @@ -1,5 +1,6 @@ use std::{ collections::HashMap, + fmt::Debug, hash::{BuildHasherDefault, Hash, Hasher}, }; @@ -7,7 +8,6 @@ use crate::storage::StoragePtr; use zk_evm::{ aux_structures::Timestamp, - reference_impls::event_sink::ApplicationData, vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; @@ -15,14 +15,13 @@ use zk_evm::{ use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; -pub type AppDataFrameManagerWithHistory = FrameManagerWithHistory>; -pub type MemoryWithHistory = HistoryRecorder; -pub type FrameManagerWithHistory = HistoryRecorder>; -pub type IntFrameManagerWithHistory = FrameManagerWithHistory>; +pub type MemoryWithHistory = HistoryRecorder; +pub type IntFrameManagerWithHistory = HistoryRecorder, H>; // Within the same cycle, timestamps in range timestamp..timestamp+TIME_DELTA_PER_CYCLE-1 // can be used. This can sometimes vioalate monotonicity of the timestamp within the // same cycle, so it should be normalized. +#[inline] fn normalize_timestamp(timestamp: Timestamp) -> Timestamp { let timestamp = timestamp.0; @@ -43,31 +42,143 @@ pub trait WithHistory { ) -> (Self::HistoryRecord, Self::ReturnValue); } +type EventList = Vec<(Timestamp, ::HistoryRecord)>; + +/// Controls if rolling back is possible or not. +/// Either [HistoryEnabled] or [HistoryDisabled]. +pub trait HistoryMode: private::Sealed + Debug + Clone + Default { + type History: Default; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone; + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ); + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + default: R, + ) -> R; +} + +mod private { + pub trait Sealed {} + impl Sealed for super::HistoryEnabled {} + impl Sealed for super::HistoryDisabled {} +} + +// derives require that all type parameters implement the trait, which is why +// HistoryEnabled/Disabled derive so many traits even though they mostly don't +// exist at runtime. + +/// A data structure with this parameter can be rolled back. +/// See also: [HistoryDisabled] +#[derive(Debug, Clone, Default, PartialEq)] +pub struct HistoryEnabled; + +/// A data structure with this parameter cannot be rolled back. +/// It won't even have rollback methods. 
+/// See also: [HistoryEnabled] +#[derive(Debug, Clone, Default)] +pub struct HistoryDisabled; + +impl HistoryMode for HistoryEnabled { + type History = EventList; + + fn clone_history(history: &Self::History) -> Self::History + where + T::HistoryRecord: Clone, + { + history.clone() + } + fn mutate_history)>( + recorder: &mut HistoryRecorder, + f: F, + ) { + f(&mut recorder.history) + } + fn borrow_history) -> R, R>( + recorder: &HistoryRecorder, + f: F, + _: R, + ) -> R { + f(&recorder.history) + } +} + +impl HistoryMode for HistoryDisabled { + type History = (); + + fn clone_history(_: &Self::History) -> Self::History {} + fn mutate_history)>( + _: &mut HistoryRecorder, + _: F, + ) { + } + fn borrow_history) -> R, R>( + _: &HistoryRecorder, + _: F, + default: R, + ) -> R { + default + } +} + /// A struct responsible for tracking history for /// a component that is passed as a generic parameter to it (`inner`). -#[derive(Debug, PartialEq)] -pub struct HistoryRecorder { +#[derive(Default)] +pub struct HistoryRecorder { inner: T, - history: Vec<(Timestamp, T::HistoryRecord)>, + history: H::History, +} + +impl PartialEq for HistoryRecorder +where + T::HistoryRecord: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.inner == other.inner + && self.borrow_history(|h1| other.borrow_history(|h2| h1 == h2, true), true) + } +} + +impl Debug for HistoryRecorder +where + T::HistoryRecord: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug_struct = f.debug_struct("HistoryRecorder"); + debug_struct.field("inner", &self.inner); + self.borrow_history( + |h| { + debug_struct.field("history", h); + }, + (), + ); + debug_struct.finish() + } } -impl Clone for HistoryRecorder +impl Clone for HistoryRecorder where T::HistoryRecord: Clone, + H: HistoryMode, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), - history: self.history.clone(), + history: H::clone_history(&self.history), } } } -impl HistoryRecorder { +impl HistoryRecorder { pub fn from_inner(inner: T) -> Self { Self { inner, - history: vec![], + history: Default::default(), } } @@ -75,8 +186,14 @@ impl HistoryRecorder { &self.inner } - pub fn history(&self) -> &Vec<(Timestamp, T::HistoryRecord)> { - &self.history + /// If history exists, modify it using `f`. + pub fn mutate_history)>(&mut self, f: F) { + H::mutate_history(self, f); + } + + /// If history exists, feed it into `f`. Otherwise return `default`. 
+ pub fn borrow_history) -> R, R>(&self, f: F, default: R) -> R { + H::borrow_history(self, f, default) } pub fn apply_historic_record( @@ -84,19 +201,33 @@ impl HistoryRecorder { item: T::HistoryRecord, timestamp: Timestamp, ) -> T::ReturnValue { - let timestamp = normalize_timestamp(timestamp); - let last_recorded_timestamp = self.history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); - assert!( - last_recorded_timestamp <= timestamp, - "Timestamps are not monotonic" - ); - let (reversed_item, return_value) = self.inner.apply_historic_record(item); - self.history.push((timestamp, reversed_item)); + + self.mutate_history(|history| { + let last_recorded_timestamp = history.last().map(|(t, _)| *t).unwrap_or(Timestamp(0)); + let timestamp = normalize_timestamp(timestamp); + assert!( + last_recorded_timestamp <= timestamp, + "Timestamps are not monotonic" + ); + history.push((timestamp, reversed_item)); + }); return_value } + /// Deletes all the history for its component, making + /// its current state irreversible + pub fn delete_history(&mut self) { + self.mutate_history(|h| h.clear()) + } +} + +impl HistoryRecorder { + pub fn history(&self) -> &Vec<(Timestamp, T::HistoryRecord)> { + &self.history + } + pub fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { loop { let should_undo = self @@ -112,170 +243,6 @@ impl HistoryRecorder { self.inner.apply_historic_record(item_to_apply); } } - - /// Deletes all the history for its component, making - /// its current state irreversible - pub fn delete_history(&mut self) { - self.history.clear(); - } -} - -impl Default for HistoryRecorder { - fn default() -> Self { - Self::from_inner(T::default()) - } -} - -/// Frame manager is basically a wrapper -/// over a stack of items, which typically constitute -/// frames in oracles like StorageOracle, Memory, etc. -#[derive(Debug, PartialEq, Clone)] -pub struct FrameManager { - frame_stack: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -pub enum FrameManagerHistoryRecord { - PushFrame, - PopFrame, - /// The operation should be handled by the current frame itself - InnerOperation(V), -} - -impl Default for FrameManager { - fn default() -> Self { - Self { - // We typically require at least the first frame to be there - // since the last user-provided frame might be reverted - frame_stack: vec![T::default()], - } - } -} - -impl WithHistory for FrameManager { - type HistoryRecord = FrameManagerHistoryRecord; - type ReturnValue = Option; - - fn apply_historic_record( - &mut self, - item: FrameManagerHistoryRecord, - ) -> (Self::HistoryRecord, Self::ReturnValue) { - match item { - FrameManagerHistoryRecord::PopFrame => { - self.frame_stack.pop().unwrap(); - (FrameManagerHistoryRecord::PushFrame, None) - } - FrameManagerHistoryRecord::PushFrame => { - self.frame_stack.push(T::default()); - (FrameManagerHistoryRecord::PopFrame, None) - } - FrameManagerHistoryRecord::InnerOperation(record) => { - let (resulting_op, return_value) = self - .frame_stack - .last_mut() - .unwrap() - .apply_historic_record(record); - ( - FrameManagerHistoryRecord::InnerOperation(resulting_op), - Some(return_value), - ) - } - } - } -} - -impl FrameManager -where - T: WithHistory + Default, -{ - pub fn current_frame(&self) -> &T { - self.frame_stack - .last() - .expect("Frame stack should never be empty") - } - - pub fn len(&self) -> usize { - self.frame_stack.len() - } - - pub fn get_frames(&self) -> &[T] { - &self.frame_stack - } -} - -impl HistoryRecorder> { - /// Add a new frame. 
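// A self-contained model of the compile-time toggle introduced in this hunk: the recorder is
// generic over a mode type, the enabled mode keeps an undo log while the disabled mode stores
// `()`, so turning history off costs no memory or bookkeeping. `Enabled`, `Disabled` and
// `Recorder` are simplified stand-ins for HistoryEnabled, HistoryDisabled and HistoryRecorder,
// not the crate's actual definitions.
trait Mode {
    type Log: Default;
    fn record(log: &mut Self::Log, entry: u32);
    fn len(log: &Self::Log) -> usize;
}

struct Enabled;
impl Mode for Enabled {
    type Log = Vec<u32>;
    fn record(log: &mut Self::Log, entry: u32) {
        log.push(entry); // keep the old value so it can be rolled back later
    }
    fn len(log: &Self::Log) -> usize {
        log.len()
    }
}

struct Disabled;
impl Mode for Disabled {
    type Log = ();
    fn record(_: &mut Self::Log, _: u32) {} // nothing is ever stored
    fn len(_: &Self::Log) -> usize {
        0
    }
}

struct Recorder<M: Mode> {
    value: u32,
    log: M::Log,
}

impl<M: Mode> Recorder<M> {
    fn new() -> Self {
        Self { value: 0, log: M::Log::default() }
    }
    fn set(&mut self, new: u32) {
        M::record(&mut self.log, self.value); // remember the previous value, if history is on
        self.value = new;
    }
}

fn main() {
    let mut with_history = Recorder::<Enabled>::new();
    let mut without_history = Recorder::<Disabled>::new();
    with_history.set(1);
    without_history.set(1);
    assert_eq!(Enabled::len(&with_history.log), 1); // one undo entry recorded
    assert_eq!(Disabled::len(&without_history.log), 0); // zero-sized log, nothing kept
}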
- pub fn push_frame(&mut self, timestamp: Timestamp) { - self.apply_historic_record(FrameManagerHistoryRecord::PushFrame, timestamp); - } - - /// Remove the current frame. - pub fn pop_frame(&mut self, timestamp: Timestamp) { - self.apply_historic_record(FrameManagerHistoryRecord::PopFrame, timestamp); - } -} - -impl HistoryRecorder>> { - /// Push an element to the forward queue - pub fn push_forward(&mut self, elem: T, timestamp: Timestamp) { - let forward_event = - ApplicationDataHistoryEvent::ForwardEvent(VectorHistoryEvent::Push(elem)); - let event = FrameManagerHistoryRecord::InnerOperation(forward_event); - - self.apply_historic_record(event, timestamp); - } - - /// Pop an element from the forward queue - pub fn pop_forward(&mut self, timestamp: Timestamp) -> T { - let forward_event = ApplicationDataHistoryEvent::ForwardEvent(VectorHistoryEvent::Pop); - let event = FrameManagerHistoryRecord::InnerOperation(forward_event); - - self.apply_historic_record(event, timestamp) - .flatten() - .unwrap() - } - - /// Push an element to the rollback queue - pub fn push_rollback(&mut self, elem: T, timestamp: Timestamp) { - let rollback_event = - ApplicationDataHistoryEvent::RollbacksEvent(VectorHistoryEvent::Push(elem)); - let event = FrameManagerHistoryRecord::InnerOperation(rollback_event); - - self.apply_historic_record(event, timestamp); - } - - /// Pop an element from the rollback queue - pub fn pop_rollback(&mut self, timestamp: Timestamp) -> T { - let rollback_event = ApplicationDataHistoryEvent::RollbacksEvent(VectorHistoryEvent::Pop); - let event = FrameManagerHistoryRecord::InnerOperation(rollback_event); - - self.apply_historic_record(event, timestamp) - .flatten() - .unwrap() - } - - /// Pops the current frame and returns its value - pub fn drain_frame(&mut self, timestamp: Timestamp) -> ApplicationData { - let mut forward = vec![]; - while !self.inner.current_frame().forward.is_empty() { - let popped_item = self.pop_forward(timestamp); - forward.push(popped_item); - } - - let mut rollbacks = vec![]; - while !self.inner.current_frame().rollbacks.is_empty() { - let popped_item = self.pop_rollback(timestamp); - rollbacks.push(popped_item); - } - - self.pop_frame(timestamp); - - // items are in reversed order: - ApplicationData { - forward: forward.into_iter().rev().collect(), - rollbacks: rollbacks.into_iter().rev().collect(), - } - } } #[derive(Debug, Clone, PartialEq)] @@ -284,7 +251,7 @@ pub enum VectorHistoryEvent { Pop, } -impl WithHistory for Vec { +impl WithHistory for Vec { type HistoryRecord = VectorHistoryEvent; type ReturnValue = Option; fn apply_historic_record( @@ -309,7 +276,7 @@ impl WithHistory for Vec { } } -impl HistoryRecorder> { +impl HistoryRecorder, H> { pub fn push(&mut self, elem: T, timestamp: Timestamp) { self.apply_historic_record(VectorHistoryEvent::Push(elem), timestamp); } @@ -328,54 +295,13 @@ impl HistoryRecorder> { } } -impl HistoryRecorder>> { - /// Push an element to the current frame - pub fn push_to_frame(&mut self, elem: T, timestamp: Timestamp) { - self.apply_historic_record( - FrameManagerHistoryRecord::InnerOperation(VectorHistoryEvent::Push(elem)), - timestamp, - ); - } - - /// Pop an element from the current frame - pub fn pop_from_frame(&mut self, timestamp: Timestamp) -> T { - self.apply_historic_record( - FrameManagerHistoryRecord::InnerOperation(VectorHistoryEvent::Pop), - timestamp, - ) - .flatten() - .unwrap() - } - - /// Drains the top frame and returns its value - pub fn drain_frame(&mut self, timestamp: Timestamp) -> Vec { - let mut 
items = vec![]; - while !self.inner.current_frame().is_empty() { - let popped_item = self.pop_from_frame(timestamp); - items.push(popped_item); - } - - self.pop_frame(timestamp); - - // items are in reversed order: - items.into_iter().rev().collect() - } - - /// Extends the top frame with a vector of items - pub fn extend_frame(&mut self, items: Vec, timestamp: Timestamp) { - for item in items { - self.push_to_frame(item, timestamp); - } - } -} - #[derive(Debug, Clone, PartialEq)] pub struct HashMapHistoryEvent { pub key: K, pub value: Option, } -impl WithHistory for HashMap { +impl WithHistory for HashMap { type HistoryRecord = HashMapHistoryEvent; type ReturnValue = Option; fn apply_historic_record( @@ -399,7 +325,7 @@ impl WithHistory for HashMap { } } -impl HistoryRecorder> { +impl HistoryRecorder, H> { pub fn insert(&mut self, key: K, value: V, timestamp: Timestamp) -> Option { self.apply_historic_record( HashMapHistoryEvent { @@ -411,35 +337,179 @@ impl HistoryRecorder> { } } +/// A stack of stacks. The inner stacks are called frames. +/// +/// Does not support popping from the outer stack. Instead, the outer stack can +/// push its topmost frame's contents onto the previous frame. #[derive(Debug, Clone, PartialEq)] -pub enum ApplicationDataHistoryEvent { - // The event about the forward queue - ForwardEvent(VectorHistoryEvent), - // The event about the rollbacks queue - RollbacksEvent(VectorHistoryEvent), +pub struct FramedStack { + data: Vec, + frame_start_indices: Vec, } -impl WithHistory for ApplicationData { - type HistoryRecord = ApplicationDataHistoryEvent; - type ReturnValue = Option; +impl Default for FramedStack { + fn default() -> Self { + // We typically require at least the first frame to be there + // since the last user-provided frame might be reverted + Self { + data: vec![], + frame_start_indices: vec![0], + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum FramedStackEvent { + Push(T), + Pop, + PushFrame(usize), + MergeFrame, +} + +impl WithHistory for FramedStack { + type HistoryRecord = FramedStackEvent; + type ReturnValue = (); fn apply_historic_record( &mut self, - item: ApplicationDataHistoryEvent, + item: Self::HistoryRecord, ) -> (Self::HistoryRecord, Self::ReturnValue) { + use FramedStackEvent::*; match item { - ApplicationDataHistoryEvent::ForwardEvent(e) => { - let (vec_event, result) = self.forward.apply_historic_record(e); - (ApplicationDataHistoryEvent::ForwardEvent(vec_event), result) + Push(x) => { + self.data.push(x); + (Pop, ()) + } + Pop => { + let x = self.data.pop().unwrap(); + (Push(x), ()) } - ApplicationDataHistoryEvent::RollbacksEvent(e) => { - let (vec_event, result) = self.rollbacks.apply_historic_record(e); - ( - ApplicationDataHistoryEvent::RollbacksEvent(vec_event), - result, - ) + PushFrame(i) => { + self.frame_start_indices.push(i); + (MergeFrame, ()) + } + MergeFrame => { + let pos = self.frame_start_indices.pop().unwrap(); + (PushFrame(pos), ()) + } + } + } +} + +impl FramedStack { + fn push_frame(&self) -> FramedStackEvent { + FramedStackEvent::PushFrame(self.data.len()) + } + + pub fn current_frame(&self) -> &[T] { + &self.data[*self.frame_start_indices.last().unwrap()..self.data.len()] + } + + fn len(&self) -> usize { + self.frame_start_indices.len() + } + + /// Returns the amount of memory taken up by the stored items + pub fn get_size(&self) -> usize { + self.data.len() * std::mem::size_of::() + } +} + +impl HistoryRecorder, H> { + pub fn push_to_frame(&mut self, x: T, timestamp: Timestamp) { + 
self.apply_historic_record(FramedStackEvent::Push(x), timestamp); + } + pub fn clear_frame(&mut self, timestamp: Timestamp) { + let start = *self.inner.frame_start_indices.last().unwrap(); + while self.inner.data.len() > start { + self.apply_historic_record(FramedStackEvent::Pop, timestamp); + } + } + pub fn extend_frame(&mut self, items: impl IntoIterator, timestamp: Timestamp) { + for x in items { + self.push_to_frame(x, timestamp); + } + } + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(self.inner.push_frame(), timestamp); + } + pub fn merge_frame(&mut self, timestamp: Timestamp) { + self.apply_historic_record(FramedStackEvent::MergeFrame, timestamp); + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct AppDataFrameManagerWithHistory { + forward: HistoryRecorder, H>, + rollback: HistoryRecorder, H>, +} + +impl Default for AppDataFrameManagerWithHistory { + fn default() -> Self { + Self { + forward: Default::default(), + rollback: Default::default(), + } + } +} + +impl AppDataFrameManagerWithHistory { + pub fn delete_history(&mut self) { + self.forward.delete_history(); + self.rollback.delete_history(); + } + + pub fn push_forward(&mut self, item: T, timestamp: Timestamp) { + self.forward.push_to_frame(item, timestamp); + } + pub fn push_rollback(&mut self, item: T, timestamp: Timestamp) { + self.rollback.push_to_frame(item, timestamp); + } + pub fn push_frame(&mut self, timestamp: Timestamp) { + self.forward.push_frame(timestamp); + self.rollback.push_frame(timestamp); + } + pub fn merge_frame(&mut self, timestamp: Timestamp) { + self.forward.merge_frame(timestamp); + self.rollback.merge_frame(timestamp); + } + + pub fn len(&self) -> usize { + self.forward.inner.len() + } + pub fn forward(&self) -> &FramedStack { + &self.forward.inner + } + pub fn rollback(&self) -> &FramedStack { + &self.rollback.inner + } + + /// Returns the amount of memory taken up by the stored items + pub fn get_size(&self) -> usize { + self.forward().get_size() + self.rollback().get_size() + } + + pub fn get_history_size(&self) -> usize { + (self.forward.borrow_history(|h| h.len(), 0) + self.rollback.borrow_history(|h| h.len(), 0)) + * std::mem::size_of::< as WithHistory>::HistoryRecord>() + } +} + +impl AppDataFrameManagerWithHistory { + pub fn move_rollback_to_forward bool>(&mut self, filter: F, timestamp: Timestamp) { + for x in self.rollback.inner.current_frame().iter().rev() { + if filter(x) { + self.forward.push_to_frame(x.clone(), timestamp); } } + self.rollback.clear_frame(timestamp); + } +} + +impl AppDataFrameManagerWithHistory { + pub fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.forward.rollback_to_timestamp(timestamp); + self.rollback.rollback_to_timestamp(timestamp); } } @@ -469,7 +539,7 @@ pub struct MemoryWrapper { pub struct MemoryHistoryRecord { pub page: usize, pub slot: usize, - pub set_value: Option, + pub set_value: PrimitiveValue, } impl MemoryWrapper { @@ -507,11 +577,20 @@ impl MemoryWrapper { vec![PrimitiveValue::empty(); range.len()] } } + + const EMPTY: PrimitiveValue = PrimitiveValue::empty(); + + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory + .get(page) + .and_then(|page| page.get(&slot)) + .unwrap_or(&Self::EMPTY) + } } impl WithHistory for MemoryWrapper { type HistoryRecord = MemoryHistoryRecord; - type ReturnValue = Option; + type ReturnValue = PrimitiveValue; fn apply_historic_record( &mut self, @@ -525,10 +604,12 @@ impl WithHistory for MemoryWrapper { 
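// A self-contained model of the FramedStack introduced above: one flat Vec plus the start index
// of each frame. `push_frame` only records the current length and `merge_frame` only pops that
// index, so a child frame's items are retained by the parent without copying; this is what the
// event sink's forward/rollback stacks and the storage oracle now rely on instead of draining
// and re-pushing whole frames. Simplified for illustration; the real type also records history.
struct FramedStack<T> {
    data: Vec<T>,
    frame_start_indices: Vec<usize>,
}

impl<T> FramedStack<T> {
    fn new() -> Self {
        // The bottom frame always exists, mirroring the `Default` impl in the hunk above.
        Self { data: vec![], frame_start_indices: vec![0] }
    }
    fn push_frame(&mut self) {
        self.frame_start_indices.push(self.data.len());
    }
    fn push_to_frame(&mut self, x: T) {
        self.data.push(x);
    }
    fn merge_frame(&mut self) {
        // The items between the popped index and the end now belong to the parent frame.
        self.frame_start_indices.pop();
    }
    fn current_frame(&self) -> &[T] {
        &self.data[*self.frame_start_indices.last().unwrap()..]
    }
}

fn main() {
    let mut stack = FramedStack::new();
    stack.push_to_frame("parent");
    stack.push_frame();
    stack.push_to_frame("child");
    assert_eq!(stack.current_frame(), ["child"]);
    stack.merge_frame();
    // After the merge the parent frame sees both items, in order.
    assert_eq!(stack.current_frame(), ["parent", "child"]);
}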
self.ensure_page_exists(page); let page_handle = self.memory.get_mut(page).unwrap(); - let prev_value = match set_value { - Some(x) => page_handle.insert(slot, x), - None => page_handle.remove(&slot), - }; + let prev_value = if set_value == PrimitiveValue::empty() { + page_handle.remove(&slot) + } else { + page_handle.insert(slot, set_value) + } + .unwrap_or(PrimitiveValue::empty()); self.shrink_pages(); let reserved_item = MemoryHistoryRecord { @@ -541,14 +622,14 @@ impl WithHistory for MemoryWrapper { } } -impl HistoryRecorder { +impl HistoryRecorder { pub fn write_to_memory( &mut self, page: usize, slot: usize, - value: Option, + value: PrimitiveValue, timestamp: Timestamp, - ) -> Option { + ) -> PrimitiveValue { self.apply_historic_record( MemoryHistoryRecord { page, @@ -567,7 +648,7 @@ impl HistoryRecorder { // We manually clear the page to preserve correct history for slot in slots_to_clear { - self.write_to_memory(page, slot, None, timestamp); + self.write_to_memory(page, slot, PrimitiveValue::empty(), timestamp); } } } @@ -620,7 +701,7 @@ impl<'a> WithHistory for StorageWrapper<'a> { } } -impl<'a> HistoryRecorder> { +impl<'a, H: HistoryMode> HistoryRecorder, H> { pub fn read_from_storage(&self, key: &StorageKey) -> U256 { self.inner.read_from_storage(key) } diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs index 2126a022fe80..b185fc326280 100644 --- a/core/lib/vm/src/lib.rs +++ b/core/lib/vm/src/lib.rs @@ -21,11 +21,10 @@ pub mod vm_with_bootloader; mod tests; pub use crate::errors::TxRevertReason; +pub use crate::history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}; pub use crate::oracle_tools::OracleTools; pub use crate::oracles::storage::StorageOracle; -pub use crate::vm::VmBlockResult; -pub use crate::vm::VmExecutionResult; -pub use crate::vm::VmInstance; +pub use crate::vm::{VmBlockResult, VmExecutionResult, VmInstance}; pub use zk_evm; pub use zksync_types::vm_trace::VmExecutionTrace; diff --git a/core/lib/vm/src/memory.rs b/core/lib/vm/src/memory.rs index 105670226ed2..7c39027852aa 100644 --- a/core/lib/vm/src/memory.rs +++ b/core/lib/vm/src/memory.rs @@ -1,48 +1,39 @@ -use zk_evm::abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}; +use zk_evm::abstractions::{Memory, MemoryType}; use zk_evm::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; use zk_evm::vm_state::PrimitiveValue; use zk_evm::zkevm_opcode_defs::FatPointer; use zksync_types::U256; use crate::history_recorder::{ - FrameManager, IntFrameManagerWithHistory, MemoryWithHistory, MemoryWrapper, WithHistory, + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, }; use crate::oracles::OracleWithHistory; use crate::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; -#[derive(Debug, Default, Clone, PartialEq)] -pub struct SimpleMemory { - pub memory: MemoryWithHistory, - - pub observable_pages: IntFrameManagerWithHistory, +#[derive(Debug, Clone, PartialEq, Default)] +pub struct SimpleMemory { + pub memory: MemoryWithHistory, + pub observable_pages: IntFrameManagerWithHistory, } -impl OracleWithHistory for SimpleMemory { +impl OracleWithHistory for SimpleMemory { fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { self.memory.rollback_to_timestamp(timestamp); self.observable_pages.rollback_to_timestamp(timestamp); } - - fn delete_history(&mut self) { - self.memory.delete_history(); - self.observable_pages.delete_history(); - } } -impl SimpleMemory { +impl SimpleMemory { pub fn 
populate(&mut self, elements: Vec<(u32, Vec)>, timestamp: Timestamp) { for (page, values) in elements.into_iter() { - // Resizing the pages array to fit the page. - let len = values.len(); - assert!(len <= MEMORY_CELLS_OTHER_PAGES); - for (i, value) in values.into_iter().enumerate() { let value = PrimitiveValue { value, is_pointer: false, }; self.memory - .write_to_memory(page as usize, i, Some(value), timestamp); + .write_to_memory(page as usize, i, value, timestamp); } } } @@ -59,8 +50,7 @@ impl SimpleMemory { is_pointer: false, }; - self.memory - .write_to_memory(page, offset, Some(value), timestamp); + self.memory.write_to_memory(page, offset, value, timestamp); }); } @@ -77,12 +67,8 @@ impl SimpleMemory { .collect() } - pub fn read_slot(&self, page: usize, slot: usize) -> PrimitiveValue { - let slot = slot as u32; - let page = page as u32; - self.memory - .inner() - .dump_page_content_as_u256_words(page, slot..slot + 1)[0] + pub fn read_slot(&self, page: usize, slot: usize) -> &PrimitiveValue { + self.memory.inner().read_slot(page, slot) } // This method should be used with relatively small lengths, since @@ -128,28 +114,27 @@ impl SimpleMemory { .iter() .map(|page| page.len() * std::mem::size_of::<(usize, PrimitiveValue)>()) .sum::(); - let observable_pages_size = self - .observable_pages - .inner() - .get_frames() - .iter() - .map(|frame| frame.len() * std::mem::size_of::()) - .sum::(); + let observable_pages_size = self.observable_pages.inner().get_size(); memory_size + observable_pages_size } pub fn get_history_size(&self) -> usize { - let memory_size = self.memory.history().len() + let memory_size = self.memory.borrow_history(|h| h.len(), 0) * std::mem::size_of::<::HistoryRecord>(); - let observable_pages_size = self.observable_pages.history().len() - * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); + let observable_pages_size = self.observable_pages.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); memory_size + observable_pages_size } + + pub fn delete_history(&mut self) { + self.memory.delete_history(); + self.observable_pages.delete_history(); + } } -impl Memory for SimpleMemory { +impl Memory for SimpleMemory { fn execute_partial_query( &mut self, _monotonic_cycle_counter: u32, @@ -185,10 +170,10 @@ impl Memory for SimpleMemory { self.memory.write_to_memory( page, slot, - Some(PrimitiveValue { + PrimitiveValue { value: query.value, is_pointer: query.value_is_pointer, - }), + }, query.timestamp, ); } else { @@ -218,10 +203,10 @@ impl Memory for SimpleMemory { self.memory.write_to_memory( page, slot, - Some(PrimitiveValue { + PrimitiveValue { value: query.value, is_pointer: query.value_is_pointer, - }), + }, query.timestamp, ); } else { @@ -284,10 +269,10 @@ impl Memory for SimpleMemory { timestamp: Timestamp, ) { // Safe to unwrap here, since `finish_global_frame` is never called with empty stack - let current_observable_pages = self.observable_pages.drain_frame(timestamp); + let current_observable_pages = self.observable_pages.inner().current_frame(); let returndata_page = returndata_fat_pointer.memory_page; - for page in current_observable_pages { + for &page in current_observable_pages { // If the page's number is greater than or equal to the base_page, // it means that it was created by the internal calls of this contract. 
// We need to add this check as the calldata pointer is also part of the @@ -297,7 +282,9 @@ impl Memory for SimpleMemory { } } - // Push to the parent's frame + self.observable_pages.clear_frame(timestamp); + self.observable_pages.merge_frame(timestamp); + self.observable_pages .push_to_frame(returndata_page, timestamp); } diff --git a/core/lib/vm/src/oracle_tools.rs b/core/lib/vm/src/oracle_tools.rs index 571d7d0827f3..b1f8c4de2913 100644 --- a/core/lib/vm/src/oracle_tools.rs +++ b/core/lib/vm/src/oracle_tools.rs @@ -5,31 +5,39 @@ use std::fmt::Debug; use std::rc::Rc; use crate::event_sink::InMemoryEventSink; -use crate::oracles::decommitter::DecommitterOracle; -use crate::oracles::precompile::PrecompilesProcessorWithHistory; -use crate::oracles::storage::StorageOracle; +use crate::history_recorder::HistoryMode; +use crate::oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, +}; use crate::storage::Storage; use zk_evm::witness_trace::DummyTracer; +/// zkEVM requires a bunch of objects implementing given traits to work. +/// For example: Storage, Memory, PrecompilerProcessor etc +/// (you can find all these traites in zk_evm crate -> src/abstractions/mod.rs) +/// For each of these traits, we have a local implementation (for example StorageOracle) +/// that also support additional features (like rollbacks & history). +/// The OracleTools struct, holds all these things together in one place. #[derive(Debug)] -pub struct OracleTools<'a, const B: bool> { - pub storage: StorageOracle<'a>, - pub memory: SimpleMemory, - pub event_sink: InMemoryEventSink, - pub precompiles_processor: PrecompilesProcessorWithHistory, - pub decommittment_processor: DecommitterOracle<'a, B>, +pub struct OracleTools<'a, const B: bool, H: HistoryMode> { + pub storage: StorageOracle<'a, H>, + pub memory: SimpleMemory, + pub event_sink: InMemoryEventSink, + pub precompiles_processor: PrecompilesProcessorWithHistory, + pub decommittment_processor: DecommitterOracle<'a, B, H>, pub witness_tracer: DummyTracer, } -impl<'a> OracleTools<'a, false> { - pub fn new(storage_view: &'a mut dyn Storage) -> Self { +impl<'a, H: HistoryMode> OracleTools<'a, false, H> { + pub fn new(storage_view: &'a mut dyn Storage, _: H) -> Self { let pointer: Rc> = Rc::new(RefCell::new(storage_view)); Self { storage: StorageOracle::new(pointer.clone()), - memory: SimpleMemory::default(), - event_sink: InMemoryEventSink::default(), - precompiles_processor: PrecompilesProcessorWithHistory::default(), + memory: Default::default(), + event_sink: Default::default(), + precompiles_processor: Default::default(), decommittment_processor: DecommitterOracle::new(pointer.clone()), witness_tracer: DummyTracer {}, } diff --git a/core/lib/vm/src/oracles/decommitter.rs b/core/lib/vm/src/oracles/decommitter.rs index 5ffafabcf3f1..8001f7df69a1 100644 --- a/core/lib/vm/src/oracles/decommitter.rs +++ b/core/lib/vm/src/oracles/decommitter.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use crate::history_recorder::{HistoryRecorder, WithHistory}; +use crate::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}; use crate::storage::StoragePtr; use zk_evm::abstractions::MemoryType; @@ -15,19 +15,24 @@ use zksync_utils::{bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +/// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is +/// used by the VM to 'load' bytecodes into memory. 
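// A self-contained sketch of the caching the DecommitterOracle performs, as described in the
// comments of this hunk: the first decommit of a bytecode hash writes the code onto a fresh
// memory page and remembers hash -> page; later requests for the same hash return the recorded
// page without touching memory again, since code pages are read-only. The types here are
// simplified stand-ins for illustration, not the crate's own.
use std::collections::HashMap;

struct Decommitter {
    decommitted_code_hashes: HashMap<u64, u32>, // bytecode hash -> memory page
    next_page: u32,
}

impl Decommitter {
    fn decommit(&mut self, hash: u64) -> (u32, bool) {
        if let Some(&page) = self.decommitted_code_hashes.get(&hash) {
            (page, false) // already in memory, not fresh
        } else {
            let page = self.next_page;
            self.next_page += 1;
            // ...here the real oracle copies the bytecode words onto `page`...
            self.decommitted_code_hashes.insert(hash, page);
            (page, true) // freshly decommitted
        }
    }
}

fn main() {
    let mut d = Decommitter { decommitted_code_hashes: HashMap::new(), next_page: 0 };
    assert_eq!(d.decommit(0xabc), (0, true));
    assert_eq!(d.decommit(0xabc), (0, false)); // the second request reuses the same page
}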
#[derive(Debug)] -pub struct DecommitterOracle<'a, const B: bool> { +pub struct DecommitterOracle<'a, const B: bool, H: HistoryMode> { /// Pointer that enables to read contract bytecodes from the database. storage: StoragePtr<'a>, /// The cache of bytecodes that the bootloader "knows", but that are not necessarily in the database. - pub known_bytecodes: HistoryRecorder>>, + /// And it is also used as a database cache. + pub known_bytecodes: HistoryRecorder>, H>, /// Stores pages of memory where certain code hashes have already been decommitted. - decommitted_code_hashes: HistoryRecorder>, + /// It is expected that they all are present in the DB. + // `decommitted_code_hashes` history is necessary + pub decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, /// Stores history of decommitment requests. - decommitment_requests: HistoryRecorder>, + decommitment_requests: HistoryRecorder, H>, } -impl<'a, const B: bool> DecommitterOracle<'a, B> { +impl<'a, const B: bool, H: HistoryMode> DecommitterOracle<'a, B, H> { pub fn new(storage: StoragePtr<'a>) -> Self { Self { storage, @@ -37,6 +42,8 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { } } + /// Gets the bytecode for a given hash (either from storage, or from 'known_bytecodes' that were populated by `populate` method). + /// Panics if bytecode doesn't exist. pub fn get_bytecode(&mut self, hash: U256, timestamp: Timestamp) -> Vec { let entry = self.known_bytecodes.inner().get(&hash); @@ -59,6 +66,7 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { } } + /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -73,7 +81,7 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { .collect() } - pub fn get_decommitted_bytes_after_timestamp(&self, timestamp: Timestamp) -> usize { + pub fn get_decommitted_bytecodes_after_timestamp(&self, timestamp: Timestamp) -> usize { // Note, that here we rely on the fact that for each used bytecode // there is one and only one corresponding event in the history of it. self.decommitted_code_hashes @@ -84,26 +92,18 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { .count() } - pub fn get_number_of_decommitment_requests_after_timestamp( + pub fn get_decommitted_code_hashes_with_history( &self, - timestamp: Timestamp, - ) -> usize { - self.decommitment_requests - .history() - .iter() - .rev() - .take_while(|(t, _)| *t >= timestamp) - .count() - } - - pub fn get_decommitted_code_hashes_with_history(&self) -> &HistoryRecorder> { + ) -> &HistoryRecorder, HistoryEnabled> { &self.decommitted_code_hashes } + /// Returns the storage handle. Used only in tests. pub fn get_storage(&self) -> StoragePtr<'a> { self.storage.clone() } + /// Measures the amount of memory used by this Oracle (used for metrics only). pub fn get_size(&self) -> usize { // Hashmap memory overhead is neglected. 
let known_bytecodes_size = self @@ -119,43 +119,47 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { } pub fn get_history_size(&self) -> usize { - let known_bytecodes_stack_size = self.known_bytecodes.history().len() + let known_bytecodes_stack_size = self.known_bytecodes.borrow_history(|h| h.len(), 0) * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); - let known_bytecodes_heap_size = self - .known_bytecodes - .history() - .iter() - .map(|(_, event)| { - if let Some(bytecode) = event.value.as_ref() { - bytecode.len() * std::mem::size_of::() - } else { - 0 - } - }) - .sum::(); - let decommitted_code_hashes_size = self.decommitted_code_hashes.history().len() - * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + let known_bytecodes_heap_size = self.known_bytecodes.borrow_history( + |h| { + h.iter() + .map(|(_, event)| { + if let Some(bytecode) = event.value.as_ref() { + bytecode.len() * std::mem::size_of::() + } else { + 0 + } + }) + .sum::() + }, + 0, + ); + let decommitted_code_hashes_size = + self.decommitted_code_hashes.borrow_history(|h| h.len(), 0) + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); known_bytecodes_stack_size + known_bytecodes_heap_size + decommitted_code_hashes_size } + + pub fn delete_history(&mut self) { + self.decommitted_code_hashes.delete_history(); + self.known_bytecodes.delete_history(); + self.decommitment_requests.delete_history(); + } } -impl<'a, const B: bool> OracleWithHistory for DecommitterOracle<'a, B> { +impl<'a, const B: bool> OracleWithHistory for DecommitterOracle<'a, B, HistoryEnabled> { fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { self.decommitted_code_hashes .rollback_to_timestamp(timestamp); self.known_bytecodes.rollback_to_timestamp(timestamp); self.decommitment_requests.rollback_to_timestamp(timestamp); } - - fn delete_history(&mut self) { - self.decommitted_code_hashes.delete_history(); - self.known_bytecodes.delete_history(); - self.decommitment_requests.delete_history(); - } } -impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { +impl<'a, const B: bool, H: HistoryMode> DecommittmentProcessor for DecommitterOracle<'a, B, H> { + /// Loads a given bytecode hash into memory (see trait description for more details). fn decommit_into_memory( &mut self, monotonic_cycle_counter: u32, @@ -163,6 +167,8 @@ impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { memory: &mut M, ) -> (DecommittmentQuery, Option>) { self.decommitment_requests.push((), partial_query.timestamp); + // First - check if we didn't fetch this bytecode in the past. + // If we did - we can just return the page that we used before (as the memory is read only). if let Some(memory_page) = self .decommitted_code_hashes .inner() @@ -176,14 +182,15 @@ impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { (partial_query, None) } else { - // fresh one + // We are fetching a fresh bytecode that we didn't read before. let values = self.get_bytecode(partial_query.hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; let timestamp = partial_query.timestamp; partial_query.decommitted_length = values.len() as u16; partial_query.is_fresh = true; - // write into memory + // Create a template query, that we'll use for writing into memory. + // value & index are set to 0 - as they will be updated in the inner loop below. 
let mut tmp_q = MemoryQuery { timestamp, location: MemoryLocation { @@ -199,13 +206,14 @@ impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { self.decommitted_code_hashes .insert(partial_query.hash, page_to_use.0, timestamp); + // Copy the bytecode (that is stored in 'values' Vec) into the memory page. if B { for (i, value) in values.iter().enumerate() { tmp_q.location.index = MemoryIndex(i as u32); tmp_q.value = *value; memory.specialized_code_query(monotonic_cycle_counter, tmp_q); } - + // If we're in the witness mode - we also have to return the values. (partial_query, Some(values)) } else { for (i, value) in values.into_iter().enumerate() { diff --git a/core/lib/vm/src/oracles/mod.rs b/core/lib/vm/src/oracles/mod.rs index 5b9378729ed8..d219216b25f8 100644 --- a/core/lib/vm/src/oracles/mod.rs +++ b/core/lib/vm/src/oracles/mod.rs @@ -15,5 +15,4 @@ pub mod tracer; pub trait OracleWithHistory { fn rollback_to_timestamp(&mut self, timestamp: Timestamp); - fn delete_history(&mut self); } diff --git a/core/lib/vm/src/oracles/precompile.rs b/core/lib/vm/src/oracles/precompile.rs index 3374be5caa96..c75a8899f8f5 100644 --- a/core/lib/vm/src/oracles/precompile.rs +++ b/core/lib/vm/src/oracles/precompile.rs @@ -6,7 +6,7 @@ use zk_evm::{ precompiles::DefaultPrecompilesProcessor, }; -use crate::history_recorder::HistoryRecorder; +use crate::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; use super::OracleWithHistory; @@ -16,40 +16,37 @@ use super::OracleWithHistory; /// saving timestamps allows us to check the exact number /// of log queries, that were used during the tx execution. #[derive(Debug, Clone)] -pub struct PrecompilesProcessorWithHistory { - pub timestamp_history: HistoryRecorder>, +pub struct PrecompilesProcessorWithHistory { + pub timestamp_history: HistoryRecorder, H>, pub default_precompiles_processor: DefaultPrecompilesProcessor, } -impl OracleWithHistory for PrecompilesProcessorWithHistory { - fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { - self.timestamp_history.rollback_to_timestamp(timestamp); +impl Default for PrecompilesProcessorWithHistory { + fn default() -> Self { + Self { + timestamp_history: Default::default(), + default_precompiles_processor: DefaultPrecompilesProcessor, + } } +} - fn delete_history(&mut self) { - self.timestamp_history.delete_history(); +impl OracleWithHistory for PrecompilesProcessorWithHistory { + fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { + self.timestamp_history.rollback_to_timestamp(timestamp); } } -impl PrecompilesProcessorWithHistory { - pub fn new() -> Self { - Self { - timestamp_history: Default::default(), - default_precompiles_processor: DefaultPrecompilesProcessor {}, - } - } +impl PrecompilesProcessorWithHistory { pub fn get_timestamp_history(&self) -> &Vec { self.timestamp_history.inner() } -} -impl Default for PrecompilesProcessorWithHistory { - fn default() -> Self { - Self::new() + pub fn delete_history(&mut self) { + self.timestamp_history.delete_history(); } } -impl PrecompilesProcessor for PrecompilesProcessorWithHistory { +impl PrecompilesProcessor for PrecompilesProcessorWithHistory { fn start_frame(&mut self) { self.default_precompiles_processor.start_frame(); } diff --git a/core/lib/vm/src/oracles/storage.rs b/core/lib/vm/src/oracles/storage.rs index afb1c8bbff30..bc09f05fb7b2 100644 --- a/core/lib/vm/src/oracles/storage.rs +++ b/core/lib/vm/src/oracles/storage.rs @@ -3,8 +3,8 @@ use std::collections::HashMap; use crate::storage::StoragePtr; use 
crate::history_recorder::{ - AppDataFrameManagerWithHistory, FrameManager, HashMapHistoryEvent, HistoryRecorder, - StorageWrapper, WithHistory, + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, }; use zk_evm::abstractions::RefundedAmounts; @@ -12,7 +12,6 @@ use zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTE use zk_evm::{ abstractions::{RefundType, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, - reference_impls::event_sink::ApplicationData, }; use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ @@ -34,35 +33,30 @@ pub fn storage_key_of_log(query: &LogQuery) -> StorageKey { } #[derive(Debug)] -pub struct StorageOracle<'a> { +pub struct StorageOracle<'a, H: HistoryMode> { // Access to the persistent storage. Please note that it // is used only for read access. All the actual writes happen // after the execution ended. - pub storage: HistoryRecorder>, + pub storage: HistoryRecorder, H>, - pub frames_stack: AppDataFrameManagerWithHistory, + pub frames_stack: AppDataFrameManagerWithHistory, // The changes that have been paid for in previous transactions. // It is a mapping from storage key to the number of *bytes* that was paid by the user // to cover this slot. - pub paid_changes: HistoryRecorder>, + // `paid_changes` history is necessary + pub paid_changes: HistoryRecorder, HistoryEnabled>, } -impl<'a> OracleWithHistory for StorageOracle<'a> { +impl OracleWithHistory for StorageOracle<'_, HistoryEnabled> { fn rollback_to_timestamp(&mut self, timestamp: Timestamp) { self.frames_stack.rollback_to_timestamp(timestamp); self.storage.rollback_to_timestamp(timestamp); self.paid_changes.rollback_to_timestamp(timestamp); } - - fn delete_history(&mut self) { - self.frames_stack.delete_history(); - self.storage.delete_history(); - self.paid_changes.delete_history(); - } } -impl<'a> StorageOracle<'a> { +impl<'a, H: HistoryMode> StorageOracle<'a, H> { pub fn new(storage: StoragePtr<'a>) -> Self { Self { storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)), @@ -71,6 +65,12 @@ impl<'a> StorageOracle<'a> { } } + pub fn delete_history(&mut self) { + self.frames_stack.delete_history(); + self.storage.delete_history(); + self.paid_changes.delete_history(); + } + fn is_storage_key_free(&self, key: &StorageKey) -> bool { key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS) @@ -163,16 +163,7 @@ impl<'a> StorageOracle<'a> { } pub fn get_size(&self) -> usize { - let frames_stack_size = self - .frames_stack - .inner() - .get_frames() - .iter() - .map(|frame| { - (frame.rollbacks.len() + frame.forward.len()) - * std::mem::size_of::() - }) - .sum::(); + let frames_stack_size = self.frames_stack.get_size(); let paid_changes_size = self.paid_changes.inner().len() * std::mem::size_of::<(StorageKey, u32)>(); @@ -180,19 +171,16 @@ impl<'a> StorageOracle<'a> { } pub fn get_history_size(&self) -> usize { - let storage_size = self.storage.history().len() + let storage_size = self.storage.borrow_history(|h| h.len(), 0) * std::mem::size_of::<::HistoryRecord>(); - let frames_stack_size = self.frames_stack.history().len() - * std::mem::size_of::< - > as WithHistory>::HistoryRecord, - >(); - let paid_changes_size = self.paid_changes.history().len() + let frames_stack_size = self.frames_stack.get_history_size(); + let paid_changes_size = self.paid_changes.borrow_history(|h| h.len(), 0) * 
std::mem::size_of::< as WithHistory>::HistoryRecord>(); storage_size + frames_stack_size + paid_changes_size } } -impl<'a> VmStorageOracle for StorageOracle<'a> { +impl VmStorageOracle for StorageOracle<'_, H> { // Perform a storage read/write access by taking an partially filled query // and returning filled query and cold/warm marker for pricing purposes fn execute_partial_query( @@ -260,12 +248,9 @@ impl<'a> VmStorageOracle for StorageOracle<'a> { fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) { // If we panic then we append forward and rollbacks to the forward of parent, // otherwise we place rollbacks of child before rollbacks of the parent - let current_frame = self.frames_stack.drain_frame(timestamp); - let ApplicationData { forward, rollbacks } = current_frame; - if panicked { // perform actual rollback - for query in rollbacks.iter().rev() { + for query in self.frames_stack.rollback().current_frame().iter().rev() { let read_value = match query.log_type { StorageLogQueryType::Read => { // Having Read logs in rollback is not possible @@ -296,23 +281,21 @@ impl<'a> VmStorageOracle for StorageOracle<'a> { assert_eq!(current_value, written_value); } - for query in forward { - self.frames_stack.push_forward(query, timestamp) - } - for query in rollbacks.into_iter().rev() { - self.frames_stack.push_forward(query, timestamp) - } - } else { - for query in forward { - self.frames_stack.push_forward(query, timestamp) - } - for query in rollbacks { - self.frames_stack.push_rollback(query, timestamp) - } + self.frames_stack + .move_rollback_to_forward(|_| true, timestamp); } + self.frames_stack.merge_frame(timestamp); } } +/// Returns the number of bytes needed to publish a slot. +// Since we need to publish the state diffs onchain, for each of the updated storage slot +// we basically need to publish the following pair: (). +// While new_value is always 32 bytes long, for key we use the following optimization: +// - The first time we publish it, we use 32 bytes. +// Then, we remember a 8-byte id for this slot and assign it to it. We call this initial write. +// - The second time we publish it, we will use this 8-byte instead of the 32 bytes of the entire key. +// So the total size of the publish pubdata is 40 bytes. We call this kind of write the repeated one fn get_pubdata_price_bytes(_query: &LogQuery, is_initial: bool) -> u32 { if is_initial { zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32 diff --git a/core/lib/vm/src/oracles/tracer/bootloader.rs b/core/lib/vm/src/oracles/tracer/bootloader.rs index 49ac06cd5258..c2a02a5690bb 100644 --- a/core/lib/vm/src/oracles/tracer/bootloader.rs +++ b/core/lib/vm/src/oracles/tracer/bootloader.rs @@ -1,5 +1,11 @@ +use std::marker::PhantomData; + +use crate::history_recorder::HistoryMode; use crate::memory::SimpleMemory; -use crate::oracles::tracer::{ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer}; +use crate::oracles::tracer::{ + utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, + PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, +}; use zk_evm::{ abstractions::{ @@ -13,15 +19,18 @@ use zk_evm::{ /// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. /// Also, saves the information if this `ret` was caused by "out of gas" panic. 
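// A worked model of the pubdata pricing described in the storage-oracle comment above: every
// published slot carries a 32-byte new value; the first (initial) write of a key also publishes
// the full 32-byte key, while later (repeated) writes publish only the 8-byte id assigned to
// that key, i.e. 64 vs 40 bytes. The constants simply restate that comment; the real values come
// from zk_evm's system_params.
fn pubdata_price_bytes(is_initial_write: bool) -> u32 {
    let value_bytes = 32;
    let key_bytes = if is_initial_write { 32 } else { 8 };
    key_bytes + value_bytes
}

fn main() {
    assert_eq!(pubdata_price_bytes(true), 64); // initial write: full key + value
    assert_eq!(pubdata_price_bytes(false), 40); // repeated write: 8-byte id + value
}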
#[derive(Debug, Clone, Default)] -pub struct BootloaderTracer { +pub struct BootloaderTracer { is_bootloader_out_of_gas: bool, ret_from_the_bootloader: Option, + gas_spent_on_bytecodes_and_long_messages: u32, + _marker: PhantomData, } -impl Tracer for BootloaderTracer { +impl Tracer for BootloaderTracer { const CALL_AFTER_DECODING: bool = true; + const CALL_BEFORE_EXECUTION: bool = true; const CALL_AFTER_EXECUTION: bool = true; - type SupportedMemory = SimpleMemory; + type SupportedMemory = SimpleMemory; fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} fn after_decoding( @@ -42,10 +51,12 @@ impl Tracer for BootloaderTracer { fn before_execution( &mut self, - _state: VmLocalStateData<'_>, - _data: BeforeExecutionData, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, _memory: &Self::SupportedMemory, ) { + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); } fn after_execution( @@ -66,16 +77,22 @@ impl Tracer for BootloaderTracer { } } -impl ExecutionEndTracer for BootloaderTracer { +impl ExecutionEndTracer for BootloaderTracer { fn should_stop_execution(&self) -> bool { self.ret_from_the_bootloader == Some(RetOpcode::Ok) } } -impl PendingRefundTracer for BootloaderTracer {} -impl PubdataSpentTracer for BootloaderTracer {} +impl PendingRefundTracer for BootloaderTracer {} +impl StorageInvocationTracer for BootloaderTracer {} + +impl PubdataSpentTracer for BootloaderTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} -impl BootloaderTracer { +impl BootloaderTracer { fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { // The current frame is bootloader if the callstack depth is 1. // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior diff --git a/core/lib/vm/src/oracles/tracer/call.rs b/core/lib/vm/src/oracles/tracer/call.rs new file mode 100644 index 000000000000..695197f1614d --- /dev/null +++ b/core/lib/vm/src/oracles/tracer/call.rs @@ -0,0 +1,312 @@ +use crate::errors::VmRevertReason; +use crate::history_recorder::HistoryMode; +use crate::memory::SimpleMemory; +use std::convert::TryFrom; +use std::marker::PhantomData; +use std::mem; +use zk_evm::abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, +}; +use zk_evm::zkevm_opcode_defs::{ + FarCallABI, FarCallOpcode, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +}; +use zkevm_assembly::zkevm_opcode_defs::FatPointer; +use zksync_config::constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_types::vm_trace::{Call, CallType}; +use zksync_types::U256; + +/// NOTE Auto implementing clone for this tracer can cause stack overflow. +/// This is because of the stack field which is a Vec with nested vecs inside. +/// If you will need to implement clone for this tracer, please consider to not copy the stack field. +/// Method `extract_calls` will extract the necessary stack for you. 
+#[derive(Debug, Default)] +pub struct CallTracer { + stack: Vec, + _phantom: PhantomData, +} + +impl CallTracer { + pub fn new() -> Self { + Self { + stack: vec![], + _phantom: PhantomData, + } + } +} + +impl Tracer for CallTracer { + const CALL_AFTER_EXECUTION: bool = true; + + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + + fn before_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: BeforeExecutionData, + _memory: &Self::SupportedMemory, + ) { + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + let call_type = match data.opcode.variant.opcode { + Opcode::NearCall(_) => CallType::NearCall, + Opcode::FarCall(far_call) => CallType::Call(far_call), + Opcode::Ret(ret_code) => { + self.handle_ret_op_code(state, data, memory, ret_code); + return; + } + _ => { + return; + } + }; + + let mut current_call = Call { + r#type: call_type, + gas: 0, + ..Default::default() + }; + match call_type { + CallType::Call(_) | CallType::Create => { + self.handle_far_call_op_code(state, data, memory, &mut current_call) + } + CallType::NearCall => { + self.handle_near_call_op_code(state, data, memory, &mut current_call); + } + } + self.stack.push(current_call); + } +} + +impl CallTracer { + /// We use parent gas for proper calculation of the gas used in the trace. + /// This method updates parent gas for the current call. + fn update_parent_gas(&mut self, state: &VmLocalStateData<'_>, current_call: &mut Call) { + let current = state.vm_local_state.callstack.current; + let parent_gas = state + .vm_local_state + .callstack + .inner + .last() + .map(|call| call.ergs_remaining + current.ergs_remaining) + .unwrap_or(current.ergs_remaining); + current_call.parent_gas = parent_gas; + } + + fn handle_near_call_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &SimpleMemory, + current_call: &mut Call, + ) { + self.update_parent_gas(&state, current_call); + } + + fn handle_far_call_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &SimpleMemory, + current_call: &mut Call, + ) { + self.update_parent_gas(&state, current_call); + let current = state.vm_local_state.callstack.current; + // All calls from the actual users are mimic calls, + // so we need to check that the previous call was to the deployer. + // Actually it's a call to the constructor, + // and at this stage the caller is the user and the callee is the deployed contract.
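The `parent_gas` bookkeeping above is easier to see in isolation: when a frame is entered, the tracer records the sum of the caller's and the callee's remaining gas, and when the frame returns, the gas it consumed is that sum minus whatever remains afterwards. A small self-contained sketch under simplified names (not the tracer's actual API); the diff resumes immediately below:

// Sketch only: mirrors update_parent_gas / handle_ret_op_code from the hunk above.
struct Frame {
    parent_gas: u32,
}

fn on_call(caller_ergs_remaining: u32, callee_ergs_remaining: u32) -> Frame {
    Frame {
        parent_gas: caller_ergs_remaining + callee_ergs_remaining,
    }
}

fn on_ret(frame: &Frame, ergs_remaining_after_ret: u32) -> u32 {
    // Gas consumed by the frame and everything it called.
    frame.parent_gas - ergs_remaining_after_ret
}

fn main() {
    let frame = on_call(1_000, 4_000);
    assert_eq!(on_ret(&frame, 3_200), 1_800);
}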
+ let call_type = if let CallType::Call(far_call) = current_call.r#type { + if matches!(far_call, FarCallOpcode::Mimic) { + let previous_caller = state + .vm_local_state + .callstack + .inner + .last() + .map(|call| call.this_address) + // Actually it's safe to just unwrap here, because we have at least one call in the stack + // But i want to be sure that we will not have any problems in the future + .unwrap_or(current.this_address); + if previous_caller == CONTRACT_DEPLOYER_ADDRESS { + CallType::Create + } else { + CallType::Call(far_call) + } + } else { + CallType::Call(far_call) + } + } else { + unreachable!() + }; + let calldata = if current.code_page.0 == 0 || current.ergs_remaining == 0 { + vec![] + } else { + let packed_abi = + state.vm_local_state.registers[CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER as usize]; + assert!(packed_abi.is_pointer); + let far_call_abi = FarCallABI::from_u256(packed_abi.value); + memory.read_unaligned_bytes( + far_call_abi.memory_quasi_fat_pointer.memory_page as usize, + far_call_abi.memory_quasi_fat_pointer.start as usize, + far_call_abi.memory_quasi_fat_pointer.length as usize, + ) + }; + + current_call.input = calldata; + current_call.r#type = call_type; + current_call.from = current.msg_sender; + current_call.to = current.this_address; + current_call.value = U256::from(current.context_u128_value); + current_call.gas = current.ergs_remaining; + } + + fn save_output( + &mut self, + state: VmLocalStateData<'_>, + memory: &SimpleMemory, + ret_opcode: RetOpcode, + current_call: &mut Call, + ) { + let fat_data_pointer = + state.vm_local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; + + // if fat_data_pointer is not a pointer then there is no output + let output = if fat_data_pointer.is_pointer { + let fat_data_pointer = FatPointer::from_u256(fat_data_pointer.value); + if !fat_data_pointer.is_trivial() { + Some(memory.read_unaligned_bytes( + fat_data_pointer.memory_page as usize, + fat_data_pointer.start as usize, + fat_data_pointer.length as usize, + )) + } else { + None + } + } else { + None + }; + + match ret_opcode { + RetOpcode::Ok => { + current_call.output = output.unwrap_or_default(); + } + RetOpcode::Revert => { + if let Some(output) = output { + match VmRevertReason::try_from(output.as_slice()) { + Ok(rev) => { + current_call.revert_reason = Some(rev.to_string()); + } + Err(_) => { + current_call.revert_reason = Some(format!("{:?}", hex::encode(output))); + } + } + } else { + current_call.revert_reason = Some("Unknown revert reason".to_string()); + } + } + RetOpcode::Panic => { + current_call.error = Some("Panic".to_string()); + } + } + } + + fn handle_ret_op_code( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &SimpleMemory, + ret_opcode: RetOpcode, + ) { + // It's safe to unwrap here because we are sure that we have at least one call in the stack + let mut current_call = self.stack.pop().unwrap(); + current_call.gas_used = + current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining; + + if current_call.r#type != CallType::NearCall { + self.save_output(state, memory, ret_opcode, &mut current_call); + } + + // If there is a parent call, push the current call to it + // Otherwise, push the current call to the stack, because it's the top level call + if let Some(parent_call) = self.stack.last_mut() { + parent_call.calls.push(current_call); + } else { + self.stack.push(current_call); + } + } + + // Filter all near calls from the call stack + // Important that the very first 
call is a near call + And this NearCall includes several Normal or Mimic calls + So we return all children of this NearCall + pub fn extract_calls(&mut self) -> Vec { + if let Some(current_call) = self.stack.pop() { + filter_near_call(current_call) + } else { + vec![] + } + } +} + +// Filter all near calls from the call stack +// Normally we are not interested in NearCall, because it's just a wrapper for internal calls +fn filter_near_call(mut call: Call) -> Vec { + let mut calls = vec![]; + let original_calls = std::mem::take(&mut call.calls); + for call in original_calls { + calls.append(&mut filter_near_call(call)); + } + call.calls = calls; + + if call.r#type == CallType::NearCall { + mem::take(&mut call.calls) + } else { + vec![call] + } +} + +#[cfg(test)] +mod tests { + use crate::oracles::tracer::call::{filter_near_call, Call, CallType}; + use zk_evm::zkevm_opcode_defs::FarCallOpcode; + + #[test] + fn test_filter_near_calls() { + let mut call = Call::default(); + let filtered_call = filter_near_call(call.clone()); + assert_eq!(filtered_call.len(), 1); + + let mut near_call = call.clone(); + near_call.r#type = CallType::NearCall; + let filtered_call = filter_near_call(near_call.clone()); + assert_eq!(filtered_call.len(), 0); + + call.r#type = CallType::Call(FarCallOpcode::Mimic); + call.calls = vec![Call::default(), Call::default(), near_call.clone()]; + let filtered_call = filter_near_call(call.clone()); + assert_eq!(filtered_call.len(), 1); + assert_eq!(filtered_call[0].calls.len(), 2); + + let mut near_call = near_call; + near_call.calls = vec![Call::default(), Call::default(), near_call.clone()]; + call.calls = vec![Call::default(), Call::default(), near_call]; + let filtered_call = filter_near_call(call); + assert_eq!(filtered_call.len(), 1); + assert_eq!(filtered_call[0].calls.len(), 4); + } +} diff --git a/core/lib/vm/src/oracles/tracer/mod.rs b/core/lib/vm/src/oracles/tracer/mod.rs index 57464bb221e4..f4a3dcda1b5f 100644 --- a/core/lib/vm/src/oracles/tracer/mod.rs +++ b/core/lib/vm/src/oracles/tracer/mod.rs @@ -1,38 +1,52 @@ -use crate::memory::SimpleMemory; use zk_evm::abstractions::Tracer; use zk_evm::vm_state::VmLocalState; mod bootloader; +mod call; mod one_tx; mod transaction_result; mod utils; mod validation; pub use bootloader::BootloaderTracer; +pub use call::CallTracer; pub use one_tx::OneTxTracer; pub use validation::{ValidationError, ValidationTracer, ValidationTracerParams}; pub(crate) use transaction_result::TransactionResultTracer; -pub trait ExecutionEndTracer: Tracer { +use crate::history_recorder::HistoryMode; +use crate::memory::SimpleMemory; + +pub trait ExecutionEndTracer: Tracer> { // Returns whether the vm execution should stop. fn should_stop_execution(&self) -> bool; } -pub trait PendingRefundTracer: Tracer { - // Some(x) means that the bootloader has asked the operator to provide the refund for the - // transaction, where `x` is the refund that the bootloader has suggested on its own. +pub trait PendingRefundTracer: Tracer> { + /// Some(x) means that the bootloader has asked the operator to provide the refund for the + /// transaction, where `x` is the refund that the bootloader has suggested on its own. fn requested_refund(&self) -> Option { None } - // Set the current request for refund as fulfilled + /// Set the current request for refund as fulfilled fn set_refund_as_done(&mut self) {} } -pub trait PubdataSpentTracer: Tracer { - // Returns how much gas was spent on pubdata.
+pub trait PubdataSpentTracer: Tracer> { + /// Returns how much gas was spent on pubdata. fn gas_spent_on_pubdata(&self, _vm_local_state: &VmLocalState) -> u32 { 0 } } + +pub trait StorageInvocationTracer: + Tracer> +{ + /// Set how many invocations of the storage oracle were missed. + fn set_missed_storage_invocations(&mut self, _missed_storage_invocation: usize) {} + fn is_limit_reached(&self) -> bool { + false + } +} diff --git a/core/lib/vm/src/oracles/tracer/one_tx.rs b/core/lib/vm/src/oracles/tracer/one_tx.rs index 003d8abf3e92..b2b04b13181c 100644 --- a/core/lib/vm/src/oracles/tracer/one_tx.rs +++ b/core/lib/vm/src/oracles/tracer/one_tx.rs @@ -1,25 +1,27 @@ use super::utils::{computational_gas_price, print_debug_if_needed}; use crate::{ + history_recorder::HistoryMode, memory::SimpleMemory, oracles::tracer::{ - utils::VmHook, BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, - PubdataSpentTracer, + utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, + BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, }, vm::get_vm_hook_params, }; +use crate::oracles::tracer::{CallTracer, StorageInvocationTracer}; use zk_evm::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, vm_state::VmLocalState, - zkevm_opcode_defs::{LogOpcode, Opcode}, }; -use zksync_config::constants::{KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_types::vm_trace::Call; /// Allows any opcodes, but tells the VM to end the execution once the tx is over. -#[derive(Debug, Clone)] -pub struct OneTxTracer { +// Internally depends on the bootloader's VM hooks to get the notification once the transaction is finished. +#[derive(Debug)] +pub struct OneTxTracer { tx_has_been_processed: bool, // Some(x) means that the bootloader has asked the operator @@ -30,19 +32,23 @@ pub struct OneTxTracer { pub refund_gas: u32, pub gas_spent_on_bytecodes_and_long_messages: u32, + // Amount of gas used during account validation. computational_gas_used: u32, + // Maximum amount of gas that we're allowed to use during account validation. computational_gas_limit: u32, in_account_validation: bool, - bootloader_tracer: BootloaderTracer, + bootloader_tracer: BootloaderTracer, + call_tracer: Option>, } -impl Tracer for OneTxTracer { +impl Tracer for OneTxTracer { const CALL_BEFORE_EXECUTION: bool = true; const CALL_AFTER_EXECUTION: bool = true; - type SupportedMemory = SimpleMemory; + type SupportedMemory = SimpleMemory; fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( &mut self, _state: VmLocalStateData<'_>, @@ -77,17 +83,8 @@ impl Tracer for OneTxTracer { _ => {} } - if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) { - let current_stack = state.vm_local_state.callstack.get_current_stack(); - // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas. - // Note, that if there is less gas left than requested to burn it will be burnt anyway.
- if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS - || current_stack.this_address == L1_MESSENGER_ADDRESS - { - self.gas_spent_on_bytecodes_and_long_messages += - std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining); - } - } + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); } fn after_execution( @@ -96,11 +93,14 @@ impl Tracer for OneTxTracer { data: AfterExecutionData, memory: &Self::SupportedMemory, ) { - self.bootloader_tracer.after_execution(state, data, memory) + self.bootloader_tracer.after_execution(state, data, memory); + if let Some(call_tracer) = self.call_tracer.as_mut() { + call_tracer.after_execution(state, data, memory); + } } } -impl ExecutionEndTracer for OneTxTracer { +impl ExecutionEndTracer for OneTxTracer { fn should_stop_execution(&self) -> bool { self.tx_has_been_processed || self.bootloader_tracer.should_stop_execution() @@ -108,7 +108,7 @@ impl ExecutionEndTracer for OneTxTracer { } } -impl PendingRefundTracer for OneTxTracer { +impl PendingRefundTracer for OneTxTracer { fn requested_refund(&self) -> Option { self.pending_operator_refund } @@ -118,14 +118,21 @@ impl PendingRefundTracer for OneTxTracer { } } -impl PubdataSpentTracer for OneTxTracer { +impl PubdataSpentTracer for OneTxTracer { fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter } } -impl OneTxTracer { - pub fn new(computational_gas_limit: u32) -> Self { +impl StorageInvocationTracer for OneTxTracer {} + +impl OneTxTracer { + pub fn new(computational_gas_limit: u32, with_call_tracer: bool) -> Self { + let call_tracer = if with_call_tracer { + Some(CallTracer::new()) + } else { + None + }; Self { tx_has_been_processed: false, pending_operator_refund: None, @@ -135,6 +142,7 @@ impl OneTxTracer { computational_gas_limit, in_account_validation: false, bootloader_tracer: BootloaderTracer::default(), + call_tracer, } } @@ -149,4 +157,10 @@ impl OneTxTracer { pub fn validation_run_out_of_gas(&self) -> bool { self.computational_gas_used > self.computational_gas_limit } + + pub fn call_traces(&mut self) -> Vec { + self.call_tracer + .as_mut() + .map_or(vec![], |call_tracer| call_tracer.extract_calls()) + } } diff --git a/core/lib/vm/src/oracles/tracer/transaction_result.rs b/core/lib/vm/src/oracles/tracer/transaction_result.rs index d68d8b3e31d4..0adbf36a9bed 100644 --- a/core/lib/vm/src/oracles/tracer/transaction_result.rs +++ b/core/lib/vm/src/oracles/tracer/transaction_result.rs @@ -2,27 +2,60 @@ use zk_evm::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, - witness_trace::VmWitnessTracer, - zkevm_opcode_defs::decoding::VmEncodingMode, + vm_state::VmLocalState, zkevm_opcode_defs::FatPointer, }; -use zksync_types::U256; +use zksync_types::{vm_trace, U256}; use crate::memory::SimpleMemory; -use crate::oracles::tracer::utils::{print_debug_if_needed, read_pointer, VmHook}; -use crate::oracles::tracer::{ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer}; +use crate::oracles::tracer::{ + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, +}; use crate::vm::get_vm_hook_params; +use crate::{ + history_recorder::HistoryMode, + oracles::tracer::utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, read_pointer, + VmHook, + }, +}; -#[derive(Debug, Clone, 
Default)] -pub(crate) struct TransactionResultTracer { +#[derive(Debug)] +pub(crate) struct TransactionResultTracer { pub(crate) revert_reason: Option>, + gas_spent_on_bytecodes_and_long_messages: u32, + pub(crate) call_tracer: Option>, + missed_storage_invocation_limit: usize, + missed_storage_invocation: usize, } -impl> VmWitnessTracer for TransactionResultTracer {} +impl TransactionResultTracer { + pub(crate) fn new(missed_storage_invocation_limit: usize, with_call_tracer: bool) -> Self { + let call_tracer = if with_call_tracer { + Some(CallTracer::new()) + } else { + None + }; + Self { + missed_storage_invocation_limit, + revert_reason: None, + gas_spent_on_bytecodes_and_long_messages: 0, + missed_storage_invocation: 0, + call_tracer, + } + } + pub fn call_trace(&mut self) -> Option> { + self.call_tracer + .as_mut() + .map(|call_tracer| call_tracer.extract_calls()) + } +} -impl Tracer for TransactionResultTracer { - type SupportedMemory = SimpleMemory; +impl Tracer for TransactionResultTracer { const CALL_BEFORE_EXECUTION: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} fn after_decoding( @@ -54,23 +87,43 @@ impl Tracer for TransactionResultTracer { self.revert_reason = None; } } + + self.gas_spent_on_bytecodes_and_long_messages += + gas_spent_on_bytecodes_and_long_messages_this_opcode(&state, &data); } + fn after_execution( &mut self, - _state: VmLocalStateData<'_>, - _data: AfterExecutionData, - _memory: &Self::SupportedMemory, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, ) { + if let Some(call_tracer) = self.call_tracer.as_mut() { + call_tracer.after_execution(state, data, memory); + } } } -impl ExecutionEndTracer for TransactionResultTracer { +impl ExecutionEndTracer for TransactionResultTracer { + // If we reach the limit of memory invocations, we stop the execution and return the error to user fn should_stop_execution(&self) -> bool { - // This tracer will not prevent the execution from going forward - // until the end of the block. 
- false + self.is_limit_reached() + } +} + +impl PendingRefundTracer for TransactionResultTracer {} + +impl PubdataSpentTracer for TransactionResultTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter } } -impl PendingRefundTracer for TransactionResultTracer {} -impl PubdataSpentTracer for TransactionResultTracer {} +impl StorageInvocationTracer for TransactionResultTracer { + fn set_missed_storage_invocations(&mut self, missed_storage_invocation: usize) { + self.missed_storage_invocation = missed_storage_invocation; + } + fn is_limit_reached(&self) -> bool { + self.missed_storage_invocation > self.missed_storage_invocation_limit + } +} diff --git a/core/lib/vm/src/oracles/tracer/utils.rs b/core/lib/vm/src/oracles/tracer/utils.rs index f68df0d09e68..c500a213cced 100644 --- a/core/lib/vm/src/oracles/tracer/utils.rs +++ b/core/lib/vm/src/oracles/tracer/utils.rs @@ -1,14 +1,18 @@ +use crate::history_recorder::HistoryMode; use crate::memory::SimpleMemory; -use crate::utils::heap_page_from_base; +use crate::utils::{aux_heap_page_from_base, heap_page_from_base}; use crate::vm::{get_vm_hook_params, VM_HOOK_POSITION}; use crate::vm_with_bootloader::BOOTLOADER_HEAP_PAGE; +use zk_evm::aux_structures::MemoryPage; +use zk_evm::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; use zk_evm::{ abstractions::{BeforeExecutionData, VmLocalStateData}, zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, }; use zksync_config::constants::{ - ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, + L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; use zksync_types::U256; use zksync_utils::u256_to_h256; @@ -66,7 +70,10 @@ impl VmHook { } } -pub(crate) fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String { +pub(crate) fn get_debug_log( + state: &VmLocalStateData<'_>, + memory: &SimpleMemory, +) -> String { let vm_hook_params: Vec<_> = get_vm_hook_params(memory) .into_iter() .map(u256_to_h256) @@ -93,7 +100,10 @@ pub(crate) fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) /// Reads the memory slice represented by the fat pointer. /// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). -pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec { +pub(crate) fn read_pointer( + memory: &SimpleMemory, + pointer: FatPointer, +) -> Vec { let FatPointer { offset, length, @@ -114,7 +124,7 @@ pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec String { +pub(crate) fn get_debug_returndata(memory: &SimpleMemory) -> String { let vm_hook_params: Vec<_> = get_vm_hook_params(memory); let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]); let returndata = read_pointer(memory, returndata_ptr); @@ -123,10 +133,10 @@ pub(crate) fn get_debug_returndata(memory: &SimpleMemory) -> String { } /// Accepts a vm hook and, if it requires to output some debug log, outputs it. 
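The new stop condition for `TransactionResultTracer` above is driven by a counter that the storage layer already tracks: every storage read that misses the in-memory cache counts as a missed invocation, and once the counter exceeds the configured limit the tracer asks the VM to stop. A stripped-down, self-contained illustration of that check (not the real trait or types):

struct InvocationLimiter {
    missed: usize,
    limit: usize,
}

impl InvocationLimiter {
    // Mirrors StorageInvocationTracer::set_missed_storage_invocations.
    fn set_missed_storage_invocations(&mut self, missed: usize) {
        self.missed = missed;
    }
    // Mirrors StorageInvocationTracer::is_limit_reached.
    fn is_limit_reached(&self) -> bool {
        self.missed > self.limit
    }
}

fn main() {
    let mut limiter = InvocationLimiter { missed: 0, limit: 10 };
    limiter.set_missed_storage_invocations(11);
    assert!(limiter.is_limit_reached());
}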
-pub(crate) fn print_debug_if_needed( +pub(crate) fn print_debug_if_needed( hook: &VmHook, state: &VmLocalStateData<'_>, - memory: &SimpleMemory, + memory: &SimpleMemory, ) { let log = match hook { VmHook::DebugLog => get_debug_log(state, memory), @@ -163,3 +173,33 @@ pub(crate) fn computational_gas_price( }; base_price + precompile_price } + +pub(crate) fn gas_spent_on_bytecodes_and_long_messages_this_opcode( + state: &VmLocalStateData<'_>, + data: &BeforeExecutionData, +) -> u32 { + if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) { + let current_stack = state.vm_local_state.callstack.get_current_stack(); + // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas. + // Note, that if there is less gas left than requested to burn it will be burnt anyway. + if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS + || current_stack.this_address == L1_MESSENGER_ADDRESS + { + std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining) + } else { + 0 + } + } else { + 0 + } +} + +pub(crate) fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: MemoryPage) -> u32 { + match far_call_abi.forwarding_mode { + FarCallForwardPageType::ForwardFatPointer => { + far_call_abi.memory_quasi_fat_pointer.memory_page + } + FarCallForwardPageType::UseAuxHeap => aux_heap_page_from_base(base_page).0, + FarCallForwardPageType::UseHeap => heap_page_from_base(base_page).0, + } +} diff --git a/core/lib/vm/src/oracles/tracer/validation.rs b/core/lib/vm/src/oracles/tracer/validation.rs index 215b1db8ebaf..0c338b20fb95 100644 --- a/core/lib/vm/src/oracles/tracer/validation.rs +++ b/core/lib/vm/src/oracles/tracer/validation.rs @@ -1,25 +1,25 @@ -use std::collections::HashSet; use std::fmt; use std::fmt::Display; +use std::{collections::HashSet, marker::PhantomData}; use crate::{ errors::VmRevertReasonParsingResult, + history_recorder::HistoryMode, memory::SimpleMemory, oracles::tracer::{ utils::{computational_gas_price, print_debug_if_needed, VmHook}, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, }, - utils::{aux_heap_page_from_base, heap_page_from_base}, }; use zk_evm::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, - aux_structures::MemoryPage, - zkevm_opcode_defs::{ContextOpcode, FarCallABI, FarCallForwardPageType, LogOpcode, Opcode}, + zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; +use crate::oracles::tracer::{utils::get_calldata_page_via_abi, StorageInvocationTracer}; use crate::storage::StoragePtr; use zksync_config::constants::{ ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, @@ -130,7 +130,7 @@ fn valid_eth_token_call(address: Address, msg_sender: Address) -> bool { /// Tracer that is used to ensure that the validation adheres to all the rules /// to prevent DDoS attacks on the server. 
#[derive(Clone)] -pub struct ValidationTracer<'a> { +pub struct ValidationTracer<'a, H> { // A copy of it should be used in the Storage oracle pub storage: StoragePtr<'a>, pub validation_mode: ValidationTracerMode, @@ -145,9 +145,11 @@ pub struct ValidationTracer<'a> { trusted_address_slots: HashSet<(Address, U256)>, computational_gas_used: u32, computational_gas_limit: u32, + + _marker: PhantomData, } -impl fmt::Debug for ValidationTracer<'_> { +impl fmt::Debug for ValidationTracer<'_, H> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidationTracer") .field("storage", &"StoragePtr") @@ -188,7 +190,7 @@ pub struct NewTrustedValidationItems { type ValidationRoundResult = Result; -impl<'a> ValidationTracer<'a> { +impl<'a, H: HistoryMode> ValidationTracer<'a, H> { pub fn new(storage: StoragePtr<'a>, params: ValidationTracerParams) -> Self { ValidationTracer { storage, @@ -204,6 +206,8 @@ impl<'a> ValidationTracer<'a> { trusted_address_slots: params.trusted_address_slots, computational_gas_used: 0, computational_gas_limit: params.computational_gas_limit, + + _marker: PhantomData, } } @@ -305,7 +309,7 @@ impl<'a> ValidationTracer<'a> { &mut self, state: VmLocalStateData<'_>, data: BeforeExecutionData, - memory: &SimpleMemory, + memory: &SimpleMemory, ) -> ValidationRoundResult { if self.computational_gas_used > self.computational_gas_limit { return Err(ViolatedValidationRule::TookTooManyComputationalGas( @@ -362,7 +366,6 @@ impl<'a> ValidationTracer<'a> { return Err(ViolatedValidationRule::TouchedUnallowedContext); } ContextOpcode::ErgsLeft => { - // T } _ => {} } @@ -398,10 +401,10 @@ impl<'a> ValidationTracer<'a> { } } -impl Tracer for ValidationTracer<'_> { +impl Tracer for ValidationTracer<'_, H> { const CALL_BEFORE_EXECUTION: bool = true; - type SupportedMemory = SimpleMemory; + type SupportedMemory = SimpleMemory; fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} fn after_decoding( &mut self, @@ -467,21 +470,13 @@ impl Tracer for ValidationTracer<'_> { } } -fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: MemoryPage) -> u32 { - match far_call_abi.forwarding_mode { - FarCallForwardPageType::ForwardFatPointer => { - far_call_abi.memory_quasi_fat_pointer.memory_page - } - FarCallForwardPageType::UseAuxHeap => aux_heap_page_from_base(base_page).0, - FarCallForwardPageType::UseHeap => heap_page_from_base(base_page).0, - } -} - -impl ExecutionEndTracer for ValidationTracer<'_> { +impl ExecutionEndTracer for ValidationTracer<'_, H> { fn should_stop_execution(&self) -> bool { self.should_stop_execution || self.validation_error.is_some() } } -impl PendingRefundTracer for ValidationTracer<'_> {} -impl PubdataSpentTracer for ValidationTracer<'_> {} +impl PendingRefundTracer for ValidationTracer<'_, H> {} +impl PubdataSpentTracer for ValidationTracer<'_, H> {} + +impl StorageInvocationTracer for ValidationTracer<'_, H> {} diff --git a/core/lib/vm/src/pubdata_utils.rs b/core/lib/vm/src/pubdata_utils.rs index a384d50cd4f3..630053f9ab39 100644 --- a/core/lib/vm/src/pubdata_utils.rs +++ b/core/lib/vm/src/pubdata_utils.rs @@ -1,3 +1,4 @@ +use crate::history_recorder::HistoryMode; use crate::oracles::storage::storage_key_of_log; use crate::utils::collect_storage_log_queries_after_timestamp; use crate::VmInstance; @@ -8,7 +9,7 @@ use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; use 
zksync_utils::bytecode::bytecode_len_in_bytes; -impl<'a> VmInstance<'a> { +impl VmInstance<'_, H> { pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 { let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); @@ -68,13 +69,7 @@ impl<'a> VmInstance<'a> { }; let storage_logs = collect_storage_log_queries_after_timestamp( - &self - .state - .storage - .frames_stack - .inner() - .current_frame() - .forward, + self.state.storage.frames_stack.forward().current_frame(), from_timestamp, ); let (_, deduplicated_logs) = diff --git a/core/lib/vm/src/refunds.rs b/core/lib/vm/src/refunds.rs index 9d2eab776d41..ce0cda572396 100644 --- a/core/lib/vm/src/refunds.rs +++ b/core/lib/vm/src/refunds.rs @@ -1,3 +1,4 @@ +use crate::history_recorder::HistoryMode; use crate::vm_with_bootloader::{ eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, }; @@ -6,7 +7,7 @@ use zk_evm::aux_structures::Timestamp; use zksync_types::U256; use zksync_utils::ceil_div_u256; -impl<'a> VmInstance<'a> { +impl VmInstance<'_, H> { pub(crate) fn tx_body_refund( &self, from_timestamp: Timestamp, @@ -184,6 +185,7 @@ impl<'a> VmInstance<'a> { // overhead.as_u32() } + /// Returns the given transactions' gas limit - by reading it directly from the VM memory. pub(crate) fn get_tx_gas_limit(&self, tx_index: usize) -> u32 { let tx_description_offset = self.bootloader_state.get_tx_description_offset(tx_index); self.state diff --git a/core/lib/vm/src/storage.rs b/core/lib/vm/src/storage.rs index 2aeba1140488..5a7c2d5c8e0e 100644 --- a/core/lib/vm/src/storage.rs +++ b/core/lib/vm/src/storage.rs @@ -7,17 +7,29 @@ use zksync_state::storage_view::StorageView; use zksync_types::{get_known_code_key, StorageKey, StorageValue, ZkSyncReadStorage, H256}; pub trait Storage: Debug + Sync + Send { + /// Returns a value from a given key. If value never existed, returns 0. fn get_value(&mut self, key: &StorageKey) -> StorageValue; - // Returns the original value. + // Sets the new value under a given key - returns the original value. fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue; + /// The function returns true if it's the first time writing to this storage slot. + /// The initial write uses 64 gas, while subsequent writes use only 40. fn is_write_initial(&mut self, key: &StorageKey) -> bool; fn load_factory_dep(&mut self, hash: H256) -> Option>; - fn number_of_updated_storage_slots(&self) -> usize; + fn number_of_updated_storage_slots(&self) -> usize { + self.get_modified_storage_keys().len() + } fn get_modified_storage_keys(&self) -> &HashMap; - fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool; + /// Returns whether a bytecode hash is "known", i.e. whether + /// it has been published on L1 + fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool { + let code_key = get_known_code_key(bytecode_hash); + self.get_value(&code_key) != H256::zero() + } + + fn missed_storage_invocations(&self) -> usize; } impl Storage for StorageView { @@ -38,19 +50,12 @@ impl Storage for StorageView { self.load_factory_dep(hash) } - fn number_of_updated_storage_slots(&self) -> usize { - self.get_modified_storage_keys().len() - } - fn get_modified_storage_keys(&self) -> &HashMap { self.get_modified_storage_keys() } - /// Returns whether a bytecode hash is "known", i.e. 
whether - /// it has been published on L1 - fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool { - let code_key = get_known_code_key(bytecode_hash); - self.get_value(&code_key) != H256::zero() + fn missed_storage_invocations(&self) -> usize { + self.storage_invocations_missed } } diff --git a/core/lib/vm/src/test_utils.rs b/core/lib/vm/src/test_utils.rs index 907dcae8f0fc..9404f4359b22 100644 --- a/core/lib/vm/src/test_utils.rs +++ b/core/lib/vm/src/test_utils.rs @@ -9,9 +9,7 @@ use std::collections::HashMap; use itertools::Itertools; -use zk_evm::{ - aux_structures::Timestamp, reference_impls::event_sink::ApplicationData, vm_state::VmLocalState, -}; +use zk_evm::{aux_structures::Timestamp, vm_state::VmLocalState}; use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract}; use zksync_types::{ ethabi::{Address, Token}, @@ -29,7 +27,9 @@ use zksync_utils::{ /// The tests here help us with the testing the VM use crate::{ event_sink::InMemoryEventSink, - history_recorder::{FrameManager, HistoryRecorder}, + history_recorder::{ + AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode, HistoryRecorder, + }, memory::SimpleMemory, VmInstance, }; @@ -56,43 +56,43 @@ impl PartialEq for ModifiedKeysMap { } #[derive(Clone, PartialEq, Debug)] -pub struct DecommitterTestInnerState { +pub struct DecommitterTestInnerState { /// There is no way to "trully" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub modified_storage_keys: ModifiedKeysMap, - pub known_bytecodes: HistoryRecorder>>, - pub decommitted_code_hashes: HistoryRecorder>, + pub known_bytecodes: HistoryRecorder>, H>, + pub decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, } #[derive(Clone, PartialEq, Debug)] -pub struct StorageOracleInnerState { +pub struct StorageOracleInnerState { /// There is no way to "trully" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub modified_storage_keys: ModifiedKeysMap, - pub frames_stack: HistoryRecorder>>, + pub frames_stack: AppDataFrameManagerWithHistory, } #[derive(Clone, PartialEq, Debug)] -pub struct PrecompileProcessorTestInnerState { - pub timestamp_history: HistoryRecorder>, +pub struct PrecompileProcessorTestInnerState { + pub timestamp_history: HistoryRecorder, H>, } /// A struct that encapsulates the state of the VM's oracles /// The state is to be used in tests. #[derive(Clone, PartialEq, Debug)] -pub struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, +pub struct VmInstanceInnerState { + event_sink: InMemoryEventSink, + precompile_processor_state: PrecompileProcessorTestInnerState, + memory: SimpleMemory, + decommitter_state: DecommitterTestInnerState, + storage_oracle_state: StorageOracleInnerState, local_state: VmLocalState, } -impl<'a> VmInstance<'a> { +impl VmInstance<'_, H> { /// This method is mostly to be used in tests. It dumps the inner state of all the oracles and the VM itself. 
- pub fn dump_inner_state(&self) -> VmInstanceInnerState { + pub fn dump_inner_state(&self) -> VmInstanceInnerState { let event_sink = self.state.event_sink.clone(); let precompile_processor_state = PrecompileProcessorTestInnerState { timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), diff --git a/core/lib/vm/src/tests/bootloader.rs b/core/lib/vm/src/tests/bootloader.rs index 7484bf40ed05..27f14f457943 100644 --- a/core/lib/vm/src/tests/bootloader.rs +++ b/core/lib/vm/src/tests/bootloader.rs @@ -2,100 +2,81 @@ //! Tests for the bootloader //! The description for each of the tests can be found in the corresponding `.yul` file. //! -#![cfg_attr(test, allow(unused_imports))] - -use crate::errors::{VmRevertReason, VmRevertReasonParsingResult}; -use crate::memory::SimpleMemory; -use crate::oracles::tracer::{ - ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, TransactionResultTracer, -}; -use crate::storage::{Storage, StoragePtr}; -use crate::test_utils::{ - get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, - mock_loadnext_test_call, VmInstanceInnerState, -}; -use crate::utils::{ - create_test_block_params, insert_system_contracts, read_bootloader_test_code, - BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, +use itertools::Itertools; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, }; -use crate::vm::{ - get_vm_hook_params, tx_has_failed, VmBlockResult, VmExecutionStopReason, ZkSyncVmState, - MAX_MEM_SIZE_BYTES, +use tempfile::TempDir; + +use crate::{ + errors::VmRevertReason, + history_recorder::HistoryMode, + oracles::tracer::{StorageInvocationTracer, TransactionResultTracer}, + storage::{Storage, StoragePtr}, + test_utils::{ + get_create_execute, get_create_zksync_address, get_deploy_tx, get_error_tx, + mock_loadnext_test_call, + }, + transaction_data::TransactionData, + utils::{ + create_test_block_params, insert_system_contracts, read_bootloader_test_code, + BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, + }, + vm::{tx_has_failed, VmExecutionStopReason, ZkSyncVmState}, + vm_with_bootloader::{ + bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, + push_raw_transaction_to_bootloader_memory, BlockContext, BlockContextMode, + BootloaderJobType, TxExecutionMode, + }, + vm_with_bootloader::{ + init_vm_inner, push_transaction_to_bootloader_memory, DerivedBlockContext, + BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, + }, + HistoryEnabled, OracleTools, TxRevertReason, VmBlockResult, VmExecutionResult, VmInstance, }; -use crate::vm_with_bootloader::{ - bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, - init_vm_inner, push_raw_transaction_to_bootloader_memory, - push_transaction_to_bootloader_memory, BlockContext, DerivedBlockContext, BOOTLOADER_HEAP_PAGE, - BOOTLOADER_TX_DESCRIPTION_OFFSET, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, + +use zk_evm::{ + aux_structures::Timestamp, block_properties::BlockProperties, zkevm_opcode_defs::FarCallOpcode, }; -use crate::vm_with_bootloader::{BlockContextMode, BootloaderJobType, TxExecutionMode}; -use crate::{test_utils, VmInstance}; -use crate::{TxRevertReason, VmExecutionResult}; -use itertools::Itertools; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::ops::{Add, DivAssign}; -use std::rc::Rc; -use tempfile::TempDir; -use zk_evm::abstractions::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, - MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES, + +use 
zksync_types::{ + block::DeployedContract, + ethabi::encode, + get_is_account_key, + storage_writes_deduplicator::StorageWritesDeduplicator, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + tx::tx_execution_info::TxExecutionStatus, + utils::{ + deployed_address_create, storage_key_for_eth_balance, + storage_key_for_standard_token_balance, + }, + vm_trace::{Call, CallType}, + Execute, L1BatchNumber, L1TxCommonData, StorageKey, StorageLog, L1_MESSENGER_ADDRESS, + {ethabi::Token, AccountTreeId, Address, ExecuteTransactionCommon, Transaction, H256, U256}, + {fee::Fee, l2_to_l1_log::L2ToL1Log}, + { + get_code_key, get_known_code_key, get_nonce_key, Nonce, BOOTLOADER_ADDRESS, H160, + L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, SYSTEM_CONTEXT_ADDRESS, + }, }; -use zk_evm::aux_structures::Timestamp; -use zk_evm::block_properties::BlockProperties; -use zk_evm::opcodes::execution::ret; -use zk_evm::sha3::digest::typenum::U830; -use zk_evm::witness_trace::VmWitnessTracer; -use zk_evm::zkevm_opcode_defs::decoding::VmEncodingMode; -use zk_evm::zkevm_opcode_defs::FatPointer; -use zksync_types::block::DeployedContract; -use zksync_types::ethabi::encode; -use zksync_types::l1::L1Tx; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use zksync_utils::test_utils::LoadnextContractExecutionParams; + use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, bytes_to_le_words, h256_to_u256, - u256_to_h256, + bytecode::CompressedBytecodeInfo, + test_utils::LoadnextContractExecutionParams, + {bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}, }; -use zksync_utils::{h256_to_account_address, u256_to_account_address}; -use crate::{transaction_data::TransactionData, OracleTools}; -use std::time; use zksync_contracts::{ - default_erc20_bytecode, get_loadnext_contract, known_codes_contract, load_contract, - load_sys_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, - BaseSystemContracts, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE, -}; -use zksync_crypto::rand::random; -use zksync_state::secondary_storage::SecondaryStateStorage; -use zksync_state::storage_view::StorageView; -use zksync_storage::db::Database; -use zksync_storage::RocksDB; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; -use zksync_types::utils::{ - deployed_address_create, storage_key_for_eth_balance, storage_key_for_standard_token_balance, -}; -use zksync_types::{ - ethabi::Token, AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, - L2ChainId, PackedEthSignature, StorageKey, StorageLogQueryType, Transaction, H256, - KNOWN_CODES_STORAGE_ADDRESS, U256, -}; -use zksync_types::{fee::Fee, l2::L2Tx, l2_to_l1_log::L2ToL1Log}; -use zksync_types::{ - get_code_key, get_is_account_key, get_known_code_key, get_nonce_key, L1TxCommonData, Nonce, - PriorityOpId, SerialId, StorageLog, ZkSyncReadStorage, BOOTLOADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, H160, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, - MAX_TXS_IN_BLOCK, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, - SYSTEM_CONTEXT_MINIMAL_BASE_FEE, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, + get_loadnext_contract, load_contract, read_bytecode, SystemContractCode, + PLAYGROUND_BLOCK_BOOTLOADER_CODE, }; -use once_cell::sync::Lazy; -use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use 
zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; +use zksync_storage::{db::Database, RocksDB}; -fn run_vm_with_custom_factory_deps<'a>( - oracle_tools: &'a mut OracleTools<'a, false>, +fn run_vm_with_custom_factory_deps<'a, H: HistoryMode>( + oracle_tools: &'a mut OracleTools<'a, false, H>, block_context: BlockContext, block_properties: &'a BlockProperties, encoded_tx: Vec, @@ -130,7 +111,7 @@ fn run_vm_with_custom_factory_deps<'a>( Timestamp(0), ); - let result = vm.execute_next_tx(u32::MAX).err(); + let result = vm.execute_next_tx(u32::MAX, false).err(); assert_eq!(expected_error, result); } @@ -149,7 +130,7 @@ fn test_dummy_bootloader() { let mut storage_accessor = StorageView::new(&raw_storage); let storage_ptr: &mut dyn Storage = &mut storage_accessor; - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let (block_context, block_properties) = create_test_block_params(); let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); let bootloader_code = read_bootloader_test_code("dummy"); @@ -193,7 +174,7 @@ fn test_bootloader_out_of_gas() { let mut storage_accessor = StorageView::new(&raw_storage); let storage_ptr: &mut dyn Storage = &mut storage_accessor; - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let (block_context, block_properties) = create_test_block_params(); let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); @@ -221,7 +202,10 @@ fn test_bootloader_out_of_gas() { assert_eq!(res.revert_reason, Some(TxRevertReason::BootloaderOutOfGas)); } -fn verify_required_storage(state: &ZkSyncVmState<'_>, required_values: Vec<(H256, StorageKey)>) { +fn verify_required_storage( + state: &ZkSyncVmState<'_, H>, + required_values: Vec<(H256, StorageKey)>, +) { for (required_value, key) in required_values { let current_value = state.storage.storage.read_from_storage(&key); @@ -233,11 +217,15 @@ fn verify_required_storage(state: &ZkSyncVmState<'_>, required_values: Vec<(H256 } } -fn verify_required_memory(state: &ZkSyncVmState<'_>, required_values: Vec<(U256, u32, u32)>) { +fn verify_required_memory( + state: &ZkSyncVmState<'_, H>, + required_values: Vec<(U256, u32, u32)>, +) { for (required_value, memory_page, cell) in required_values { let current_value = state .memory - .dump_page_content_as_u256_words(memory_page, cell..cell + 1)[0]; + .read_slot(memory_page as usize, cell as usize) + .value; assert_eq!(current_value, required_value); } } @@ -284,7 +272,7 @@ fn test_default_aa_interaction() { let key = storage_key_for_eth_balance(&sender_address); storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -297,7 +285,7 @@ fn test_default_aa_interaction() { push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let tx_execution_result = vm - .execute_next_tx(u32::MAX) + .execute_next_tx(u32::MAX, false) .expect("Bootloader failed while processing transaction"); assert_eq!( @@ -372,7 +360,7 @@ fn execute_vm_with_predetermined_refund( storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); } - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ 
-458,7 +446,7 @@ fn test_predetermined_refunded_gas() { let key = storage_key_for_eth_balance(&sender_address); storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -472,7 +460,7 @@ fn test_predetermined_refunded_gas() { push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let tx_execution_result = vm - .execute_next_tx(u32::MAX) + .execute_next_tx(u32::MAX, false) .expect("Bootloader failed while processing transaction"); assert_eq!( @@ -602,7 +590,7 @@ fn execute_vm_with_possible_rollbacks( let key = storage_key_for_eth_balance(&sender_address); storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -623,7 +611,7 @@ fn execute_vm_with_possible_rollbacks( None, ); - match vm.execute_next_tx(u32::MAX) { + match vm.execute_next_tx(u32::MAX, false) { Err(reason) => { assert_eq!(test_info.rejection_reason(), Some(reason)); } @@ -762,18 +750,45 @@ fn test_vm_rollbacks() { let incorrect_nonce = TxRevertReason::ValidationFailed(VmRevertReason::General { msg: "Incorrect nonce".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, + 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], }); let reusing_nonce_twice = TxRevertReason::ValidationFailed(VmRevertReason::General { msg: "Reusing the same nonce twice".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, + 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, + 0, 0, 0, + ], }); let signature_length_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { msg: "Signature length is incorrect".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, + 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, + 116, 0, 0, 0, + ], }); let v_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { msg: "v is neither 27 nor 28".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, + 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], }); let signature_is_incorrect = TxRevertReason::ValidationFailed(VmRevertReason::General { msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), + data: vec![], }); let result_with_rollbacks = execute_vm_with_possible_rollbacks( @@ -986,7 +1001,6 @@ fn insert_contracts( raw_storage.process_transaction_logs(&logs); for (contract, _) in contracts { - raw_storage.store_contract(*contract.account_id.address(), contract.bytecode.clone()); raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); } raw_storage.save(L1BatchNumber(0)); @@ -1040,8 +1054,8 @@ fn get_nonce_holder_test_tx( } } -fn run_vm_with_raw_tx<'a>( - oracle_tools: &'a mut OracleTools<'a, false>, +fn run_vm_with_raw_tx<'a, H: HistoryMode>( + oracle_tools: &'a mut OracleTools<'a, false, H>, block_context: DerivedBlockContext, block_properties: &'a BlockProperties, tx: TransactionData, @@ -1108,17 +1122,19 @@ fn test_nonce_holder() { comment: &'static str| { let tx = get_nonce_holder_test_tx(nonce, account_address, test_mode, &block_context); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let (result, tx_has_failed) = run_vm_with_raw_tx(&mut oracle_tools, block_context, &block_properties, tx); if let Some(msg) = error_message { - let expected_error = TxRevertReason::ValidationFailed(VmRevertReason::General { msg }); + let expected_error = + TxRevertReason::ValidationFailed(VmRevertReason::General { msg, data: vec![] }); assert_eq!( result .revert_reason .expect("No revert reason") - .revert_reason, - expected_error, + .revert_reason + .to_string(), + expected_error.to_string(), "{}", comment ); @@ -1227,7 +1243,7 @@ fn test_l1_tx_execution() { let mut storage_accessor = StorageView::new(&raw_storage); let storage_ptr: &mut dyn Storage = &mut storage_accessor; - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let (block_context, block_properties) = create_test_block_params(); // Here instead of marking code hash via the bootloader means, we will @@ -1281,7 +1297,7 @@ fn test_l1_tx_execution() { None, ); - let res = vm.execute_next_tx(u32::MAX).unwrap(); + let res = vm.execute_next_tx(u32::MAX, false).unwrap(); // The code hash of the deployed contract should be marked as republished. 
let known_codes_key = get_known_code_key(&contract_code_hash); @@ -1304,7 +1320,7 @@ fn test_l1_tx_execution() { push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let res = StorageWritesDeduplicator::apply_on_empty_state( - &vm.execute_next_tx(u32::MAX) + &vm.execute_next_tx(u32::MAX, false) .unwrap() .result .logs @@ -1315,7 +1331,7 @@ fn test_l1_tx_execution() { let tx = get_l1_execute_test_contract_tx(deployed_address, false); push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let res = StorageWritesDeduplicator::apply_on_empty_state( - &vm.execute_next_tx(u32::MAX) + &vm.execute_next_tx(u32::MAX, false) .unwrap() .result .logs @@ -1327,7 +1343,7 @@ fn test_l1_tx_execution() { push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let res = StorageWritesDeduplicator::apply_on_empty_state( - &vm.execute_next_tx(u32::MAX) + &vm.execute_next_tx(u32::MAX, false) .unwrap() .result .logs @@ -1346,7 +1362,7 @@ fn test_l1_tx_execution() { _ => unreachable!(), } push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); - let execution_result = vm.execute_next_tx(u32::MAX).unwrap(); + let execution_result = vm.execute_next_tx(u32::MAX, false).unwrap(); // The method is not payable, so the transaction with non-zero value should fail assert_eq!( execution_result.status, @@ -1376,7 +1392,7 @@ fn test_invalid_bytecode() { |bytecode_hash: H256, expected_revert_reason: Option| { let mut storage_accessor = StorageView::new(&raw_storage); let storage_ptr: &mut dyn Storage = &mut storage_accessor; - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( h256_to_u256(bytecode_hash), @@ -1393,9 +1409,10 @@ fn test_invalid_bytecode() { ); }; - let failed_to_mark_factory_deps = |msg: &str| { + let failed_to_mark_factory_deps = |msg: &str, data: Vec| { TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { msg: msg.to_string(), + data, }) }; @@ -1418,6 +1435,13 @@ fn test_invalid_bytecode() { ]), Some(failed_to_mark_factory_deps( "Code length in words must be odd", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, + 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, + 32, 98, 101, 32, 111, 100, 100, + ], )), ); @@ -1430,6 +1454,14 @@ fn test_invalid_bytecode() { ]), Some(failed_to_mark_factory_deps( "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], )), ); @@ -1442,6 +1474,14 @@ fn test_invalid_bytecode() { ]), Some(failed_to_mark_factory_deps( "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], )), ); } @@ -1456,6 +1496,7 @@ fn test_tracing_of_execution_errors() { let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); let mut raw_storage = SecondaryStateStorage::new(db); insert_system_contracts(&mut raw_storage); + let private_key = H256::random(); let contract_address = Address::random(); let error_contract = DeployedContract { @@ -1464,7 +1505,7 @@ fn test_tracing_of_execution_errors() { }; let tx = get_error_tx( - H256::random(), + private_key, Nonce(0), contract_address, Fee { @@ -1482,7 +1523,7 @@ fn test_tracing_of_execution_errors() { let key = storage_key_for_eth_balance(&tx.common_data.initiator_address); storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -1499,7 +1540,7 @@ fn test_tracing_of_execution_errors() { None, ); - let mut tracer = TransactionResultTracer::default(); + let mut tracer = TransactionResultTracer::new(usize::MAX, false); assert_eq!( vm.execute_with_custom_tracer(&mut tracer), VmExecutionStopReason::VmFinished, @@ -1512,7 +1553,14 @@ fn test_tracing_of_execution_errors() { assert_eq!( revert_reason, VmRevertReason::General { - msg: "short".to_string() + msg: "short".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, + 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + ], } ) } @@ -1521,6 +1569,40 @@ fn test_tracing_of_execution_errors() { tracer.revert_reason ), } + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context, Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + let tx = get_error_tx( + private_key, + Nonce(1), + contract_address, + Fee { + gas_limit: U256::from(1000000u32), + max_fee_per_gas: U256::from(10000000000u64), + max_priority_fee_per_gas: U256::zero(), + gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), + }, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &tx.into(), + TxExecutionMode::VerifyExecute, + None, + ); + + let mut tracer = TransactionResultTracer::new(10, false); + assert_eq!( + vm.execute_with_custom_tracer(&mut tracer), + VmExecutionStopReason::TracerRequestedStop, + ); + assert!(tracer.is_limit_reached()); } /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
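The long `data` vectors added to these test expectations are not arbitrary bytes: they are the ABI-encoded `Error(string)` payloads that the reverting contracts return, i.e. the 4-byte selector `0x08c379a0` followed by a 32-byte offset, the string length, and the message right-padded to a 32-byte boundary. A hedged, self-contained sketch that reproduces the layout (the helper below is written for illustration and is not a function from this codebase):

fn word(value: u64) -> [u8; 32] {
    let mut w = [0u8; 32];
    w[24..].copy_from_slice(&value.to_be_bytes());
    w
}

fn encode_error_string(msg: &str) -> Vec<u8> {
    // keccak256("Error(string)")[..4] == 0x08c379a0, i.e. [8, 195, 121, 160].
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0];
    out.extend_from_slice(&word(32)); // offset of the string data
    out.extend_from_slice(&word(msg.len() as u64)); // string length
    let mut bytes = msg.as_bytes().to_vec();
    bytes.resize((msg.len() + 31) / 32 * 32, 0); // right-pad to a 32-byte boundary
    out.extend_from_slice(&bytes);
    out
}

fn main() {
    // Matches the shape of the "short" payload asserted in test_tracing_of_execution_errors.
    let encoded = encode_error_string("short");
    assert_eq!(encoded[..4], [8, 195, 121, 160]);
    assert_eq!(encoded.len(), 4 + 32 + 32 + 32);
}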
@@ -1550,7 +1632,7 @@ fn test_tx_gas_limit_offset() { ) .into(); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -1614,7 +1696,7 @@ fn test_is_write_initial_behaviour() { let balance_key = storage_key_for_eth_balance(&sender_address); storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); - let mut oracle_tools = OracleTools::new(storage_ptr); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, @@ -1627,7 +1709,7 @@ fn test_is_write_initial_behaviour() { push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); - vm.execute_next_tx(u32::MAX) + vm.execute_next_tx(u32::MAX, false) .expect("Bootloader failed while processing the first transaction"); // Check that `is_write_initial` still returns true for the nonce key. assert!(storage_ptr.is_write_initial(&nonce_key)); @@ -1648,12 +1730,59 @@ pub fn get_l1_tx_with_custom_bytecode_hash( const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; pub fn get_l1_execute_test_contract_tx(deployed_address: Address, with_panic: bool) -> Transaction { - let execute = execute_test_contract(deployed_address, with_panic); + let sender = H160::random(); + get_l1_execute_test_contract_tx_with_sender( + sender, + deployed_address, + with_panic, + U256::zero(), + false, + ) +} + +pub fn get_l1_tx_with_large_output(sender: Address, deployed_address: Address) -> Transaction { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json", + ); + + let function = test_contract.function("longReturnData").unwrap(); + + let calldata = function + .encode_input(&[]) + .expect("failed to encode parameters"); + Transaction { common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(1000000u32), + sender, + gas_limit: U256::from(100000000u32), + gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: deployed_address, + calldata, + value: U256::zero(), + factory_deps: None, + }, + received_timestamp_ms: 0, + } +} + +pub fn get_l1_execute_test_contract_tx_with_sender( + sender: Address, + deployed_address: Address, + with_panic: bool, + value: U256, + payable: bool, +) -> Transaction { + let execute = execute_test_contract(deployed_address, with_panic, value, payable); + + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender, + gas_limit: U256::from(200_000_000u32), gas_per_pubdata_limit: L1_TEST_GAS_PER_PUBDATA_BYTE.into(), + to_mint: value, ..Default::default() }), execute, @@ -1680,6 +1809,10 @@ fn read_test_contract() -> Vec { read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") } +fn read_long_return_data_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/long-return-data/long-return-data.sol/LongReturnData.json") +} + fn read_nonce_holder_tester() -> Vec { read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") } @@ -1690,20 +1823,352 @@ fn read_error_contract() -> Vec { ) } -fn execute_test_contract(address: Address, with_panic: bool) -> Execute { +fn execute_test_contract( + address: Address, + with_panic: bool, + value: U256, + payable: bool, +) -> Execute 
{ let test_contract = load_contract( "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", ); - let function = test_contract.function("incrementWithRevert").unwrap(); + let function = if payable { + test_contract + .function("incrementWithRevertPayable") + .unwrap() + } else { + test_contract.function("incrementWithRevert").unwrap() + }; let calldata = function .encode_input(&[Token::Uint(U256::from(1u8)), Token::Bool(with_panic)]) .expect("failed to encode parameters"); + Execute { contract_address: address, calldata, - value: U256::zero(), + value, factory_deps: None, } } + +#[test] +fn test_call_tracer() { + let sender = H160::random(); + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); + let mut raw_storage = SecondaryStateStorage::new(db); + insert_system_contracts(&mut raw_storage); + + let (block_context, block_properties) = create_test_block_params(); + + let contract_code = read_test_contract(); + let contract_code_hash = hash_bytecode(&contract_code); + let l1_deploy_tx = get_l1_deploy_tx(&contract_code, &[]); + let l1_deploy_tx_data: TransactionData = l1_deploy_tx.clone().into(); + + let sender_address_counter = l1_deploy_tx_data.from(); + let mut storage_accessor = StorageView::new(&raw_storage); + let storage_ptr: &mut dyn Storage = &mut storage_accessor; + + let key = storage_key_for_eth_balance(&sender_address_counter); + storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + oracle_tools.decommittment_processor.populate( + vec![( + h256_to_u256(contract_code_hash), + bytes_to_be_words(contract_code), + )], + Timestamp(0), + ); + + let contract_code = read_long_return_data_contract(); + let contract_code_hash = hash_bytecode(&contract_code); + let l1_deploy_long_return_data_tx = get_l1_deploy_tx(&contract_code, &[]); + oracle_tools.decommittment_processor.populate( + vec![( + h256_to_u256(contract_code_hash), + bytes_to_be_words(contract_code), + )], + Timestamp(0), + ); + + let tx_data: TransactionData = l1_deploy_long_return_data_tx.clone().into(); + let sender_long_return_address = tx_data.from(); + // The contract should be deployed successfully. + let deployed_address_long_return_data = + deployed_address_create(sender_long_return_address, U256::zero()); + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context.into(), Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + + push_transaction_to_bootloader_memory( + &mut vm, + &l1_deploy_tx, + TxExecutionMode::VerifyExecute, + None, + ); + + // The contract should be deployed successfully. + let deployed_address = deployed_address_create(sender_address_counter, U256::zero()); + let res = vm.execute_next_tx(u32::MAX, true).unwrap(); + let calls = res.call_traces; + let mut create_call = None; + // The first MIMIC call is call to value simulator. All calls goes through it. + // The second MIMIC call is call to Deployer contract. + // And only third level call is construct call to the newly deployed contract And we call it create_call. 
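+    // Illustrative sketch only (hedged, not part of this change): with the `Call`/`CallType` shapes
+    // used in this test, the same `create_call` frame could also be located by a small recursive
+    // search instead of the nested loops below; `find_create_call` is a hypothetical helper name.
+    //
+    // fn find_create_call(calls: &[Call]) -> Option<Call> {
+    //     calls.iter().find_map(|call| match call.r#type {
+    //         CallType::Create => Some(call.clone()),
+    //         _ => find_create_call(&call.calls),
+    //     })
+    // }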
+ for call in &calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + for call in &call.calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + for call in &call.calls { + if let CallType::Create = call.r#type { + create_call = Some(call.clone()); + } + } + } + } + } + } + let expected = Call { + r#type: CallType::Create, + to: deployed_address, + from: sender_address_counter, + parent_gas: 0, + gas_used: 0, + gas: 0, + value: U256::zero(), + input: vec![], + output: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + error: None, + revert_reason: None, + calls: vec![], + }; + assert_eq!(create_call.unwrap(), expected); + + push_transaction_to_bootloader_memory( + &mut vm, + &l1_deploy_long_return_data_tx, + TxExecutionMode::VerifyExecute, + None, + ); + + vm.execute_next_tx(u32::MAX, false).unwrap(); + + let tx = get_l1_execute_test_contract_tx_with_sender( + sender, + deployed_address, + false, + U256::from(1u8), + true, + ); + + let tx_data: TransactionData = tx.clone().into(); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + let res = vm.execute_next_tx(u32::MAX, true).unwrap(); + let calls = res.call_traces; + + // We don't want to compare gas used, because it's not fully deterministic. + let expected = Call { + r#type: CallType::Call(FarCallOpcode::Mimic), + to: deployed_address, + from: tx_data.from(), + parent_gas: 0, + gas_used: 0, + gas: 0, + value: U256::from(1), + input: tx_data.data, + output: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, + ], + error: None, + revert_reason: None, + calls: vec![], + }; + + // First loop filter out the bootloaders calls and + // the second loop filters out the calls msg value simulator calls + for call in calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + for call in call.calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + assert_eq!(expected, call); + } + } + } + } + + let tx = get_l1_execute_test_contract_tx_with_sender( + sender, + deployed_address, + true, + U256::from(1u8), + true, + ); + + let tx_data: TransactionData = tx.clone().into(); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + let res = vm.execute_next_tx(u32::MAX, true).unwrap(); + let calls = res.call_traces; + + let expected = Call { + r#type: CallType::Call(FarCallOpcode::Mimic), + to: deployed_address, + from: tx_data.from(), + parent_gas: 257030, + gas_used: 348, + gas: 253008, + value: U256::from(1u8), + input: tx_data.data, + output: vec![], + error: None, + revert_reason: Some("This method always reverts".to_string()), + calls: vec![], + }; + + for call in calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + for call in call.calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + assert_eq!(expected, call); + } + } + } + } + + let tx = get_l1_tx_with_large_output(sender, deployed_address_long_return_data); + + let tx_data: TransactionData = tx.clone().into(); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + assert_ne!(deployed_address_long_return_data, deployed_address); + let res = vm.execute_next_tx(u32::MAX, true).unwrap(); + let calls = res.call_traces; + for call in calls { + if let 
CallType::Call(FarCallOpcode::Mimic) = call.r#type { + for call in call.calls { + if let CallType::Call(FarCallOpcode::Mimic) = call.r#type { + assert_eq!(call.input, tx_data.data); + assert_eq!( + call.revert_reason, + Some("Unknown revert reason".to_string()) + ); + } + } + } + } +} + +#[test] +fn test_get_used_contracts() { + // get block context + let (block_context, block_properties) = create_test_block_params(); + let block_context: DerivedBlockContext = block_context.into(); + + // insert system contracts to avoid vm errors during initialization + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); + let mut raw_storage = SecondaryStateStorage::new(db); + insert_system_contracts(&mut raw_storage); + + // get oracle tools + let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + + // init vm + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context, Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + + assert!(known_bytecodes_without_aa_code(&vm).is_empty()); + + // create and push and execute some not-empty factory deps transaction with success status + // to check that get_used_contracts() updates + let contract_code = read_test_contract(); + let contract_code_hash = hash_bytecode(&contract_code); + let tx1 = get_l1_deploy_tx(&contract_code, &[]); + + push_transaction_to_bootloader_memory(&mut vm, &tx1, TxExecutionMode::VerifyExecute, None); + + let res1 = vm.execute_next_tx(u32::MAX, true).unwrap(); + assert_eq!(res1.status, TxExecutionStatus::Success); + assert!(vm + .get_used_contracts() + .contains(&h256_to_u256(contract_code_hash))); + + assert_eq!( + vm.get_used_contracts() + .into_iter() + .collect::>(), + known_bytecodes_without_aa_code(&vm) + .keys() + .cloned() + .collect::>() + ); + + // create push and execute some non-empty factory deps transaction that fails + // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) + + let mut tx2 = tx1; + tx2.execute.contract_address = L1_MESSENGER_ADDRESS; + + let calldata = vec![1, 2, 3]; + let big_calldata: Vec = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + + tx2.execute.calldata = big_calldata; + tx2.execute.factory_deps = Some(vec![vec![1; 32]]); + + push_transaction_to_bootloader_memory(&mut vm, &tx2, TxExecutionMode::VerifyExecute, None); + + let res2 = vm.execute_next_tx(u32::MAX, false).unwrap(); + + assert_eq!(res2.status, TxExecutionStatus::Failure); + + for factory_dep in tx2.execute.factory_deps.unwrap() { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = h256_to_u256(hash); + assert!(known_bytecodes_without_aa_code(&vm) + .keys() + .contains(&hash_to_u256)); + assert!(!vm.get_used_contracts().contains(&hash_to_u256)); + } +} + +fn known_bytecodes_without_aa_code(vm: &VmInstance) -> HashMap> { + let mut known_bytecodes_without_aa_code = vm + .state + .decommittment_processor + .known_bytecodes + .inner() + .clone(); + + known_bytecodes_without_aa_code + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) + .unwrap(); + + known_bytecodes_without_aa_code +} diff --git a/core/lib/vm/src/utils.rs b/core/lib/vm/src/utils.rs index 8d75aff725e0..eddd42135ad0 100644 --- a/core/lib/vm/src/utils.rs +++ b/core/lib/vm/src/utils.rs 
@@ -1,4 +1,8 @@ -use crate::{memory::SimpleMemory, vm_with_bootloader::BlockContext}; +use crate::history_recorder::HistoryMode; +use crate::{ + memory::SimpleMemory, oracles::tracer::PubdataSpentTracer, vm_with_bootloader::BlockContext, + VmInstance, +}; use once_cell::sync::Lazy; use zk_evm::block_properties::BlockProperties; @@ -12,7 +16,8 @@ use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; use zksync_state::secondary_storage::SecondaryStateStorage; use zksync_types::{ get_code_key, get_system_context_init_logs, system_contracts::get_system_smart_contracts, - Address, L1BatchNumber, StorageLog, StorageLogQuery, H160, H256, MAX_L2_TX_GAS_LIMIT, U256, + Address, L1BatchNumber, L2ChainId, StorageLog, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, + U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; @@ -52,8 +57,8 @@ pub const fn aux_heap_page_from_base(base: MemoryPage) -> MemoryPage { MemoryPage(base.0 + 3) } -pub(crate) fn dump_memory_page_using_primitive_value( - memory: &SimpleMemory, +pub(crate) fn dump_memory_page_using_primitive_value( + memory: &SimpleMemory, ptr: PrimitiveValue, ) -> Vec { if !ptr.is_pointer { @@ -63,8 +68,8 @@ pub(crate) fn dump_memory_page_using_primitive_value( dump_memory_page_using_fat_pointer(memory, fat_ptr) } -pub(crate) fn dump_memory_page_using_fat_pointer( - memory: &SimpleMemory, +pub(crate) fn dump_memory_page_using_fat_pointer( + memory: &SimpleMemory, fat_ptr: FatPointer, ) -> Vec { dump_memory_page_by_offset_and_length( @@ -75,8 +80,8 @@ pub(crate) fn dump_memory_page_using_fat_pointer( ) } -pub(crate) fn dump_memory_page_by_offset_and_length( - memory: &SimpleMemory, +pub(crate) fn dump_memory_page_by_offset_and_length( + memory: &SimpleMemory, page: u32, offset: usize, length: usize, @@ -254,7 +259,7 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { pub fn insert_system_contracts(raw_storage: &mut SecondaryStateStorage) { let contracts = get_system_smart_contracts(); - let system_context_init_log = get_system_context_init_logs(H256::from_low_u64_be(270)); + let system_context_init_log = get_system_context_init_logs(L2ChainId(270)); let logs: Vec = contracts .iter() @@ -267,7 +272,6 @@ pub fn insert_system_contracts(raw_storage: &mut SecondaryStateStorage) { raw_storage.process_transaction_logs(&logs); for contract in contracts { - raw_storage.store_contract(*contract.account_id.address(), contract.bytecode.clone()); raw_storage.store_factory_dep(hash_bytecode(&contract.bytecode), contract.bytecode); } raw_storage.save(L1BatchNumber(0)) @@ -279,3 +283,26 @@ pub fn read_bootloader_test_code(test: &str) -> Vec { test, test )) } + +pub(crate) fn calculate_computational_gas_used, H: HistoryMode>( + vm: &VmInstance<'_, H>, + tracer: &T, + gas_remaining_before: u32, + spent_pubdata_counter_before: u32, +) -> u32 { + let total_gas_used = gas_remaining_before + .checked_sub(vm.gas_remaining()) + .expect("underflow"); + let gas_used_on_pubdata = + tracer.gas_spent_on_pubdata(&vm.state.local_state) - spent_pubdata_counter_before; + total_gas_used + .checked_sub(gas_used_on_pubdata) + .unwrap_or_else(|| { + vlog::error!( + "Gas used on pubdata is greater than total gas used. 
On pubdata: {}, total: {}", + gas_used_on_pubdata, + total_gas_used + ); + 0 + }) +} diff --git a/core/lib/vm/src/vm.rs b/core/lib/vm/src/vm.rs index 2abba2dd3215..a3ee48b9d314 100644 --- a/core/lib/vm/src/vm.rs +++ b/core/lib/vm/src/vm.rs @@ -9,7 +9,7 @@ use zk_evm::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGIS use zksync_config::constants::MAX_TXS_IN_BLOCK; use zksync_types::l2_to_l1_log::L2ToL1Log; use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; -use zksync_types::vm_trace::VmExecutionTrace; +use zksync_types::vm_trace::{Call, VmExecutionTrace, VmTrace}; use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, U256}; use zksync_utils::bytes_to_be_words; @@ -17,18 +17,21 @@ use crate::bootloader_state::BootloaderState; use crate::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; use crate::event_sink::InMemoryEventSink; use crate::events::merge_events; +use crate::history_recorder::{HistoryEnabled, HistoryMode}; use crate::memory::SimpleMemory; use crate::oracles::decommitter::DecommitterOracle; use crate::oracles::precompile::PrecompilesProcessorWithHistory; use crate::oracles::storage::StorageOracle; use crate::oracles::tracer::{ BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, - TransactionResultTracer, ValidationError, ValidationTracer, ValidationTracerParams, + StorageInvocationTracer, TransactionResultTracer, ValidationError, ValidationTracer, + ValidationTracerParams, }; use crate::oracles::OracleWithHistory; use crate::utils::{ - collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, - dump_memory_page_using_primitive_value, precompile_calls_count_after_timestamp, + calculate_computational_gas_used, collect_log_queries_after_timestamp, + collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, + precompile_calls_count_after_timestamp, }; use crate::vm_with_bootloader::{ BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, @@ -36,13 +39,13 @@ use crate::vm_with_bootloader::{ }; use crate::Word; -pub type ZkSyncVmState<'a> = VmState< +pub type ZkSyncVmState<'a, H> = VmState< 'a, - StorageOracle<'a>, - SimpleMemory, - InMemoryEventSink, - PrecompilesProcessorWithHistory, - DecommitterOracle<'a, false>, + StorageOracle<'a, H>, + SimpleMemory, + InMemoryEventSink, + PrecompilesProcessorWithHistory, + DecommitterOracle<'a, false, H>, DummyTracer, >; @@ -56,7 +59,7 @@ pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; pub const VM_HOOK_PARAMS_COUNT: u32 = 2; pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; -pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec { +pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec { memory.dump_page_content_as_u256_words( BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_START_POSITION..VM_HOOK_PARAMS_START_POSITION + VM_HOOK_PARAMS_COUNT, @@ -64,9 +67,9 @@ pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec { } #[derive(Debug)] -pub struct VmInstance<'a> { +pub struct VmInstance<'a, H: HistoryMode> { pub gas_limit: u32, - pub state: ZkSyncVmState<'a>, + pub state: ZkSyncVmState<'a, H>, pub execution_mode: TxExecutionMode, pub block_context: DerivedBlockContext, pub(crate) bootloader_state: BootloaderState, @@ -99,9 +102,11 @@ pub struct VmExecutionResult { /// is executed, but it's not enforced. 
So best we can do is to calculate the amount of gas before and /// after the invocation, leaving the interpretation of this value to the user. pub gas_used: u32, + /// This value also depends on the context, the same as `gas_used`. + pub computational_gas_used: u32, pub contracts_used: usize, pub revert_reason: Option, - pub trace: VmExecutionTrace, + pub trace: VmTrace, pub total_log_queries: usize, pub cycles_used: u32, } @@ -128,12 +133,14 @@ pub struct VmPartialExecutionResult { pub revert_reason: Option, pub contracts_used: usize, pub cycles_used: u32, + pub computational_gas_used: u32, } #[derive(Debug, Clone, PartialEq)] pub struct VmTxExecutionResult { pub status: TxExecutionStatus, pub result: VmPartialExecutionResult, + pub call_traces: Vec, // Gas refunded to the user at the end of the transaction pub gas_refunded: u32, // Gas proposed by the operator to be refunded, before the postOp call. @@ -149,16 +156,7 @@ pub enum VmExecutionStopReason { use crate::utils::VmExecutionResult as NewVmExecutionResult; -fn vm_may_have_ended_inner( - vm: &VmState< - StorageOracle, - SimpleMemory, - InMemoryEventSink, - PrecompilesProcessorWithHistory, - DecommitterOracle, - DummyTracer, - >, -) -> Option { +fn vm_may_have_ended_inner(vm: &ZkSyncVmState) -> Option { let execution_has_ended = vm.execution_has_ended(); let r1 = vm.local_state.registers[RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER as usize]; @@ -195,10 +193,15 @@ fn vm_may_have_ended_inner( // // `gas_before` argument is used to calculate the amount of gas spent by transaction. // It is required because the same VM instance is continuously used to apply several transactions. -fn vm_may_have_ended(vm: &VmInstance, gas_before: u32) -> Option { +fn vm_may_have_ended( + vm: &VmInstance, + gas_before: u32, +) -> Option { let basic_execution_result = vm_may_have_ended_inner(&vm.state)?; - let gas_used = gas_before - vm.gas_remaining(); + let gas_used = gas_before + .checked_sub(vm.gas_remaining()) + .expect("underflow"); match basic_execution_result { NewVmExecutionResult::Ok(mut data) => { @@ -214,13 +217,15 @@ fn vm_may_have_ended(vm: &VmInstance, gas_before: u32) -> Option Option Option Option VmInstance<'a> { +impl VmInstance<'_, H> { fn has_ended(&self) -> bool { match vm_may_have_ended_inner(&self.state) { None | Some(NewVmExecutionResult::MostLikelyDidNotFinish(_, _)) => false, @@ -349,63 +358,6 @@ impl<'a> VmInstance<'a> { } } - /// Saves the snapshot of the current state of the VM that can be used - /// to roll back its state later on. - pub fn save_current_vm_as_snapshot(&mut self) { - self.snapshots.push(VmSnapshot { - // Vm local state contains O(1) various parameters (registers/etc). - // The only "expensive" copying here is copying of the callstack. - // It will take O(callstack_depth) to copy it. - // So it is generally recommended to get snapshots of the bootloader frame, - // where the depth is 1. 
- local_state: self.state.local_state.clone(), - bootloader_state: self.bootloader_state.clone(), - }); - } - - fn rollback_to_snapshot(&mut self, snapshot: VmSnapshot) { - let VmSnapshot { - local_state, - bootloader_state, - } = snapshot; - - let timestamp = Timestamp(local_state.timestamp); - - vlog::trace!("Rolling back decomitter"); - self.state - .decommittment_processor - .rollback_to_timestamp(timestamp); - - vlog::trace!("Rolling back event_sink"); - self.state.event_sink.rollback_to_timestamp(timestamp); - - vlog::trace!("Rolling back storage"); - self.state.storage.rollback_to_timestamp(timestamp); - - vlog::trace!("Rolling back memory"); - self.state.memory.rollback_to_timestamp(timestamp); - - vlog::trace!("Rolling back precompiles_processor"); - self.state - .precompiles_processor - .rollback_to_timestamp(timestamp); - self.state.local_state = local_state; - self.bootloader_state = bootloader_state; - } - - /// Rollbacks the state of the VM to the state of the latest snapshot. - pub fn rollback_to_latest_snapshot(&mut self) { - let snapshot = self.snapshots.last().cloned().unwrap(); - self.rollback_to_snapshot(snapshot); - } - - /// Rollbacks the state of the VM to the state of the latest snapshot. - /// Removes that snapshot from the list. - pub fn rollback_to_latest_snapshot_popping(&mut self) { - let snapshot = self.snapshots.pop().unwrap(); - self.rollback_to_snapshot(snapshot); - } - /// Removes the latest snapshot without rollbacking to it. /// This function expects that there is at least one snapshot present. pub fn pop_snapshot_no_rollback(&mut self) { @@ -431,7 +383,9 @@ impl<'a> VmInstance<'a> { /// its execution. Remaining gas value is read from the current stack frame, so if you'll attempt to /// read it during the transaction execution, you may receive invalid value. pub fn gas_consumed(&self) -> u32 { - self.gas_limit - self.gas_remaining() + self.gas_limit + .checked_sub(self.gas_remaining()) + .expect("underflow") } pub(crate) fn collect_events_and_l1_logs_after_timestamp( @@ -454,13 +408,7 @@ impl<'a> VmInstance<'a> { fn collect_execution_logs_after_timestamp(&self, from_timestamp: Timestamp) -> VmExecutionLogs { let storage_logs = collect_storage_log_queries_after_timestamp( - &self - .state - .storage - .frames_stack - .inner() - .current_frame() - .forward, + self.state.storage.frames_stack.forward().current_frame(), from_timestamp, ); let storage_logs_count = storage_logs.len(); @@ -469,13 +417,7 @@ impl<'a> VmInstance<'a> { self.collect_events_and_l1_logs_after_timestamp(from_timestamp); let log_queries = collect_log_queries_after_timestamp( - &self - .state - .event_sink - .frames_stack - .inner() - .current_frame() - .forward, + self.state.event_sink.frames_stack.forward().current_frame(), from_timestamp, ); @@ -493,9 +435,13 @@ impl<'a> VmInstance<'a> { } } - // Returns a tuple of `VmExecutionStopReason` and the size of the refund proposed by the operator + /// Executes VM until the end or tracer says to stop. 
+ /// Returns a tuple of `VmExecutionStopReason` and the size of the refund proposed by the operator fn execute_with_custom_tracer_and_refunds< - T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + T: ExecutionEndTracer + + PendingRefundTracer + + PubdataSpentTracer + + StorageInvocationTracer, >( &mut self, tracer: &mut T, @@ -524,6 +470,8 @@ impl<'a> VmInstance<'a> { ); } + // This means that the bootloader has informed the system (usually via VMHooks) - that some gas + // should be refunded back (see askOperatorForRefund in bootloader.yul for details). if let Some(bootloader_refund) = tracer.requested_refund() { assert!( operator_refund.is_none(), @@ -557,10 +505,10 @@ impl<'a> VmInstance<'a> { self.state.memory.memory.write_to_memory( BOOTLOADER_HEAP_PAGE as usize, refund_slot, - Some(PrimitiveValue { + PrimitiveValue { value: refund_to_propose.into(), is_pointer: false, - }), + }, Timestamp(timestamp_before_cycle), ); operator_refund = Some(refund_to_propose); @@ -592,6 +540,15 @@ impl<'a> VmInstance<'a> { ); } + tracer.set_missed_storage_invocations( + self.state + .storage + .storage + .get_ptr() + .borrow() + .missed_storage_invocations(), + ); + if tracer.should_stop_execution() { return ( VmExecutionStopReason::TracerRequestedStop, @@ -603,7 +560,10 @@ impl<'a> VmInstance<'a> { // Executes VM until the end or tracer says to stop. pub(crate) fn execute_with_custom_tracer< - T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + T: ExecutionEndTracer + + PendingRefundTracer + + PubdataSpentTracer + + StorageInvocationTracer, >( &mut self, tracer: &mut T, @@ -611,6 +571,9 @@ impl<'a> VmInstance<'a> { self.execute_with_custom_tracer_and_refunds(tracer).0 } + /// Executes the VM until the end of the next transaction. + /// Panics if there are no new transactions in bootloader. + /// Internally uses the OneTxTracer to stop the VM when the last opcode from the transaction is reached. // Err when transaction is rejected. // Ok(status: TxExecutionStatus::Success) when the transaction succeeded // Ok(status: TxExecutionStatus::Failure) when the transaction failed. 
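// Hedged usage sketch (illustrative only; not taken from this diff): driving `execute_next_tx` with
// the `with_call_tracer` flag introduced in the hunk below. Here `vm` and `tx` stand for an initialized
// `VmInstance` and a transaction already pushed to bootloader memory, as in the tests above; the
// error handling is schematic.
//
//     push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None);
//     match vm.execute_next_tx(u32::MAX, /* with_call_tracer */ true) {
//         Ok(res) if res.status == TxExecutionStatus::Success => {
//             // Inspect res.result.logs and res.call_traces here.
//         }
//         Ok(res) => {
//             // Included but failed: res.status == TxExecutionStatus::Failure.
//         }
//         Err(revert_reason) => {
//             // Rejected by the bootloader with a TxRevertReason.
//         }
//     }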
@@ -618,12 +581,17 @@ impl<'a> VmInstance<'a> { pub fn execute_next_tx( &mut self, validation_computational_gas_limit: u32, + with_call_tracer: bool, ) -> Result { let tx_index = self.bootloader_state.next_unexecuted_tx() as u32; - let mut tx_tracer = OneTxTracer::new(validation_computational_gas_limit); + + let mut tx_tracer: OneTxTracer = + OneTxTracer::new(validation_computational_gas_limit, with_call_tracer); let timestamp_initial = Timestamp(self.state.local_state.timestamp); let cycles_initial = self.state.local_state.monotonic_cycle_counter; + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; let (stop_reason, operator_suggested_refund) = self.execute_with_custom_tracer_and_refunds(&mut tx_tracer); @@ -651,6 +619,13 @@ impl<'a> VmInstance<'a> { let vm_execution_logs = self.collect_execution_logs_after_timestamp(timestamp_initial); + let computational_gas_used = calculate_computational_gas_used( + self, + &tx_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); + Ok(VmTxExecutionResult { gas_refunded: tx_tracer.refund_gas, operator_suggested_refund, @@ -666,10 +641,12 @@ impl<'a> VmInstance<'a> { contracts_used: self .state .decommittment_processor - .get_decommitted_bytes_after_timestamp(timestamp_initial), + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + computational_gas_used, }, + call_traces: tx_tracer.call_traces(), }) } else if tx_tracer.validation_run_out_of_gas() { Err(TxRevertReason::ValidationFailed(VmRevertReason::General { @@ -677,6 +654,7 @@ impl<'a> VmInstance<'a> { "Took too many computational gas, allowed limit: {}", validation_computational_gas_limit ), + data: vec![], })) } else { // VM ended up in state @@ -691,15 +669,43 @@ impl<'a> VmInstance<'a> { /// Returns full VM result and partial result produced within the current execution. 
pub fn execute_till_block_end(&mut self, job_type: BootloaderJobType) -> VmBlockResult { + self.execute_till_block_end_with_tracer( + job_type, + &mut TransactionResultTracer::new(self.execution_mode.invocation_limit(), false), + ) + } + + pub fn execute_till_block_end_with_call_tracer( + &mut self, + job_type: BootloaderJobType, + ) -> VmBlockResult { + let mut tracer = TransactionResultTracer::new(self.execution_mode.invocation_limit(), true); + let mut block_result = self.execute_till_block_end_with_tracer(job_type, &mut tracer); + block_result.full_result.trace = VmTrace::CallTrace(tracer.call_trace().unwrap()); + block_result + } + + fn execute_till_block_end_with_tracer( + &mut self, + job_type: BootloaderJobType, + tx_result_tracer: &mut TransactionResultTracer, + ) -> VmBlockResult { let timestamp_initial = Timestamp(self.state.local_state.timestamp); let cycles_initial = self.state.local_state.monotonic_cycle_counter; - let gas_before = self.gas_remaining(); + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; - let mut tx_result_tracer = TransactionResultTracer::default(); - let stop_reason = self.execute_with_custom_tracer(&mut tx_result_tracer); + let stop_reason = self.execute_with_custom_tracer(tx_result_tracer); match stop_reason { VmExecutionStopReason::VmFinished => { - let mut full_result = vm_may_have_ended(self, gas_before).unwrap(); + let mut full_result = vm_may_have_ended(self, gas_remaining_before).unwrap(); + + let computational_gas_used = calculate_computational_gas_used( + self, + tx_result_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); if job_type == BootloaderJobType::TransactionExecution && tx_has_failed(&self.state, 0) @@ -707,6 +713,7 @@ impl<'a> VmInstance<'a> { { let revert_reason = tx_result_tracer .revert_reason + .clone() .map(|reason| { let vm_revert_reason = VmRevertReason::try_from(reason.as_slice()) .unwrap_or_else(|_| VmRevertReason::Unknown { @@ -723,6 +730,7 @@ impl<'a> VmInstance<'a> { revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { msg: "Transaction reverted with empty reason. Possibly out of gas" .to_string(), + data: vec![], }), original_data: vec![], }); @@ -736,8 +744,9 @@ impl<'a> VmInstance<'a> { contracts_used: self .state .decommittment_processor - .get_decommitted_bytes_after_timestamp(timestamp_initial), + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + computational_gas_used, }; // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` @@ -750,13 +759,49 @@ impl<'a> VmInstance<'a> { }) .collect(); full_result.l2_to_l1_logs = l1_messages.into_iter().map(L2ToL1Log::from).collect(); + full_result.computational_gas_used = block_tip_result.computational_gas_used; VmBlockResult { full_result, block_tip_result, } } VmExecutionStopReason::TracerRequestedStop => { - unreachable!("NoopMemoryTracer will never stop execution until the block ends") + metrics::increment_counter!("runtime_context.execution.dropped"); + + if tx_result_tracer.is_limit_reached() { + VmBlockResult { + // Normally the tracer never requests a stop here, but a transaction call running with a limited + // number of missed storage invocations can hit that limit; in that case we stop execution and return an error. 
+ full_result: VmExecutionResult { + events: vec![], + storage_log_queries: vec![], + used_contract_hashes: vec![], + l2_to_l1_logs: vec![], + return_data: vec![], + gas_used: 0, + computational_gas_used: 0, + contracts_used: 0, + revert_reason: Some(VmRevertReasonParsingResult { + revert_reason: TxRevertReason::MissingInvocationLimitReached, + original_data: vec![], + }), + trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()), + total_log_queries: 0, + cycles_used: 0, + }, + block_tip_result: VmPartialExecutionResult { + logs: Default::default(), + revert_reason: Some(TxRevertReason::MissingInvocationLimitReached), + contracts_used: 0, + cycles_used: 0, + computational_gas_used: 0, + }, + } + } else { + unreachable!( + "Tracer should never stop execution, except MissingInvocationLimitReached" + ); + } } } } @@ -765,7 +810,9 @@ impl<'a> VmInstance<'a> { pub fn execute_block_tip(&mut self) -> VmPartialExecutionResult { let timestamp_initial = Timestamp(self.state.local_state.timestamp); let cycles_initial = self.state.local_state.monotonic_cycle_counter; - let mut bootloader_tracer = BootloaderTracer::default(); + let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; + let mut bootloader_tracer: BootloaderTracer = BootloaderTracer::default(); let stop_reason = self.execute_with_custom_tracer(&mut bootloader_tracer); let revert_reason = match stop_reason { @@ -785,14 +832,22 @@ impl<'a> VmInstance<'a> { None } }; + + let computational_gas_used = calculate_computational_gas_used( + self, + &bootloader_tracer, + gas_remaining_before, + spent_pubdata_counter_before, + ); VmPartialExecutionResult { logs: self.collect_execution_logs_after_timestamp(timestamp_initial), revert_reason, contracts_used: self .state .decommittment_processor - .get_decommitted_bytes_after_timestamp(timestamp_initial), + .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + computational_gas_used, } } @@ -800,7 +855,7 @@ impl<'a> VmInstance<'a> { &mut self, validation_params: ValidationTracerParams, ) -> Result<(), ValidationError> { - let mut validation_tracer = ValidationTracer::new( + let mut validation_tracer: ValidationTracer = ValidationTracer::new( self.state.storage.storage.inner().get_ptr(), validation_params, ); @@ -822,27 +877,24 @@ impl<'a> VmInstance<'a> { // returns Some only when there is just one frame in execution trace. fn get_final_log_queries(&self) -> Vec { assert_eq!( - self.state.storage.frames_stack.inner().len(), + self.state.storage.frames_stack.len(), 1, "VM finished execution in unexpected state" ); - let result = self - .state + self.state .storage .frames_stack - .inner() + .forward() .current_frame() - .forward - .clone(); - - result + .to_vec() } - fn get_used_contracts(&self) -> Vec { + /// Returns the keys of contracts that are already loaded (known) by bootloader. + pub(crate) fn get_used_contracts(&self) -> Vec { self.state .decommittment_processor - .known_bytecodes + .decommitted_code_hashes .inner() .keys() .cloned() @@ -860,13 +912,73 @@ impl<'a> VmInstance<'a> { } } +impl VmInstance<'_, HistoryEnabled> { + /// Saves the snapshot of the current state of the VM that can be used + /// to roll back its state later on. + pub fn save_current_vm_as_snapshot(&mut self) { + self.snapshots.push(VmSnapshot { + // Vm local state contains O(1) various parameters (registers/etc). 
+ // The only "expensive" copying here is copying of the callstack. + // It will take O(callstack_depth) to copy it. + // So it is generally recommended to get snapshots of the bootloader frame, + // where the depth is 1. + local_state: self.state.local_state.clone(), + bootloader_state: self.bootloader_state.clone(), + }); + } + + fn rollback_to_snapshot(&mut self, snapshot: VmSnapshot) { + let VmSnapshot { + local_state, + bootloader_state, + } = snapshot; + + let timestamp = Timestamp(local_state.timestamp); + + vlog::trace!("Rolling back decomitter"); + self.state + .decommittment_processor + .rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back event_sink"); + self.state.event_sink.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back storage"); + self.state.storage.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back memory"); + self.state.memory.rollback_to_timestamp(timestamp); + + vlog::trace!("Rolling back precompiles_processor"); + self.state + .precompiles_processor + .rollback_to_timestamp(timestamp); + self.state.local_state = local_state; + self.bootloader_state = bootloader_state; + } + + /// Rollbacks the state of the VM to the state of the latest snapshot. + pub fn rollback_to_latest_snapshot(&mut self) { + let snapshot = self.snapshots.last().cloned().unwrap(); + self.rollback_to_snapshot(snapshot); + } + + /// Rollbacks the state of the VM to the state of the latest snapshot. + /// Removes that snapshot from the list. + pub fn rollback_to_latest_snapshot_popping(&mut self) { + let snapshot = self.snapshots.pop().unwrap(); + self.rollback_to_snapshot(snapshot); + } +} + // Reads the bootloader memory and checks whether the execution step of the transaction // has failed. -pub(crate) fn tx_has_failed(state: &ZkSyncVmState<'_>, tx_id: u32) -> bool { +pub(crate) fn tx_has_failed(state: &ZkSyncVmState<'_, H>, tx_id: u32) -> bool { let mem_slot = RESULT_SUCCESS_FIRST_SLOT + tx_id; let mem_value = state .memory - .dump_page_content_as_u256_words(BOOTLOADER_HEAP_PAGE, mem_slot..mem_slot + 1)[0]; + .read_slot(BOOTLOADER_HEAP_PAGE as usize, mem_slot as usize) + .value; mem_value == U256::zero() } diff --git a/core/lib/vm/src/vm_with_bootloader.rs b/core/lib/vm/src/vm_with_bootloader.rs index b30ae804a87a..aa8893ac3e2f 100644 --- a/core/lib/vm/src/vm_with_bootloader.rs +++ b/core/lib/vm/src/vm_with_bootloader.rs @@ -1,13 +1,13 @@ use std::{collections::HashMap, time::Instant}; use zk_evm::{ - abstractions::{MAX_HEAP_PAGE_SIZE_IN_WORDS, MAX_MEMORY_BYTES}, aux_structures::{MemoryPage, Timestamp}, block_properties::BlockProperties, vm_state::{CallStackEntry, PrimitiveValue, VmState}, zkevm_opcode_defs::{ - system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, FatPointer, BOOTLOADER_BASE_PAGE, - BOOTLOADER_CALLDATA_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, + system_params::{BOOTLOADER_MAX_MEMORY, INITIAL_FRAME_FORMAL_EH_LOCATION}, + FatPointer, BOOTLOADER_BASE_PAGE, BOOTLOADER_CALLDATA_PAGE, STARTING_BASE_PAGE, + STARTING_TIMESTAMP, }, }; use zksync_config::constants::MAX_TXS_IN_BLOCK; @@ -16,6 +16,7 @@ use zksync_contracts::BaseSystemContracts; use zksync_types::{ zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, + USED_BOOTLOADER_MEMORY_WORDS, }; use zksync_utils::{ address_to_u256, @@ -24,9 +25,11 @@ use zksync_utils::{ misc::ceil_div, }; +use itertools::Itertools; + use crate::{ bootloader_state::BootloaderState, - 
oracles::OracleWithHistory, + history_recorder::HistoryMode, transaction_data::{TransactionData, L1_TX_TYPE}, utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, @@ -83,7 +86,7 @@ pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { pub fn derive_base_fee_and_gas_per_pubdata(l1_gas_price: u64, fair_gas_price: u64) -> (u64, u64) { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - // The baseFee is set in such a way that it is always possible to a transaciton to + // The baseFee is set in such a way that it is always possible for a transaction to // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_gas_price, @@ -148,7 +151,7 @@ pub const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = // The size of the bootloader memory dedicated to the encodings of transactions pub const BOOTLOADER_TX_ENCODING_SPACE: u32 = - (MAX_HEAP_PAGE_SIZE_IN_WORDS - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BLOCK) as u32; + (USED_BOOTLOADER_MEMORY_WORDS - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BLOCK) as u32; // Size of the bootloader tx description in words pub const BOOTLOADER_TX_DESCRIPTION_SIZE: usize = 2; @@ -175,8 +178,26 @@ const BOOTLOADER_CODE_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITI #[derive(Debug, Clone, Copy)] pub enum TxExecutionMode { VerifyExecute, - EstimateFee, - EthCall, + EstimateFee { + missed_storage_invocation_limit: usize, + }, + EthCall { + missed_storage_invocation_limit: usize, + }, +} + +impl TxExecutionMode { + pub fn invocation_limit(&self) -> usize { + match self { + Self::VerifyExecute => usize::MAX, + TxExecutionMode::EstimateFee { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit, + TxExecutionMode::EthCall { + missed_storage_invocation_limit, + } => *missed_storage_invocation_limit, + } + } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -191,13 +212,13 @@ impl Default for TxExecutionMode { } } -pub fn init_vm<'a>( - oracle_tools: &'a mut OracleTools<'a, false>, +pub fn init_vm<'a, H: HistoryMode>( + oracle_tools: &'a mut OracleTools<'a, false, H>, block_context: BlockContextMode, block_properties: &'a BlockProperties, execution_mode: TxExecutionMode, base_system_contract: &BaseSystemContracts, -) -> Box> { +) -> Box> { init_vm_with_gas_limit( oracle_tools, block_context, @@ -208,14 +229,14 @@ pub fn init_vm<'a>( ) } -pub fn init_vm_with_gas_limit<'a>( - oracle_tools: &'a mut OracleTools<'a, false>, +pub fn init_vm_with_gas_limit<'a, H: HistoryMode>( + oracle_tools: &'a mut OracleTools<'a, false, H>, block_context: BlockContextMode, block_properties: &'a BlockProperties, execution_mode: TxExecutionMode, base_system_contract: &BaseSystemContracts, gas_limit: u32, -) -> Box> { +) -> Box> { init_vm_inner( oracle_tools, block_context, @@ -303,14 +324,14 @@ impl BlockContextMode { // This method accepts a custom bootloader code. // It should be used only in tests. 
-pub fn init_vm_inner<'a>( - oracle_tools: &'a mut OracleTools<'a, false>, +pub fn init_vm_inner<'a, H: HistoryMode>( + oracle_tools: &'a mut OracleTools<'a, false, H>, block_context: BlockContextMode, block_properties: &'a BlockProperties, gas_limit: u32, base_system_contract: &BaseSystemContracts, execution_mode: TxExecutionMode, -) -> Box> { +) -> Box> { let start = Instant::now(); oracle_tools.decommittment_processor.populate( @@ -398,8 +419,8 @@ pub fn get_bootloader_memory( memory } -pub fn push_transaction_to_bootloader_memory( - vm: &mut VmInstance, +pub fn push_transaction_to_bootloader_memory( + vm: &mut VmInstance, tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, @@ -416,8 +437,8 @@ pub fn push_transaction_to_bootloader_memory( ); } -pub fn push_raw_transaction_to_bootloader_memory( - vm: &mut VmInstance, +pub fn push_raw_transaction_to_bootloader_memory( + vm: &mut VmInstance, tx: TransactionData, execution_mode: TxExecutionMode, predefined_overhead: u32, @@ -439,24 +460,26 @@ pub fn push_raw_transaction_to_bootloader_memory( return vec![]; } + // Deduplicate and filter factory deps preserving original order. tx.factory_deps .iter() - .filter_map(|bytecode| { - if vm - .state + .enumerate() + .sorted_by_key(|(_idx, dep)| *dep) + .dedup_by(|x, y| x.1 == y.1) + .filter(|(_idx, dep)| { + !vm.state .storage .storage .get_ptr() .borrow_mut() - .is_bytecode_known(&hash_bytecode(bytecode)) - { - return None; - } - - compress_bytecode(bytecode) + .is_bytecode_known(&hash_bytecode(dep)) + }) + .sorted_by_key(|(idx, _dep)| *idx) + .filter_map(|(_idx, dep)| { + compress_bytecode(dep) .ok() .map(|compressed| CompressedBytecodeInfo { - original: bytecode.clone(), + original: dep.clone(), compressed, }) }) @@ -594,11 +617,11 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( memory } -fn get_default_local_state<'a>( - tools: &'a mut OracleTools<'a, false>, +fn get_default_local_state<'a, H: HistoryMode>( + tools: &'a mut OracleTools<'a, false, H>, block_properties: &'a BlockProperties, gas_limit: u32, -) -> ZkSyncVmState<'a> { +) -> ZkSyncVmState<'a, H> { let mut vm = VmState::empty_state( &mut tools.storage, &mut tools.memory, @@ -608,6 +631,8 @@ fn get_default_local_state<'a>( &mut tools.witness_tracer, block_properties, ); + // Override ergs limit for the initial frame. + vm.local_state.callstack.current.ergs_remaining = gas_limit; let initial_context = CallStackEntry { this_address: BOOTLOADER_ADDRESS, @@ -619,8 +644,8 @@ fn get_default_local_state<'a>( pc: 0, // Note, that since the results are written at the end of the memory // it is needed to have the entire heap available from the beginning - heap_bound: MAX_MEMORY_BYTES as u32, - aux_heap_bound: MAX_MEMORY_BYTES as u32, + heap_bound: BOOTLOADER_MAX_MEMORY, + aux_heap_bound: BOOTLOADER_MAX_MEMORY, exception_handler_location: INITIAL_FRAME_FORMAL_EH_LOCATION, ergs_remaining: gas_limit, this_shard_id: 0, @@ -693,8 +718,8 @@ fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 { // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee => 0x00, - TxExecutionMode::EthCall => 0x02, + TxExecutionMode::EstimateFee { .. } => 0x00, + TxExecutionMode::EthCall { .. 
} => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/web3_decl/Cargo.toml b/core/lib/web3_decl/Cargo.toml index 260f8ebad6ad..22cf1689ea73 100644 --- a/core/lib/web3_decl/Cargo.toml +++ b/core/lib/web3_decl/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs index 1395014939a7..8606650edd3c 100644 --- a/core/lib/web3_decl/src/error.rs +++ b/core/lib/web3_decl/src/error.rs @@ -17,8 +17,8 @@ pub enum Web3Error { NoSuchFunction, #[error("Invalid transaction data: {0}")] InvalidTransactionData(#[from] zksync_types::ethabi::Error), - #[error("Failed to submit transaction: {0}")] - SubmitTransactionError(String), + #[error("{0}")] + SubmitTransactionError(String, Vec), #[error("Failed to serialize transaction: {0}")] SerializationError(#[from] SerializationTransactionError), #[error("Invalid fee parameters: {0}")] diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 8fb2374f25f4..df96b52c6b8e 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -104,4 +104,10 @@ pub trait ZksNamespace { #[method(name = "getL1BatchDetails")] fn get_l1_batch_details(&self, batch: L1BatchNumber) -> RpcResult>; + + #[method(name = "getBytecodeByHash")] + fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult>>; + + #[method(name = "getL1GasPrice")] + fn get_l1_gas_price(&self) -> RpcResult; } diff --git a/core/tests/cross_external_nodes_checker/Cargo.toml b/core/tests/cross_external_nodes_checker/Cargo.toml new file mode 100644 index 000000000000..c53f41771336 --- /dev/null +++ b/core/tests/cross_external_nodes_checker/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "cross_external_nodes_checker" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. + +[dependencies] +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../lib/vlog", version = "1.0" } +serde_json = "1.0" + +anyhow = "1.0" +tokio = { version = "1", features = ["time"] } +futures = { version = "0.3" } +envy = "0.4" +serde = { version = "1.0" } +ctrlc = { version = "3.1" } diff --git a/core/tests/cross_external_nodes_checker/README.md b/core/tests/cross_external_nodes_checker/README.md new file mode 100644 index 000000000000..7e65f10a487a --- /dev/null +++ b/core/tests/cross_external_nodes_checker/README.md @@ -0,0 +1,58 @@ +# zkSync Cross External Nodes Consistency Checker + +This tool is used to check the consistency of external node instances against the main node. 
+ +## Running locally + +Currently, the URLs to the nodes are set in the main file, so ensure that you have the following consts set to the nodes +you want to check: + +```bash +EN_INSTANCES_URLS="http://127.0.0.1:3060" +MAIN_NODE_URL ="http://127.0.0.1:3050" +``` + +Run the server + +``` +zk init +zk server --components api,tree_lightweight,eth,data_fetcher,state_keeper +``` + +Run the EN + +``` +zk env ext-node +zk clean --database +zk db setup +zk external-node +``` + +Run integration tests to populate the main node with data. + +``` +zk test i server +``` + +Run the checker + +``` +cd core/tests/cross_external_nodes_checker +CHECKER_MODE={Continuous/Triggered} CHECKER_MAIN_NODE_URL={MAIN_NODE_URL} +CHECKER_INSTANCES_URLS={EN_INSTANCES_URLS} CHECKER_INSTANCE_POLL_PERIOD={POLL_PERIOD} +cargo run +``` + +Examples: + +``` +# Continuous Mode connecting to local main node, local EN, and stage EN. + CHECKER_MODE=Continuous CHECKER_MAIN_NODE_URL="http://127.0.0.1:3050" + CHECKER_INSTANCES_URLS="http://127.0.0.1:3060","https://external-node-dev.zksync.dev:443" + CHECKER_INSTANCE_POLL_PERIOD=10 RUST_LOG=cross_external_nodes_checker::checker=debug cargo run + +# Triggered Mode with start and finish miniblocks to check. + CHECKER_MODE=Triggered CHECKER_START_MINIBLOCK=0 CHECKER_FINISH_MINIBLOCK=10 + CHECKER_MAIN_NODE_URL="http://127.0.0.1:3050" CHECKER_INSTANCES_URLS="http://127.0.0.1:3060" + CHECKER_INSTANCE_POLL_PERIOD=10 RUST_LOG=info cargo run +``` diff --git a/core/tests/cross_external_nodes_checker/src/checker.rs b/core/tests/cross_external_nodes_checker/src/checker.rs new file mode 100644 index 000000000000..09dcc189051b --- /dev/null +++ b/core/tests/cross_external_nodes_checker/src/checker.rs @@ -0,0 +1,807 @@ +use crate::config::{CheckerConfig, Mode}; +use std::{collections::HashMap, fmt, fmt::Debug, time::Duration}; +use zksync_types::{ + api::BlockNumber, explorer_api::BlockDetails, web3::types::U64, MiniblockNumber, H256, +}; +use zksync_web3_decl::{ + jsonrpsee::{ + core::RpcResult, + http_client::{HttpClient, HttpClientBuilder}, + }, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, + types::FilterBuilder, +}; + +use crate::helpers::{compare_json, setup_sigint_handler, wait_for_tasks}; +use serde_json::Value; +use tokio::{sync::watch, time::sleep}; + +#[derive(Debug, Clone)] +pub struct Checker { + /// 'Triggered' to run once. 'Continuous' to run forever. + mode: Mode, + /// Client for interacting with the main node. + main_node_client: HttpClient, + /// Client for interacting with the instance nodes. + instance_clients: Vec, + /// Check all miniblocks starting from this. If 'None' then check from genesis. Inclusive. + start_miniblock: Option, + /// For Triggered mode. If 'None' then check all available miniblocks. Inclusive. + finish_miniblock: Option, + /// In seconds, how often to poll the instance node for new miniblocks. + instance_poll_period: u64, + /// Maps instance URL to a list of its divergences. + divergences: HashMap>, + /// How often should blocks logs be checked. 
+ log_check_interval: u32, +} + +#[derive(Debug, Clone)] +struct InstanceClient { + pub url: String, + pub client: HttpClient, +} + +impl Checker { + pub fn new(config: CheckerConfig) -> Self { + let (main_node_client, instance_clients) = + Self::setup_clients(config.main_node_url, config.instances_urls); + + Self { + mode: config.mode, + main_node_client, + instance_clients, + start_miniblock: config.start_miniblock.map(|n| n.into()), + finish_miniblock: config.finish_miniblock.map(|n| n.into()), + instance_poll_period: config.instance_poll_period, + divergences: HashMap::new(), + log_check_interval: 1, + } + } + + // Set up clients for the main node and all EN instances we want to check. + fn setup_clients( + main_node_url: String, + instances_urls: Vec<String>, + ) -> (HttpClient, Vec<InstanceClient>) { + let main_node_client = HttpClientBuilder::default() + .build(main_node_url) + .expect("Failed to create an HTTP client for the main node"); + + let mut instance_clients: Vec<InstanceClient> = Vec::new(); + for url in instances_urls { + let client = HttpClientBuilder::default() + .build(url.clone()) + .expect("Failed to create an HTTP client for an instance of the external node"); + instance_clients.push(InstanceClient { url, client }); + } + + (main_node_client, instance_clients) + } + + pub async fn run(mut self) -> RpcResult<()> { + match self.mode { + Mode::Triggered => { + vlog::info!("Starting Checker in Triggered mode"); + self.run_triggered().await?; + } + Mode::Continuous => { + vlog::info!("Starting Checker in Continuous mode"); + self.run_continuous().await?; + } + } + Ok(()) + } + + // For each instance, spawn a task that will continuously poll the instance for new miniblocks + // and compare them with corresponding main node miniblocks. + // + // Errors in task loops exit the loop, stop the tasks, and cause all other tasks to exit too. + async fn run_continuous(&mut self) -> RpcResult<()> { + let mut join_handles = Vec::new(); + + let sigint_receiver = setup_sigint_handler(); + let (stop_sender, stop_receiver) = watch::channel::<bool>(false); + + for instance in &self.instance_clients { + let main_node_client = self.main_node_client.clone(); + let instance_client = instance.clone(); + let task_stop_receiver = stop_receiver.clone(); + let mut checker = self.clone(); + + let handle = tokio::spawn(async move { + vlog::debug!("Started a task to check instance {}", instance_client.url); + if let Err(e) = checker.run_node_level_checkers(&instance_client).await { + vlog::error!("Error checking instance {}: {:?}", instance_client.url, e); + }; + let mut next_block_to_check = checker.start_miniblock.unwrap_or(MiniblockNumber(0)); + loop { + vlog::debug!( + "entered loop to check miniblock #({}) for instance: {}", + next_block_to_check, + instance_client.url + ); + + if *task_stop_receiver.borrow() { + break; + } + + let instance_miniblock = match instance_client + .client + .get_block_details(next_block_to_check) + .await + { + Ok(Some(miniblock)) => miniblock, + Ok(None) => { + vlog::debug!( + "No miniblock found for miniblock #({}). Sleeping for {} seconds", + next_block_to_check, + checker.instance_poll_period + ); + // The instance doesn't have a next block to check yet. For now, we wait until it does. 
+ sleep(Duration::from_secs(checker.instance_poll_period)).await; + continue; + } + Err(e) => { + vlog::error!( + "Error getting miniblock #({}) from instance: {}: {:?}", + next_block_to_check, + instance_client.url, + e + ); + break; + } + }; + + let main_node_miniblock = match main_node_client + .get_block_details(next_block_to_check) + .await + { + Ok(Some(miniblock)) => miniblock, + Ok(None) => { + vlog::error!( + "Miniblock #({}), which exists in external node instance {}, was not found in the main node", + next_block_to_check, instance_client.url + ); + break; + } + Err(e) => { + vlog::error!("Error getting miniblock from main node while checking instance {}: {:?}", instance_client.url, e); + break; + } + }; + + let main_node_miniblock_txs = match checker + .create_tx_map(&main_node_client, main_node_miniblock.number) + .await + { + Ok(tx_map) => tx_map, + Err(e) => { + vlog::error!("Error creating tx map for main node miniblock while checking instance {}: {}", instance_client.url, e); + break; + } + }; + + match checker + .compare_miniblocks( + &instance_client, + &main_node_miniblock_txs, + &main_node_miniblock, + &instance_miniblock, + ) + .await + { + Ok(_) => { + vlog::debug!( + "successfully checked miniblock #({}) for instance: {}", + next_block_to_check, + instance_client.url + ); + next_block_to_check += 1; + } + Err(e) => { + vlog::error!( + "Error comparing miniblocks for instance {}: {:?}", + instance_client.url, + e + ); + } + } + } + }); + join_handles.push(handle); + } + + // Wait for either all tasks to finish or a stop signal. + tokio::select! { + _ = wait_for_tasks(join_handles) => {}, + _ = sigint_receiver => { + let _ = stop_sender.send(true); + vlog::info!("Stop signal received, shutting down"); + }, + } + + Ok(()) + } + + // Iterate through all miniblocks to be checked. For each, run the checkers through every given instance. + async fn run_triggered(&mut self) -> RpcResult<()> { + let start_miniblock = self.start_miniblock.unwrap_or(MiniblockNumber(0)); + let finish_miniblock = match self.finish_miniblock { + Some(finish_miniblock) => finish_miniblock, + None => { + let highest_main_node_miniblock = self.main_node_client.get_block_number().await?; + MiniblockNumber(highest_main_node_miniblock.as_u32()) + } + }; + + for instance_client in self.instance_clients.clone() { + self.run_node_level_checkers(&instance_client).await?; + } + + for miniblock_num_to_check in start_miniblock.0..=finish_miniblock.0 { + let main_node_miniblock = match self + .main_node_client + .get_block_details(MiniblockNumber(miniblock_num_to_check)) + .await + { + Ok(Some(miniblock)) => miniblock, + Ok(None) => panic!("No miniblock found for existing miniblock number {:?}", miniblock_num_to_check), + Err(e) => panic!("Couldn't fetch existing main node miniblock header for miniblock {:?} due to error: {:?}", miniblock_num_to_check, e), + }; + + let main_node_miniblock_txs = self + .create_tx_map(&self.main_node_client, main_node_miniblock.number) + .await?; + + for instance_client in self.instance_clients.clone() { + let instance_miniblock = match instance_client + .client + .get_block_details(MiniblockNumber(miniblock_num_to_check)) + .await? 
+ { + Some(miniblock) => miniblock, + None => continue, + }; + + self.compare_miniblocks( + &instance_client, + &main_node_miniblock_txs, + &main_node_miniblock, + &instance_miniblock, + ) + .await?; + } + + vlog::info!( + "checked divergences for miniblock number {:?}", + miniblock_num_to_check, + ); + } + + self.log_divergences(); + + Ok(()) + } + + // Check divergences using all checkers for every given pair of miniblocks. + async fn compare_miniblocks( + &mut self, + instance_client: &InstanceClient, + main_node_tx_map: &HashMap, + main_node_miniblock: &BlockDetails, + instance_miniblock: &BlockDetails, + ) -> RpcResult<()> { + self.check_miniblock_details( + &instance_client.url, + main_node_miniblock, + instance_miniblock, + ) + .await; + + self.check_transactions(main_node_tx_map, instance_miniblock, instance_client) + .await?; + + self.check_logs(instance_client, main_node_miniblock.number) + .await?; + + Ok(()) + } + + // Run all the checkers that ought to be run once per instance (the non block-dependent checkers.) + async fn run_node_level_checkers(&mut self, instance_client: &InstanceClient) -> RpcResult<()> { + self.check_chain_id(instance_client).await?; + self.check_main_contract(instance_client).await?; + self.check_bridge_contracts(instance_client).await?; + self.check_l1_chain_id(instance_client).await?; + self.check_confirmed_tokens(instance_client).await?; + Ok(()) + } + + // Add a divergence in Triggered mode; log it in Continuous mode. + fn communicate_divergence(&mut self, url: &str, divergence: Divergence) { + match self.mode { + Mode::Triggered => { + // Add a divergence to the list of divergences for the given EN instance. + let divergences = self + .divergences + .entry(url.to_string()) + .or_insert_with(Vec::new); + divergences.push(divergence); + } + Mode::Continuous => { + // Simply log for now. + vlog::error!("{}", divergence); + } + } + } + + // Create a mapping from the tx hash to a json representation of the tx. + async fn create_tx_map( + &self, + client: &HttpClient, + miniblock_num: MiniblockNumber, + ) -> RpcResult> { + let txs = client.get_raw_block_transactions(miniblock_num).await?; + + let mut tx_map = HashMap::new(); + for tx in txs { + tx_map.insert( + tx.hash(), + serde_json::to_value(tx).expect("tx serialization fail"), + ); + } + + Ok(tx_map) + } + + fn log_divergences(&mut self) { + if self.divergences.is_empty() { + vlog::info!("No divergences found"); + return; + } + for (url, divergences) in &self.divergences { + vlog::warn!("Divergences found for URL: {}", url); + for divergence in divergences { + vlog::warn!("{}", divergence); + } + } + } +} + +// Separate impl for the checkers. 
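+// Each `check_*` method fetches the same piece of data from the main node and from one EN instance,
+// compares the two responses (mostly via `compare_json`), and records any mismatch through
+// `communicate_divergence`.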
+impl Checker { + async fn check_miniblock_details( + &mut self, + instance_url: &str, + main_node_miniblock: &BlockDetails, + instance_miniblock: &BlockDetails, + ) { + vlog::debug!( + "Checking miniblock details for miniblock #({})", + main_node_miniblock.number + ); + let receipt_differences = + compare_json(main_node_miniblock, instance_miniblock, "".to_string()); + for (key, (main_node_val, instance_val)) in receipt_differences { + self.communicate_divergence( + instance_url, + Divergence::MiniblockDetails(DivergenceDetails { + en_instance_url: instance_url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number: main_node_miniblock.number, + }), + ); + } + } + + // Looks for txs existing in one node's miniblock and not the other, for + // discrepancies in the content of txs, and runs the individual transaction checkers. + async fn check_transactions( + &mut self, + main_node_tx_map: &HashMap, + instance_miniblock: &BlockDetails, + instance_client: &InstanceClient, + ) -> RpcResult<()> { + vlog::debug!( + "Checking transactions for miniblock {}", + instance_miniblock.number + ); + + let mut instance_tx_map = self + .create_tx_map(&instance_client.client, instance_miniblock.number) + .await?; + + for (tx_hash, main_node_tx) in main_node_tx_map { + match instance_tx_map.remove(tx_hash) { + Some(instance_tx) => { + if *main_node_tx != instance_tx { + let tx_differences = + compare_json(main_node_tx, &instance_tx, "".to_string()); + for (key, (main_node_val, instance_val)) in tx_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::Transaction(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number: instance_miniblock.number, + }), + ); + } + } else { + self.check_transaction_receipt( + instance_client, + tx_hash, + instance_miniblock.number, + ) + .await?; + + self.check_transaction_details( + instance_client, + tx_hash, + instance_miniblock.number, + ) + .await?; + } + } + None => { + self.communicate_divergence( + &instance_client.url, + Divergence::Transaction(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(tx_hash.to_string()), + en_instance_value: None, + miniblock_number: instance_miniblock.number, + }), + ); + vlog::debug!( + "Added divergence for a tx that is in main node but not in instance: {:?}", + tx_hash + ); + } + } + } + + // If there are txs left in the instance tx map, then they don't exist in the main node. 
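+        // Report each of them as a divergence with `main_node_value: None`.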
+ for tx_hash in instance_tx_map.keys() { + self.communicate_divergence( + &instance_client.url, + Divergence::Transaction(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: None, + en_instance_value: Some(tx_hash.to_string()), + miniblock_number: instance_miniblock.number, + }), + ); + vlog::debug!( + "Added divergence for a tx that is in instance but not in main node: {:?}", + tx_hash + ); + } + + Ok(()) + } + + async fn check_transaction_receipt( + &mut self, + instance_client: &InstanceClient, + tx_hash: &H256, + miniblock_number: MiniblockNumber, + ) -> RpcResult<()> { + vlog::debug!( + "Checking receipts for a tx in miniblock {}", + miniblock_number + ); + + let main_node_receipt = self + .main_node_client + .get_transaction_receipt(*tx_hash) + .await?; + let instance_receipt = instance_client + .client + .get_transaction_receipt(*tx_hash) + .await?; + + let receipt_differences = + compare_json(&main_node_receipt, &instance_receipt, "".to_string()); + for (key, (main_node_val, instance_val)) in receipt_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::TransactionReceipt(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number, + }), + ); + } + + Ok(()) + } + + async fn check_transaction_details( + &mut self, + instance_client: &InstanceClient, + tx_hash: &H256, + miniblock_number: MiniblockNumber, + ) -> RpcResult<()> { + vlog::debug!( + "Checking transaction details for a tx in miniblock {}", + miniblock_number + ); + + let main_node_tx_details = self + .main_node_client + .get_transaction_details(*tx_hash) + .await?; + let instance_tx_details = instance_client + .client + .get_transaction_details(*tx_hash) + .await?; + + let tx_details_differences = + compare_json(&main_node_tx_details, &instance_tx_details, "".to_string()); + for (key, (main_node_val, instance_val)) in tx_details_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::TransactionDetails(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number, + }), + ); + } + + Ok(()) + } + + async fn check_logs( + &mut self, + instance_client: &InstanceClient, + current_miniblock_block_num: MiniblockNumber, + ) -> RpcResult<()> { + let from_block = current_miniblock_block_num + .0 + .checked_sub(self.log_check_interval); + let to_block = current_miniblock_block_num.0; + + if from_block < Some(0) || to_block % self.log_check_interval != 0 { + vlog::debug!("Skipping log check for miniblock {}", to_block); + return Ok(()); + } + vlog::debug!( + "Checking logs for miniblocks {}-{}", + from_block.unwrap(), + to_block - 1 + ); + + let filter = FilterBuilder::default() + .set_from_block(BlockNumber::Number(U64::from(from_block.unwrap()))) + .set_to_block(BlockNumber::Number(U64::from(&to_block - 1))) + .build(); + + let main_node_logs = match self.main_node_client.get_logs(filter.clone()).await { + Ok(logs) => logs, + Err(e) => { + vlog::error!("Failed to get logs from main node: {}", e); + return Ok(()); + } + }; + let instance_logs = match instance_client.client.get_logs(filter).await { + Ok(logs) => logs, + Err(e) => { + vlog::error!("Failed to get logs from instance: {}", e); + return Ok(()); + } + }; + + 
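+        // The two result sets are compared positionally via `zip`, which assumes both nodes return
+        // logs for the range in the same order; a log present on only one side is not reported here.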
for (main_node_log, instance_log) in main_node_logs.iter().zip(instance_logs.iter()) { + let log_differences = compare_json(&main_node_log, &instance_log, "".to_string()); + for (key, (main_node_val, instance_val)) in log_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::Log(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number: MiniblockNumber( + main_node_log.block_number.unwrap().as_u32(), + ), + }), + ); + } + } + + Ok(()) + } + + async fn check_main_contract(&mut self, instance_client: &InstanceClient) -> RpcResult<()> { + let main_node_main_contract = self.main_node_client.get_main_contract().await?; + let instance_main_contract = instance_client.client.get_main_contract().await?; + + let contract_differences = compare_json( + &main_node_main_contract, + &instance_main_contract, + "".to_string(), + ); + for (key, (main_node_val, instance_val)) in contract_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::MainContracts(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{} {:?}", key, main_node_val)), + en_instance_value: Some(format!("{} {:?}", key, instance_val)), + miniblock_number: MiniblockNumber(0), + }), + ); + } + + Ok(()) + } + + async fn check_chain_id(&mut self, instance_client: &InstanceClient) -> RpcResult<()> { + let main_node_chain_id = self.main_node_client.chain_id().await?; + let instance_chain_id = instance_client.client.chain_id().await?; + + if main_node_chain_id != instance_chain_id { + self.communicate_divergence( + &instance_client.url, + Divergence::ChainID(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(main_node_chain_id), + en_instance_value: Some(instance_chain_id), + miniblock_number: MiniblockNumber(0), + }), + ); + } + + Ok(()) + } + + async fn check_l1_chain_id(&mut self, instance_client: &InstanceClient) -> RpcResult<()> { + let main_node_chain_id = self.main_node_client.l1_chain_id().await?; + let instance_chain_id = instance_client.client.l1_chain_id().await?; + + if main_node_chain_id != instance_chain_id { + self.communicate_divergence( + &instance_client.url, + Divergence::L1ChainID(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(main_node_chain_id), + en_instance_value: Some(instance_chain_id), + miniblock_number: MiniblockNumber(0), + }), + ); + } + + Ok(()) + } + + async fn check_bridge_contracts(&mut self, instance_client: &InstanceClient) -> RpcResult<()> { + let main_node_bridge_contracts = self.main_node_client.get_bridge_contracts().await?; + let instance_bridge_contracts = instance_client.client.get_bridge_contracts().await?; + + let receipt_differences = compare_json( + &main_node_bridge_contracts, + &instance_bridge_contracts, + "".to_string(), + ); + for (key, (main_node_val, instance_val)) in receipt_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::BridgeContracts(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number: MiniblockNumber(0), + }), + ); + } + + Ok(()) + } + + async fn check_confirmed_tokens(&mut self, instance_client: &InstanceClient) -> 
RpcResult<()> { + let main_node_confirmed_tokens = self + .main_node_client + .get_confirmed_tokens(0, u8::MAX) + .await?; + let instance_confirmed_tokens = instance_client + .client + .get_confirmed_tokens(0, u8::MAX) + .await?; + + let receipt_differences = compare_json( + &main_node_confirmed_tokens, + &instance_confirmed_tokens, + "".to_string(), + ); + for (key, (main_node_val, instance_val)) in receipt_differences { + self.communicate_divergence( + &instance_client.url, + Divergence::ConfirmedTokens(DivergenceDetails { + en_instance_url: instance_client.url.to_string(), + main_node_value: Some(format!("{}: {:?}", key, main_node_val)), + en_instance_value: Some(format!("{}: {:?}", key, instance_val)), + miniblock_number: MiniblockNumber(0), + }), + ); + } + + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub(crate) enum Divergence { + MiniblockDetails(DivergenceDetails>), + Transaction(DivergenceDetails>), + TransactionReceipt(DivergenceDetails>), + TransactionDetails(DivergenceDetails>), + Log(DivergenceDetails>), + MainContracts(DivergenceDetails>), + BridgeContracts(DivergenceDetails>), + ChainID(DivergenceDetails>), + L1ChainID(DivergenceDetails>), + ConfirmedTokens(DivergenceDetails>), +} + +impl fmt::Display for Divergence { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Divergence::MiniblockDetails(details) => { + write!(f, "Miniblock Details divergence found: {}", details) + } + Divergence::Transaction(details) => { + write!(f, "Transaction divergence found: {}", details) + } + Divergence::TransactionReceipt(details) => { + write!(f, "TransactionReceipt divergence found: {}", details) + } + Divergence::TransactionDetails(details) => { + write!(f, "TransactionDetails divergence found: {}", details) + } + Divergence::Log(details) => write!(f, "Log divergence found: {}", details), + Divergence::MainContracts(details) => { + write!(f, "MainContracts divergence found: {}", details) + } + Divergence::BridgeContracts(details) => { + write!(f, "BridgeContracts divergence found: {}", details) + } + Divergence::ChainID(details) => write!(f, "ChainID divergence found: {}", details), + Divergence::L1ChainID(details) => write!(f, "L1ChainID divergence found: {}", details), + Divergence::ConfirmedTokens(details) => { + write!(f, "ConfirmedTokens divergence found: {}", details) + } + } + } +} + +#[derive(Debug, Clone)] +pub(crate) struct DivergenceDetails { + en_instance_url: String, + main_node_value: T, + en_instance_value: T, + miniblock_number: MiniblockNumber, +} + +impl fmt::Display for DivergenceDetails> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let main_node_value = match &self.main_node_value { + Some(value) => format!("{}", value), + None => String::from("None"), + }; + let en_instance_value = match &self.en_instance_value { + Some(value) => format!("{}", value), + None => String::from("None"), + }; + write!( + f, + "Main node value: {}, EN instance value: {}, Miniblock number: {} in EN instance: {}", + main_node_value, en_instance_value, self.miniblock_number, self.en_instance_url + ) + } +} diff --git a/core/tests/cross_external_nodes_checker/src/config.rs b/core/tests/cross_external_nodes_checker/src/config.rs new file mode 100644 index 000000000000..465164169be5 --- /dev/null +++ b/core/tests/cross_external_nodes_checker/src/config.rs @@ -0,0 +1,73 @@ +use envy::prefixed; +use serde::Deserialize; + +#[derive(Debug, Deserialize, PartialEq)] +pub struct CheckerConfig { + pub mode: Mode, + pub start_miniblock: Option, + pub 
finish_miniblock: Option, + pub main_node_url: String, + pub instances_urls: Vec, + pub instance_poll_period: u64, +} + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub enum Mode { + Triggered, + Continuous, +} + +impl CheckerConfig { + pub fn from_env() -> Self { + prefixed("CHECKER_") + .from_env() + .unwrap_or_else(|err| panic!("Failed to load the checker config with error: {}", err)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + #[test] + fn success() { + let config = r#" + CHECKER_MODE="Continuous" + CHECKER_START_MINIBLOCK="2" + CHECKER_FINISH_MINIBLOCK="4" + CHECKER_MAIN_NODE_URL="http://127.0.0.1:1020" + CHECKER_INSTANCES_URLS="http://127.0.0.1:1030,http://127.0.0.1:1020" + CHECKER_INSTANCE_POLL_PERIOD="60" + "#; + + set_env(config); + + let actual = CheckerConfig::from_env(); + let want = CheckerConfig { + mode: Mode::Continuous, + start_miniblock: Some(2), + finish_miniblock: Some(4), + main_node_url: "http://127.0.0.1:1020".into(), + instances_urls: vec![ + "http://127.0.0.1:1030".into(), + "http://127.0.0.1:1020".into(), + ], + instance_poll_period: 60, + }; + assert_eq!(actual, want); + } + + pub fn set_env(fixture: &str) { + for line in fixture.split('\n').map(str::trim) { + if line.is_empty() { + continue; + } + let elements: Vec<_> = line.split('=').collect(); + let variable_name = elements[0]; + let variable_value = elements[1].trim_matches('"'); + + env::set_var(variable_name, variable_value); + } + } +} diff --git a/core/tests/cross_external_nodes_checker/src/helpers.rs b/core/tests/cross_external_nodes_checker/src/helpers.rs new file mode 100644 index 000000000000..1a03ad2d94f9 --- /dev/null +++ b/core/tests/cross_external_nodes_checker/src/helpers.rs @@ -0,0 +1,305 @@ +use futures::{channel::oneshot, future}; +use serde_json::{Map, Value}; +use std::collections::HashMap; +use tokio::task::JoinHandle; + +pub async fn wait_for_tasks(task_futures: Vec>) { + match future::select_all(task_futures).await.0 { + Ok(_) => { + vlog::info!("One of the instance loops unexpectedly finished its run"); + } + Err(error) => { + vlog::info!( + "One of the tokio threads unexpectedly finished with error: {:?}", + error + ); + } + } +} + +/// Sets up an interrupt handler and returns a future that resolves once an interrupt signal is received. +pub fn setup_sigint_handler() -> oneshot::Receiver<()> { + let (sigint_sender, sigint_receiver) = oneshot::channel(); + let mut sigint_sender = Some(sigint_sender); + ctrlc::set_handler(move || { + if let Some(sigint_sender) = sigint_sender.take() { + sigint_sender.send(()).ok(); + // ^ The send fails if `sigint_receiver` is dropped. We're OK with this, + // since at this point the node should be stopping anyway, or is not interested + // in listening to interrupt signals. 
+ } + }) + .expect("Error setting Ctrl+C handler"); + + sigint_receiver +} + +pub fn compare_json( + a: &T, + b: &T, + path: String, +) -> HashMap, Option)> { + let a = serde_json::to_value(a).expect("serialization failure"); + let b = serde_json::to_value(b).expect("serialization failure"); + + if a == b { + return HashMap::new(); + } + + match (a, b) { + (Value::Object(ref a), Value::Object(ref b)) => compare_json_object(a, b, path), + (Value::Array(ref a), Value::Array(ref b)) => compare_json_array(a, b, path), + (a, b) => { + let mut res = HashMap::new(); + let a_val = if a.is_null() { None } else { Some(a) }; + let b_val = if b.is_null() { None } else { Some(b) }; + res.insert(path, (a_val, b_val)); + res + } + } +} + +fn compare_json_object( + a: &Map, + b: &Map, + path: String, +) -> HashMap, Option)> { + let mut differences = HashMap::new(); + + for (k, v) in a.iter() { + let new_path = if path.is_empty() { + k.clone() + } else { + format!("{}.{}", path, k) + }; + + differences.extend(compare_json(v, b.get(k).unwrap_or(&Value::Null), new_path)); + } + + for (k, v) in b.iter() { + if !a.contains_key(k) { + let new_path = if path.is_empty() { + k.clone() + } else { + format!("{}.{}", path, k) + }; + differences.insert(new_path, (None, Some(v.clone()))); + } + } + + differences +} + +fn compare_json_array( + a: &Vec, + b: &Vec, + path: String, +) -> HashMap, Option)> { + let mut differences = HashMap::new(); + + let len = a.len().max(b.len()); + for i in 0..len { + let new_path = format!("{}[{}]", path, i); + differences.extend(compare_json( + a.get(i).unwrap_or(&Value::Null), + b.get(i).unwrap_or(&Value::Null), + new_path, + )); + } + + differences +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_same_json() { + let json1 = json!({ + "key1": "value1", + "key2": 2, + "key3": [ + "value2", + "+value3" + ] + }); + + let differences = compare_json(&json1, &json1, "".to_string()); + assert_eq!(differences.len(), 0); + } + + #[test] + fn test_deeply_nested_objects() { + let a = json!({ + "key1": { + "subkey1": { + "subsubkey1": "value1", + "subsubkey2": "value2" + }, + "subkey2": "value3" + }, + "key2": "value4" + }); + + let b = json!({ + "key1": { + "subkey1": { + "subsubkey1": "value1", + "subsubkey2": "value5" + }, + "subkey2": "value6" + }, + "key2": "value4" + }); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 2); + assert_eq!( + differences.get("key1.subkey1.subsubkey2"), + Some(&(Some(json!("value2")), Some(json!("value5")))) + ); + assert_eq!( + differences.get("key1.subkey2"), + Some(&(Some(json!("value3")), Some(json!("value6")))) + ); + } + + #[test] + fn test_diff_different_keys() { + let a = json!({ + "key1": "value1", + "key2": "value2" + }); + + let b = json!({ + "key1": "value1", + "key3": "value3" + }); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 2); + assert_eq!( + differences.get("key2"), + Some(&(Some(json!("value2")), None)) + ); + assert_eq!( + differences.get("key3"), + Some(&(None, Some(json!("value3")))) + ); + } + + #[test] + fn test_diff_different_types() { + let a = json!({ + "key1": true, + "key2": 123, + "key3": "value1" + }); + + let b = json!({ + "key1": false, + "key2": "123", + "key3": "value2" + }); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 3); + assert_eq!( + differences.get("key1"), + Some(&(Some(json!(true)), Some(json!(false)))) + ); + assert_eq!( + 
differences.get("key2"), + Some(&(Some(json!(123)), Some(json!("123")))) + ); + assert_eq!( + differences.get("key3"), + Some(&(Some(json!("value1")), Some(json!("value2")))) + ); + } + + #[test] + fn test_empty_jsons() { + let json1 = json!({}); + let json2 = json!([]); + + let differences = compare_json(&json1, &json1, "".to_string()); + assert_eq!(differences.len(), 0); + + let differences = compare_json(&json2, &json2, "".to_string()); + assert_eq!(differences.len(), 0); + + let differences = compare_json(&json1, &json2, "".to_string()); + assert_eq!(differences.len(), 1); + } + + #[test] + fn test_one_empty_json() { + let json1 = json!({}); + let json2 = json!({ + "key1": "value1", + "key2": 2, + }); + + let differences = compare_json(&json1, &json2, "".to_string()); + assert_eq!(differences.len(), 2); + + let differences = compare_json(&json2, &json1, "".to_string()); + assert_eq!(differences.len(), 2); + } + + #[test] + fn test_json_with_null() { + let a = json!({ + "key1": null, + "key2": "value2" + }); + + let b = json!({ + "key1": "value1", + "key2": null + }); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 2); + assert_eq!( + differences.get("key1"), + Some(&(None, Some(json!("value1")))) + ); + assert_eq!( + differences.get("key2"), + Some(&(Some(json!("value2")), None)) + ); + } + + #[test] + fn test_arrays_different_lengths() { + let a = json!([1, 2, 3]); + let b = json!([1, 2, 3, 4]); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 1); + assert_eq!(differences.get("[3]"), Some(&(None, Some(json!(4))))); + } + + #[test] + fn test_arrays_with_nested_objects() { + let a = json!([{"key1": "value1"}, {"key2": "value2"}]); + let b = json!([{"key1": "value1"}, {"key2": "value3"}]); + + let differences = compare_json(&a, &b, "".to_string()); + + assert_eq!(differences.len(), 1); + assert_eq!( + differences.get("[1].key2"), + Some(&(Some(json!("value2")), Some(json!("value3")))) + ); + } +} diff --git a/core/tests/cross_external_nodes_checker/src/main.rs b/core/tests/cross_external_nodes_checker/src/main.rs new file mode 100644 index 000000000000..a053fbac946a --- /dev/null +++ b/core/tests/cross_external_nodes_checker/src/main.rs @@ -0,0 +1,29 @@ +extern crate core; + +mod checker; +mod config; +mod helpers; + +use crate::config::CheckerConfig; +use checker::Checker; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let _ = vlog::init(); + + vlog::info!("Started the Cross Node Checker"); + + let config = CheckerConfig::from_env(); + let cross_node_checker = Checker::new(config); + + match cross_node_checker.run().await { + Ok(()) => { + vlog::info!("Cross node checker finished with no error"); + } + Err(err) => { + vlog::error!("Unexpected error in the checker: {}", err); + } + } + + Ok(()) +} diff --git a/core/tests/loadnext/Cargo.toml b/core/tests/loadnext/Cargo.toml index cf15999277f9..095df32dc838 100644 --- a/core/tests/loadnext/Cargo.toml +++ b/core/tests/loadnext/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -19,6 +19,7 @@ zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0" } zksync_eth_client = { path = "../../lib/eth_client", version = "1.0" } zksync_config = { path = 
"../../lib/config", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } async-trait = "0.1" serde = { version = "1.0", features = ["derive"] } @@ -36,3 +37,4 @@ once_cell = "1.7" thiserror = "1" reqwest = { version = "0.11", features = ["blocking", "json"] } regex = "1.7" +metrics = "0.20" diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index cafa38d10385..4cb89bb1d83b 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -143,7 +143,6 @@ impl AccountLifespan { }; self.execute_command(deploy_command.clone()).await; self.wait_for_all_inflight_tx().await; - let mut timer = tokio::time::interval(POLLING_INTERVAL); loop { let command = self.generate_command(); @@ -173,21 +172,45 @@ impl AccountLifespan { // If some txs haven't been processed yet, we'll check them in the next iteration. // Due to natural sleep for sending tx, usually more than 1 tx can be already // processed and have a receipt + let start = Instant::now(); + vlog::debug!( + "Account {:?}: check_inflight_txs len {:?}", + self.wallet.wallet.address(), + self.inflight_txs.len() + ); while let Some(tx) = self.inflight_txs.pop_front() { - if let Ok(Some(transaction_receipt)) = - self.get_tx_receipt_for_committed_block(tx.tx_hash).await - { - let label = self.verify_receipt( - &transaction_receipt, - &tx.command.modifier.expected_outcome(), - ); - self.report(label, tx.start.elapsed(), tx.attempt, tx.command) - .await; - } else { - self.inflight_txs.push_front(tx); - break; + let receipt = self.get_tx_receipt_for_committed_block(tx.tx_hash).await; + match receipt { + Ok(Some(transaction_receipt)) => { + let label = self.verify_receipt( + &transaction_receipt, + &tx.command.modifier.expected_outcome(), + ); + vlog::trace!( + "Account {:?}: check_inflight_txs tx is included after {:?} attempt {:?}", + self.wallet.wallet.address(), + tx.start.elapsed(), + tx.attempt + ); + self.report(label, tx.start.elapsed(), tx.attempt, tx.command) + .await; + } + other => { + vlog::debug!( + "Account {:?}: check_inflight_txs tx not yet included: {:?}", + self.wallet.wallet.address(), + other + ); + self.inflight_txs.push_front(tx); + break; + } } } + vlog::debug!( + "Account {:?}: check_inflight_txs complete {:?}", + self.wallet.wallet.address(), + start.elapsed() + ); } fn verify_receipt( @@ -240,6 +263,10 @@ impl AccountLifespan { Ok(result) => result, Err(ClientError::NetworkError(_)) | Err(ClientError::OperationTimeout) => { if attempt < MAX_RETRIES { + vlog::warn!( + "Error while sending tx: {}. Retrying...", + result.unwrap_err() + ); // Retry operation. 
attempt += 1; continue; @@ -269,7 +296,7 @@ impl AccountLifespan { self.successfully_sent_txs.write().await.push(tx_hash) } SubmitResult::ReportLabel(label) => { - // Make a report if it was some problems in sending tx + // Make a report if there was some problems sending tx self.report(label, start.elapsed(), attempt, command).await } }; @@ -357,7 +384,7 @@ impl AccountLifespan { err.message().to_string() } }; - if message.contains("nonce is incorrect") { + if message.contains("nonce") { self.reset_nonce().await; return Ok(SubmitResult::ReportLabel(ReportLabel::skipped(&message))); } diff --git a/core/tests/loadnext/src/account/tx_command_executor.rs b/core/tests/loadnext/src/account/tx_command_executor.rs index ca7fdd92c743..0944e5cc1aa6 100644 --- a/core/tests/loadnext/src/account/tx_command_executor.rs +++ b/core/tests/loadnext/src/account/tx_command_executor.rs @@ -1,3 +1,4 @@ +use std::time::Instant; use zksync::web3::ethabi; use zksync::EthNamespaceClient; use zksync::{ @@ -346,11 +347,23 @@ impl AccountLifespan { } ExecutionType::L2 => { + let mut started_at = Instant::now(); let tx = self .build_execute_loadnext_contract(command, contract_address) .await?; - - self.execute_submit(tx, command.modifier).await + vlog::trace!( + "Account {:?}: execute_loadnext_contract: tx built in {:?}", + self.wallet.wallet.address(), + started_at.elapsed() + ); + started_at = Instant::now(); + let result = self.execute_submit(tx, command.modifier).await; + vlog::trace!( + "Account {:?}: execute_loadnext_contract: tx executed in {:?}", + self.wallet.wallet.address(), + started_at.elapsed() + ); + result } } } diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index 4156ba6f6a45..9f15610dec32 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -129,6 +129,10 @@ pub struct LoadtestConfig { /// that should be compared to the actual result. #[serde(default = "default_expected_tx_count")] pub expected_tx_count: Option, + + /// Label to use for results pushed to Prometheus. + #[serde(default = "default_prometheus_label")] + pub prometheus_label: String, } fn default_max_inflight_txs() -> usize { @@ -236,7 +240,7 @@ pub fn get_default_l2_rpc_address() -> String { } fn default_l2_rpc_address() -> String { - // http://z2-dev-api.zksync.dev/ for stage2 + // https://z2-dev-api.zksync.dev:443 for stage2 let result = get_default_l2_rpc_address(); vlog::info!("Using default L2_RPC_ADDRESS: {}", result); result @@ -255,6 +259,12 @@ fn default_expected_tx_count() -> Option { result } +fn default_prometheus_label() -> String { + let result = "unset".to_string(); + vlog::info!("Using default PROMETHEUS_LABEL: {:?}", result); + result +} + impl LoadtestConfig { pub fn from_env() -> envy::Result { envy::from_env() diff --git a/core/tests/loadnext/src/constants.rs b/core/tests/loadnext/src/constants.rs index e4153d9bfb36..3f76cc52b275 100644 --- a/core/tests/loadnext/src/constants.rs +++ b/core/tests/loadnext/src/constants.rs @@ -16,7 +16,7 @@ pub const COMMIT_TIMEOUT: Duration = Duration::from_secs(600); /// every couple of seconds, chosen value seems to be adequate to provide the result in one or two calls at average. pub const POLLING_INTERVAL: Duration = Duration::from_secs(3); -pub const MAX_OUTSTANDING_NONCE: usize = 50; +pub const MAX_OUTSTANDING_NONCE: usize = 20; /// Each account continuously sends API requests in addition to transactions. Such requests are considered failed /// after this amount of time elapsed without any server response. 
diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index a8ce01f03ebd..b742f6498bc9 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -1,6 +1,7 @@ -use futures::{channel::mpsc, future::join_all}; +use futures::{channel::mpsc, future::join_all, SinkExt}; use std::ops::Add; use tokio::task::JoinHandle; +use zksync_eth_client::BoundEthInterface; use zksync_types::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; use zksync::ethereum::{PriorityOpHolder, DEFAULT_PRIORITY_FEE}; @@ -15,6 +16,7 @@ use zksync_eth_signer::PrivateKeySigner; use zksync_types::api::{BlockNumber, U64}; use zksync_types::{tokens::ETHEREUM_ADDRESS, Address, Nonce, U256}; +use crate::report::ReportBuilder; use crate::{ account::AccountLifespan, account_pool::AccountPool, @@ -327,11 +329,11 @@ impl Executor { // We request nonce each time, so that if one iteration was failed, it will be repeated on the next iteration. let mut nonce = Nonce(master_wallet.get_nonce().await?); - // 2 txs per account (1 ERC-20 & 1 ETH transfer) + 1 fee tx. let txs_amount = accounts_to_process * 2 + 1; - let mut handles = Vec::with_capacity(txs_amount); + let mut handles = Vec::with_capacity(accounts_to_process); - let mut eth_txs = Vec::with_capacity((txs_amount + 1) * 2); + // 2 txs per account (1 ERC-20 & 1 ETH transfer). + let mut eth_txs = Vec::with_capacity(txs_amount * 2); let mut eth_nonce = ethereum.client().pending_nonce("loadnext").await?; for account in self.pool.accounts.iter().take(accounts_to_process) { @@ -348,10 +350,13 @@ impl Executor { .client() .eth_balance(target_address, "loadnext") .await?; + let gas_price = ethereum.client().get_gas_price("loadnext").await?; if balance < eth_to_distribute { let options = Options { nonce: Some(eth_nonce), + max_fee_per_gas: Some(gas_price * 2), + max_priority_fee_per_gas: Some(gas_price * 2), ..Default::default() }; let res = ethereum @@ -374,6 +379,8 @@ impl Executor { if ethereum_erc20_balance < U256::from(l1_transfer_amount) { let options = Options { nonce: Some(eth_nonce), + max_fee_per_gas: Some(gas_price * 2), + max_priority_fee_per_gas: Some(gas_price * 2), ..Default::default() }; let res = ethereum @@ -469,9 +476,14 @@ impl Executor { const MAX_RETRIES: usize = 3; // Prepare channels for the report collector. - let (report_sender, report_receiver) = mpsc::channel(256); + let (mut report_sender, report_receiver) = mpsc::channel(256); - let report_collector = ReportCollector::new(report_receiver, self.config.expected_tx_count); + let report_collector = ReportCollector::new( + report_receiver, + self.config.expected_tx_count, + self.config.duration(), + self.config.prometheus_label.clone(), + ); let report_collector_future = tokio::spawn(report_collector.run()); let config = &self.config; @@ -495,7 +507,7 @@ impl Executor { } let accounts_left = accounts_amount - accounts_processed; - let max_accounts_per_iter = MAX_OUTSTANDING_NONCE / 2; // We send two transfers per account: ERC-20 and ETH. 
+ let max_accounts_per_iter = MAX_OUTSTANDING_NONCE; let accounts_to_process = std::cmp::min(accounts_left, max_accounts_per_iter); if let Err(err) = self.send_initial_transfers_inner(accounts_to_process).await { @@ -507,6 +519,7 @@ impl Executor { continue; } + accounts_processed += accounts_to_process; vlog::info!( "[{}/{}] Accounts processed", accounts_processed, @@ -514,11 +527,13 @@ impl Executor { ); retry_counter = 0; - accounts_processed += accounts_to_process; let contract_execution_params = self.execution_config.contract_execution_params.clone(); // Spawn each account lifespan. let main_token = self.l2_main_token; + report_sender + .send(ReportBuilder::build_init_complete_report()) + .await?; let new_account_futures = self.pool .accounts diff --git a/core/tests/loadnext/src/main.rs b/core/tests/loadnext/src/main.rs index c13b8f54f1a0..8daebd08933a 100644 --- a/core/tests/loadnext/src/main.rs +++ b/core/tests/loadnext/src/main.rs @@ -10,6 +10,7 @@ use loadnext::{ executor::Executor, report_collector::LoadtestResult, }; +use std::time::Duration; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -18,6 +19,8 @@ async fn main() -> anyhow::Result<()> { let config = LoadtestConfig::from_env() .expect("Config parameters should be loaded from env or from default values"); let execution_config = ExecutionConfig::from_env(); + let prometheus_config = envy::prefixed("PROMETHEUS_").from_env().ok(); + TxType::initialize_weights(&execution_config.transaction_weights); ExplorerApiRequestType::initialize_weights(&execution_config.explorer_api_config_weights); @@ -31,10 +34,24 @@ async fn main() -> anyhow::Result<()> { execution_config.explorer_api_config_weights ); - let mut executor = Executor::new(config, execution_config).await?; - let final_resolution = executor.start().await; + let mut executor = Executor::new(config.clone(), execution_config).await?; - match final_resolution { + if let Some(prometheus_config) = prometheus_config { + vlog::info!( + "Starting prometheus exporter with config {:?}", + prometheus_config + ); + tokio::spawn(prometheus_exporter::run_prometheus_exporter( + prometheus_config, + true, + )); + } else { + vlog::info!("Starting without prometheus exporter"); + } + let result = executor.start().await; + vlog::info!("Waiting 5 seconds to make sure all the metrics are pushed to the push gateway"); + tokio::time::sleep(Duration::from_secs(5)).await; + match result { LoadtestResult::TestPassed => { vlog::info!("Test passed"); Ok(()) diff --git a/core/tests/loadnext/src/report.rs b/core/tests/loadnext/src/report.rs index 9aab28ed3b96..a9ab2d2e0cad 100644 --- a/core/tests/loadnext/src/report.rs +++ b/core/tests/loadnext/src/report.rs @@ -84,6 +84,16 @@ impl ReportBuilder { pub fn finish(self) -> Report { self.report } + + pub fn build_init_complete_report() -> Report { + Report { + reporter: Default::default(), + label: ReportLabel::done(), + action: ActionType::InitComplete, + retries: 0, + time: Default::default(), + } + } } /// Denotes the outcome of a performed action. @@ -180,6 +190,7 @@ impl From for ApiActionType { /// Generic wrapper of all the actions that can be done in loadtest. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ActionType { + InitComplete, Tx(TxActionType), Api(ApiActionType), ExplorerApi(ExplorerApiRequestType), diff --git a/core/tests/loadnext/src/report_collector/mod.rs b/core/tests/loadnext/src/report_collector/mod.rs index 5285f51d0fd8..8ecfd244b42c 100644 --- a/core/tests/loadnext/src/report_collector/mod.rs +++ b/core/tests/loadnext/src/report_collector/mod.rs @@ -1,6 +1,8 @@ use futures::{channel::mpsc::Receiver, StreamExt}; use operation_results_collector::OperationResultsCollector; +use std::time::Duration; +use crate::report::ActionType; use crate::{ report::{Report, ReportLabel}, report_collector::metrics_collector::MetricsCollector, @@ -47,21 +49,36 @@ pub struct ReportCollector { metrics_collector: MetricsCollector, operations_results_collector: OperationResultsCollector, expected_tx_count: Option, + loadtest_duration: Duration, + prometheus_label: String, } impl ReportCollector { - pub fn new(reports_stream: Receiver, expected_tx_count: Option) -> Self { + pub fn new( + reports_stream: Receiver, + expected_tx_count: Option, + loadtest_duration: Duration, + prometheus_label: String, + ) -> Self { Self { reports_stream, metrics_collector: MetricsCollector::new(), - operations_results_collector: OperationResultsCollector::new(), + operations_results_collector: OperationResultsCollector::new(loadtest_duration), expected_tx_count, + loadtest_duration, + prometheus_label, } } pub async fn run(mut self) -> LoadtestResult { while let Some(report) = self.reports_stream.next().await { vlog::trace!("Report: {:?}", &report); + if matches!(&report.action, ActionType::InitComplete) { + self.metrics_collector = MetricsCollector::new(); + self.operations_results_collector = + OperationResultsCollector::new(self.loadtest_duration); + continue; + } if matches!(&report.label, ReportLabel::ActionDone) { // We only count successfully created statistics. @@ -81,6 +98,11 @@ impl ReportCollector { // All the receivers are gone, it's likely the end of the test. // Now we can output the statistics. self.metrics_collector.report(); + metrics::gauge!( + "loadtest.tps", + self.operations_results_collector.tps(), + "label" => self.prometheus_label.clone(), + ); self.operations_results_collector.report(); self.final_resolution() diff --git a/core/tests/loadnext/src/report_collector/operation_results_collector.rs b/core/tests/loadnext/src/report_collector/operation_results_collector.rs index 8d066005de4a..07d18954cebf 100644 --- a/core/tests/loadnext/src/report_collector/operation_results_collector.rs +++ b/core/tests/loadnext/src/report_collector/operation_results_collector.rs @@ -1,4 +1,5 @@ use crate::report::{ActionType, ReportLabel}; +use std::time::Duration; /// Collector that analyzes the outcomes of the performed operations. /// Currently it's solely capable of deciding whether test was failed or not. 
@@ -9,6 +10,7 @@ pub struct OperationResultsCollector { api_requests_results: ResultCollector, subscriptions_results: ResultCollector, explorer_api_requests_results: ResultCollector, + loadtest_duration: Duration, } #[derive(Debug, Clone, Default)] @@ -45,8 +47,11 @@ impl ResultCollector { } impl OperationResultsCollector { - pub fn new() -> Self { - Self::default() + pub fn new(loadtest_duration: Duration) -> Self { + Self { + loadtest_duration, + ..Default::default() + } } pub fn add_status(&mut self, status: &ReportLabel, action_type: ActionType) { @@ -55,12 +60,21 @@ impl OperationResultsCollector { ActionType::Api(_) => self.api_requests_results.add_status(status), ActionType::Subscription(_) => self.subscriptions_results.add_status(status), ActionType::ExplorerApi(_) => self.explorer_api_requests_results.add_status(status), + ActionType::InitComplete => {} } } + pub fn tps(&self) -> f64 { + self.tx_results.successes() as f64 / self.loadtest_duration.as_secs() as f64 + } pub fn report(&self) { vlog::info!( - "Loadtest status: {} successful operations, {} skipped, {} failures. {} actions total.", + "Ran loadtest for {:?}. TPS: {}", + self.loadtest_duration, + self.tps() + ); + vlog::info!( + "Transaction execution stats: {} successful, {} skipped, {} failures. {} actions total.", self.tx_results.successes(), self.tx_results.skipped(), self.tx_results.failures(), diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index 304bf2257b3c..33ac6d67695f 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -6,8 +6,25 @@ import { expect } from 'chai'; // Parses output of "print-suggested-values" command of the revert block tool. function parseSuggestedValues(suggestedValuesString: string) { - let result = suggestedValuesString.match(/(?<=l1 batch number: |nonce: |priority fee: )[0-9]*/g)!; - return { lastL1BatchNumber: parseInt(result[0]), nonce: parseInt(result[1]), priorityFee: parseInt(result[2]) }; + const json = JSON.parse(suggestedValuesString); + if (!json || typeof json !== 'object') { + throw new TypeError('suggested values are not an object'); + } + + const lastL1BatchNumber = json.last_executed_l1_batch_number; + if (!Number.isInteger(lastL1BatchNumber)) { + throw new TypeError('suggested `lastL1BatchNumber` is not an integer'); + } + const nonce = json.nonce; + if (!Number.isInteger(nonce)) { + throw new TypeError('suggested `nonce` is not an integer'); + } + const priorityFee = json.priority_fee; + if (!Number.isInteger(priorityFee)) { + throw new TypeError('suggested `priorityFee` is not an integer'); + } + + return { lastL1BatchNumber, nonce, priorityFee }; } async function killServerAndWaitForShutdown(tester: Tester) { @@ -28,6 +45,11 @@ async function killServerAndWaitForShutdown(tester: Tester) { throw new Error("Server didn't stop after a kill request"); } +function ignoreError(err: any, context?: string) { + const message = context ? `Error ignored (context: ${context}).` : 'Error ignored.'; + console.info(message, err); +} + const depositAmount = ethers.utils.parseEther('0.001'); describe('Block reverting test', function () { @@ -43,21 +65,21 @@ describe('Block reverting test', function () { step('run server and execute some transactions', async () => { // Make sure server isn't running. 
- try { - await killServerAndWaitForShutdown(tester); - } catch (_) {} + await killServerAndWaitForShutdown(tester).catch(ignoreError); // Set 1000 seconds deadline for `ExecuteBlocks` operation. process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1000'; // Run server in background. - utils.background(`zk server --components api,tree,tree_lightweight,eth,data_fetcher,state_keeper`); + const components = 'api,tree,tree_lightweight,tree_lightweight_new,eth,data_fetcher,state_keeper'; + utils.background(`zk server --components ${components}`); // Server may need some time to recompile if it's a cold run, so wait for it. let iter = 0; while (iter < 30 && !mainContract) { try { mainContract = await tester.syncWallet.getMainContract(); - } catch (_) { + } catch (err) { + ignoreError(err, 'waiting for server HTTP JSON-RPC to start'); await utils.sleep(5); iter += 1; } @@ -112,10 +134,13 @@ describe('Block reverting test', function () { }); step('revert blocks', async () => { - let suggestedValuesOutput = ( - await utils.exec(`cd $ZKSYNC_HOME && cargo run --bin block_reverter --release -- print-suggested-values`) - ).stdout; - let { lastL1BatchNumber, nonce, priorityFee } = parseSuggestedValues(suggestedValuesOutput); + const executedProcess = await utils.exec( + 'cd $ZKSYNC_HOME && ' + + 'RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json' + // ^ Switch off logs to not pollute the output JSON + ); + const suggestedValuesOutput = executedProcess.stdout; + const { lastL1BatchNumber, nonce, priorityFee } = parseSuggestedValues(suggestedValuesOutput); expect(lastL1BatchNumber < blocksCommittedBeforeRevert, 'There should be at least one block for revert').to.be .true; @@ -142,7 +167,7 @@ describe('Block reverting test', function () { process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; // Run server. - utils.background(`zk server --components api,tree,tree_lightweight,eth,data_fetcher,state_keeper`); + utils.background(`zk server --components api,tree_new,tree_lightweight,eth,data_fetcher,state_keeper`); await utils.sleep(10); const balanceBefore = await alice.getBalance(); @@ -170,7 +195,7 @@ describe('Block reverting test', function () { await killServerAndWaitForShutdown(tester); // Run again. - utils.background(`zk server --components=api,tree,tree_lightweight,eth,data_fetcher,state_keeper`); + utils.background(`zk server --components=api,tree_new,tree_lightweight,eth,data_fetcher,state_keeper`); await utils.sleep(10); // Trying to send a transaction from the same address again @@ -178,9 +203,7 @@ describe('Block reverting test', function () { }); after('Try killing server', async () => { - try { - await utils.exec('pkill zksync_server'); - } catch (_) {} + await utils.exec('pkill zksync_server').catch(ignoreError); }); }); diff --git a/core/tests/revert-test/tests/tester.ts b/core/tests/revert-test/tests/tester.ts index ce1581212a68..edbffeeda9bd 100644 --- a/core/tests/revert-test/tests/tester.ts +++ b/core/tests/revert-test/tests/tester.ts @@ -37,7 +37,7 @@ export class Tester { ethWallet = new ethers.Wallet(process.env.MASTER_WALLET_PK!); } ethWallet = ethWallet.connect(ethProvider); - const web3Provider = new zkweb3.Provider(process.env.ZKSYNC_WEB3_API_URL || "http://localhost:3050"); + const web3Provider = new zkweb3.Provider(process.env.ZKSYNC_WEB3_API_URL || "http://127.0.0.1:3050"); web3Provider.pollingInterval = 100; // It's OK to keep it low even on stage. 
const syncWallet = new zkweb3.Wallet(ethWallet.privateKey, web3Provider, ethProvider); diff --git a/core/tests/test_account/Cargo.toml b/core/tests/test_account/Cargo.toml index c25ca022ec65..78a1a2e08a86 100644 --- a/core/tests/test_account/Cargo.toml +++ b/core/tests/test_account/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index a4087b24eb32..d75a2c030dd3 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -75,7 +75,7 @@ impl ZkSyncAccount { _nonce: Option, _increment_nonce: bool, ) -> L2Tx { - todo!("New withdrawal support is not yet implemented") + todo!("TODO") // let mut stored_nonce = self.nonce.lock().unwrap(); // let withdraw = GenericL2Tx::::new_signed( diff --git a/core/tests/testkit/Cargo.toml b/core/tests/testkit/Cargo.toml index 0aefc2b773e5..710533aeb597 100644 --- a/core/tests/testkit/Cargo.toml +++ b/core/tests/testkit/Cargo.toml @@ -4,7 +4,7 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" +repository = "https://github.com/matter-labs/zksync-2" license = "Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/tests/testkit/src/commands/gas_price/mod.rs b/core/tests/testkit/src/commands/gas_price/mod.rs index d32f4f4cfd6a..43e305d9cb7c 100644 --- a/core/tests/testkit/src/commands/gas_price/mod.rs +++ b/core/tests/testkit/src/commands/gas_price/mod.rs @@ -11,7 +11,8 @@ use std::time::Instant; use rand::thread_rng; -use zksync_core::genesis::ensure_genesis_state; +use zksync_core::genesis::{ensure_genesis_state, GenesisParams}; +use zksync_types::L2ChainId; use crate::commands::gas_price::utils::{ commit_cost_of_add_tokens, commit_cost_of_deploys, commit_cost_of_deposits, @@ -36,7 +37,14 @@ pub async fn test_gas_price() { let test_db_manager = TestDatabaseManager::new().await; let mut storage = test_db_manager.connect_to_postgres().await; { - ensure_genesis_state(&mut storage, &config).await; + ensure_genesis_state( + &mut storage, + L2ChainId(config.chain.eth.zksync_network_id), + GenesisParams::MainNode { + first_validator: config.eth_sender.sender.operator_commit_eth_addr, + }, + ) + .await; } println!("deploying contracts"); diff --git a/core/tests/testkit/src/commands/gas_price/utils.rs b/core/tests/testkit/src/commands/gas_price/utils.rs index 889b10859425..07538544c333 100644 --- a/core/tests/testkit/src/commands/gas_price/utils.rs +++ b/core/tests/testkit/src/commands/gas_price/utils.rs @@ -262,7 +262,7 @@ pub async fn commit_cost_of_deploys( bytecode.clone(), constructor_calldata.clone(), rng.gen_biguint_range(&BigUint::from(u32::MAX / 2), &BigUint::from(u32::MAX)), - n_accounts.into(), + n_accounts.into(), // TODO: May be incorrect, needs to be checked. ) .await; } @@ -292,7 +292,7 @@ pub async fn commit_cost_of_executes( bytecode.clone(), constructor_calldata.clone(), rng.gen_biguint_range(&BigUint::from(u32::MAX / 2), &BigUint::from(u32::MAX)), - 0.into(), + 0.into(), // TODO: May be incorrect, needs to be checked. 
) .await; process_blocks(tester, 1).await; diff --git a/core/tests/testkit/src/commands/revert_block.rs b/core/tests/testkit/src/commands/revert_block.rs index 32cd06496d41..f2241d04cef6 100644 --- a/core/tests/testkit/src/commands/revert_block.rs +++ b/core/tests/testkit/src/commands/revert_block.rs @@ -1,7 +1,8 @@ use num::BigUint; use std::time::Instant; -use zksync_core::genesis::ensure_genesis_state; +use zksync_core::genesis::{ensure_genesis_state, GenesisParams}; +use zksync_types::L2ChainId; use crate::commands::utils::{ create_first_block, create_test_accounts, get_root_hashes, get_test_config, @@ -16,7 +17,14 @@ pub async fn test_revert_blocks() { let test_db_manager = TestDatabaseManager::new().await; let db = test_db_manager.get_db(); - ensure_genesis_state(db.clone(), &config); + ensure_genesis_state( + &mut storage, + L2ChainId(config.chain.eth.zksync_network_id), + GenesisParams::MainNode { + first_validator: config.eth_sender.sender.operator_commit_eth_addr, + }, + ) + .await; println!("deploying contracts"); let deploy_timer = Instant::now(); diff --git a/core/tests/testkit/src/commands/upgrade_contract.rs b/core/tests/testkit/src/commands/upgrade_contract.rs index c40faf8aa29d..e4eb378be030 100644 --- a/core/tests/testkit/src/commands/upgrade_contract.rs +++ b/core/tests/testkit/src/commands/upgrade_contract.rs @@ -1,7 +1,8 @@ use num::BigUint; use std::time::Instant; -use zksync_core::genesis::ensure_genesis_state; +use zksync_core::genesis::{ensure_genesis_state, GenesisParams}; +use zksync_types::L2ChainId; use crate::commands::utils::{ create_first_block, create_test_accounts, get_root_hashes, get_test_config, @@ -17,7 +18,14 @@ pub async fn test_upgrade_contract() { let test_db_manager = TestDatabaseManager::new().await; let db = test_db_manager.get_db(); - ensure_genesis_state(db.clone(), &config); + ensure_genesis_state( + &mut storage, + L2ChainId(config.chain.eth.zksync_network_id), + GenesisParams::MainNode { + first_validator: config.eth_sender.sender.operator_commit_eth_addr, + }, + ) + .await; println!("deploying contracts"); let deploy_timer = Instant::now(); diff --git a/core/tests/testkit/src/commands/utils.rs b/core/tests/testkit/src/commands/utils.rs index 48d4bf4dd8ea..7a32d40d3a24 100644 --- a/core/tests/testkit/src/commands/utils.rs +++ b/core/tests/testkit/src/commands/utils.rs @@ -11,6 +11,7 @@ use crate::utils::load_test_bytecode_and_calldata; use zksync_config::ZkSyncConfig; use zksync_contracts::zksync_contract; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_eth_client::EthInterface; use zksync_storage::db::Database; use zksync_types::{l1::L1Tx, tokens::ETHEREUM_ADDRESS, Address, L1BatchNumber, H256}; @@ -65,7 +66,7 @@ impl TestDatabaseManager { pub async fn connect_to_postgres(&self) -> StorageProcessor<'static> { // This method is currently redundant, but it was created so that if some tweaks would be required, // it'll be easier to introduce them through `TestDatabaseManager`. - StorageProcessor::establish_connection(true).await + StorageProcessor::establish_connection_blocking(true) } pub fn create_pool(&self) -> ConnectionPool { @@ -289,7 +290,7 @@ pub async fn perform_transactions( bytecode, constructor_calldata, fee_amount.clone(), - 0.into(), + 0.into(), // TODO: May be incorrect, needs to be checked. 
) .await; println!("Deploy contract test success"); diff --git a/core/tests/testkit/src/eth_provider.rs b/core/tests/testkit/src/eth_provider.rs index b0d2b4a527b1..7d59e9bfd773 100644 --- a/core/tests/testkit/src/eth_provider.rs +++ b/core/tests/testkit/src/eth_provider.rs @@ -1,6 +1,6 @@ use anyhow::format_err; use num::BigUint; -use zksync_eth_client::clients::http_client::{Error, EthInterface}; +use zksync_eth_client::{types::Error, BoundEthInterface, EthInterface}; use zksync_types::ethabi; use zksync_types::web3::{ contract::{tokens::Tokenize, Options}, @@ -10,7 +10,7 @@ use zksync_types::web3::{ use zksync_types::L1ChainId; use zksync_contracts::{erc20_contract, zksync_contract}; -use zksync_eth_client::ETHDirectClient; +use zksync_eth_client::clients::http::SigningClient; use zksync_eth_signer::PrivateKeySigner; use zksync_types::aggregated_operations::{ BlocksCommitOperation, BlocksExecuteOperation, BlocksProofOperation, @@ -27,7 +27,7 @@ const DEFAULT_PRIORITY_FEE: usize = 5; // 5 wei, doesn't really matter /// Used to sign and post ETH transactions for the zkSync contracts. #[derive(Debug, Clone)] pub struct EthereumProvider { - pub main_contract_eth_client: ETHDirectClient, + pub main_contract_eth_client: SigningClient, pub erc20_abi: ethabi::Contract, pub address: Address, } @@ -44,7 +44,7 @@ impl EthereumProvider { .expect("failed get address from private key"); let eth_signer = PrivateKeySigner::new(private_key); - let main_contract_eth_client = ETHDirectClient::new( + let main_contract_eth_client = SigningClient::new( transport, zksync_contract(), address, @@ -204,6 +204,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + // TODO: Add interaction with the default bridge to testkit. todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( @@ -252,6 +253,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + // TODO: Add interaction with the default bridge to testkit. todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( @@ -295,6 +297,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + // TODO: Add interaction with the default bridge to testkit. todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( @@ -343,6 +346,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + // TODO: Add interaction with the default bridge to testkit. todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( @@ -446,6 +450,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + // TODO: Add interaction with the default bridge to testkit. 
todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( @@ -587,7 +592,7 @@ pub struct EthExecResult { impl EthExecResult { pub async fn new( receipt: TransactionReceipt, - client: ÐDirectClient, + client: &SigningClient, ) -> Self { let revert_reason = if receipt.status == Some(U64::from(1)) { None @@ -641,7 +646,7 @@ impl EthExecResult { } async fn send_raw_tx_wait_confirmation( - client: ÐDirectClient, + client: &SigningClient, raw_tx: Vec, ) -> Result { let tx_hash = client diff --git a/core/tests/testkit/src/tester.rs b/core/tests/testkit/src/tester.rs index 3e1f2ca090fc..f341f9f8067d 100644 --- a/core/tests/testkit/src/tester.rs +++ b/core/tests/testkit/src/tester.rs @@ -387,6 +387,10 @@ impl Tester { ); } + // TODO: Currently this method relies on our ability to deterministically generate the contract address. + // Right now, it can only be done if we know the deployment nonce: amount of contracts deployed from the address. + // In the future, it may change, so temporarily we require caller to explicitly tell how many contracts were deployed + // from this address (including ones that used `CREATE`/`CREATE2`). pub async fn deploy_contract( &mut self, from: &AccountHandler, diff --git a/core/tests/testkit/src/utils.rs b/core/tests/testkit/src/utils.rs index ad407218df3a..a29ba7cd9ded 100644 --- a/core/tests/testkit/src/utils.rs +++ b/core/tests/testkit/src/utils.rs @@ -7,7 +7,7 @@ use std::path::PathBuf; use zksync_utils::parse_env; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage}; -use zksync_eth_client::ETHDirectClient; +use zksync_eth_client::{clients::http::SigningClient, EthInterface}; use zksync_eth_signer::PrivateKeySigner; use zksync_types::{l1::L1Tx, web3::types::TransactionReceipt, Address}; use zksync_utils::u256_to_biguint; @@ -50,7 +50,7 @@ pub fn is_token_eth(token_address: Address) -> bool { /// Get fee paid in wei for tx execution pub async fn get_executed_tx_fee( - client: ÐDirectClient, + client: &SigningClient, receipt: &TransactionReceipt, ) -> anyhow::Result { let gas_used = receipt.gas_used.ok_or_else(|| { diff --git a/core/tests/ts-integration/contracts/counter/counter.sol b/core/tests/ts-integration/contracts/counter/counter.sol index 841e4caa7d7e..f286339701be 100644 --- a/core/tests/ts-integration/contracts/counter/counter.sol +++ b/core/tests/ts-integration/contracts/counter/counter.sol @@ -8,12 +8,16 @@ contract Counter { function increment(uint256 x) public { value += x; } + function incrementWithRevertPayable(uint256 x, bool shouldRevert) payable public returns (uint256) { + return incrementWithRevert(x, shouldRevert); + } - function incrementWithRevert(uint256 x, bool shouldRevert) public { + function incrementWithRevert(uint256 x, bool shouldRevert) public returns (uint256) { value += x; if(shouldRevert) { revert("This method always reverts"); } + return value; } function set(uint256 x) public { diff --git a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol index 409f3d16b372..6a09e7f09134 100644 --- a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol +++ b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol @@ -75,8 +75,6 @@ library RLPEncoder { /// @notice Uses little endian ordering (The least significant byte has index `0`). 
/// NOTE: returns `0` for `0` function _highestByteSet(uint256 _number) private pure returns (uint256 hbs) { - // TODO: for optimization, the comparison can be replaced with bitwise operations - // should be resolver after evaluating the cost of opcodes. if (_number >= 2**128) { _number >>= 128; hbs += 16; diff --git a/core/tests/ts-integration/contracts/paymaster-nonce/Paymaster.sol b/core/tests/ts-integration/contracts/paymaster-nonce/Paymaster.sol new file mode 100644 index 000000000000..6fd219ff00ef --- /dev/null +++ b/core/tests/ts-integration/contracts/paymaster-nonce/Paymaster.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IPaymaster, ExecutionResult, PAYMASTER_VALIDATION_SUCCESS_MAGIC} from "../custom-account/interfaces/IPaymaster.sol"; +import {IPaymasterFlow} from "../custom-account/interfaces/IPaymasterFlow.sol"; +import {TransactionHelper, Transaction} from "../custom-account/TransactionHelper.sol"; + +import "../custom-account/Constants.sol"; + +contract Paymaster is IPaymaster { + uint256 constant PRICE_FOR_PAYING_FEES = 1; + + modifier onlyBootloader() { + require( + msg.sender == BOOTLOADER_FORMAL_ADDRESS, + "Only bootloader can call this method" + ); + // Continue execution if called from the bootloader. + _; + } + + constructor() {} + + function validateAndPayForPaymasterTransaction( + bytes32, + bytes32, + Transaction calldata _transaction + ) external payable returns (bytes4 magic, bytes memory context) { + // By default we consider the transaction as accepted. + magic = PAYMASTER_VALIDATION_SUCCESS_MAGIC; + require( + _transaction.paymasterInput.length >= 4, + "The standard paymaster input must be at least 4 bytes long" + ); + + uint256 txNonce = _transaction.nonce; + + if (txNonce == 0) { + revert("Nonce is zerooo"); + } + + // Note, that while the minimal amount of ETH needed is tx.gasPrice * tx.gasLimit, + // neither paymaster nor account are allowed to access this context variable. + uint256 requiredETH = _transaction.gasLimit * _transaction.maxFeePerGas; + + // The bootloader never returns any data, so it can safely be ignored here. + (bool success, ) = payable(BOOTLOADER_FORMAL_ADDRESS).call{ + value: requiredETH + }(""); + require(success, "Failed to transfer funds to the bootloader"); + } + + function postTransaction( + bytes calldata _context, + Transaction calldata _transaction, + bytes32, + bytes32, + ExecutionResult _txResult, + uint256 _maxRefundedGas + ) external payable override { + // Refunds are not supported yet. 
+ } + + receive() external payable {} +} diff --git a/core/tests/ts-integration/contracts/yul/Empty.yul b/core/tests/ts-integration/contracts/yul/Empty.yul new file mode 100644 index 000000000000..c84dacf0994f --- /dev/null +++ b/core/tests/ts-integration/contracts/yul/Empty.yul @@ -0,0 +1,9 @@ +object "Empty" { + code { + mstore(0, 0) + return(0, 32) + } + object "Empty_deployed" { + code { } + } +} diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 1828a5547c78..039a465d551f 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -2,7 +2,7 @@ import '@matterlabs/hardhat-zksync-solc'; export default { zksolc: { - version: '1.3.7', + version: '1.3.10', compilerSource: 'binary', settings: { isSystem: true diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index eece51e49d99..a797cc816ad8 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -4,11 +4,13 @@ "license": "MIT", "private": true, "scripts": { - "test": "zk f jest", - "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts" + "test": "zk f jest --forceExit --testTimeout 60000", + "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts", + "api-test": "zk f jest -- api/web3.test.ts" }, "devDependencies": { - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", + "@matterlabs/hardhat-zksync-solc": "^0.3.15", + "@matterlabs/hardhat-zksync-deploy": "^0.6.1", "@types/jest": "^29.0.3", "@types/node": "^14.14.5", "@types/node-fetch": "^2.5.7", diff --git a/core/tests/ts-integration/scripts/compile-yul.ts b/core/tests/ts-integration/scripts/compile-yul.ts new file mode 100644 index 000000000000..00e0a4179e32 --- /dev/null +++ b/core/tests/ts-integration/scripts/compile-yul.ts @@ -0,0 +1,89 @@ +import * as hre from 'hardhat'; +import * as fs from 'fs'; +import { spawn as _spawn } from 'child_process'; + +import { getZksolcPath, getZksolcUrl, saltFromUrl } from '@matterlabs/hardhat-zksync-solc'; + +const COMPILER_VERSION = '1.3.10'; +const IS_COMPILER_PRE_RELEASE = false; + +async function compilerLocation(): Promise { + if (IS_COMPILER_PRE_RELEASE) { + const url = getZksolcUrl('https://github.com/matter-labs/zksolc-prerelease', hre.config.zksolc.version); + const salt = saltFromUrl(url); + return await getZksolcPath(COMPILER_VERSION, salt); + } else { + return await getZksolcPath(COMPILER_VERSION, ''); + } +} + +// executes a command in a new shell +// but pipes data to parent's stdout/stderr +export function spawn(command: string) { + command = command.replace(/\n/g, ' '); + const child = _spawn(command, { stdio: 'inherit', shell: true }); + return new Promise((resolve, reject) => { + child.on('error', reject); + child.on('close', (code) => { + code == 0 ? 
resolve(code) : reject(`Child process exited with code ${code}`); + }); + }); +} + +export async function compileYul(path: string, files: string[], outputDirName: string | null) { + if (!files.length) { + console.log(`No test files provided in folder ${path}.`); + return; + } + let paths = preparePaths(path, files, outputDirName); + + const zksolcLocation = await compilerLocation(); + await spawn( + `${zksolcLocation} ${paths.absolutePathSources}/${paths.outputDir} --optimization 3 --system-mode --yul --bin --overwrite -o ${paths.absolutePathArtifacts}/${paths.outputDir}` + ); +} + +export async function compileYulFolder(path: string) { + let files: string[] = (await fs.promises.readdir(path)).filter((fn) => fn.endsWith('.yul')); + for (const file of files) { + await compileYul(path, [file], `${file}`); + } +} + +function preparePaths(path: string, files: string[], outputDirName: string | null): CompilerPaths { + const filePaths = files + .map((val, _) => { + return `sources/${val}`; + }) + .join(' '); + const outputDir = outputDirName || files[0]; + let absolutePathSources = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/${path}`; + + let absolutePathArtifacts = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/${path}/artifacts`; + + return new CompilerPaths(filePaths, outputDir, absolutePathSources, absolutePathArtifacts); +} + +class CompilerPaths { + public filePath: string; + public outputDir: string; + public absolutePathSources: string; + public absolutePathArtifacts: string; + constructor(filePath: string, outputDir: string, absolutePathSources: string, absolutePathArtifacts: string) { + this.filePath = filePath; + this.outputDir = outputDir; + this.absolutePathSources = absolutePathSources; + this.absolutePathArtifacts = absolutePathArtifacts; + } +} + +async function main() { + await compileYulFolder('contracts/yul'); +} + +main() + .then(() => process.exit(0)) + .catch((err) => { + console.error('Error:', err.message || err); + process.exit(1); + }); diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index bf2d8c956f38..7237f61a9882 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -60,11 +60,18 @@ export class TestContextOwner { constructor(env: TestEnvironment) { this.env = env; + this.reporter.message('Using L1 provider: ' + env.l1NodeUrl); + this.reporter.message('Using L2 provider: ' + env.l2NodeUrl); + this.l1Provider = new ethers.providers.JsonRpcProvider(env.l1NodeUrl); - this.l2Provider = new RetryProvider({ - url: env.l2NodeUrl, - timeout: 1200 * 1000 - }); + this.l2Provider = new RetryProvider( + { + url: env.l2NodeUrl, + timeout: 1200 * 1000 + }, + undefined, + this.reporter + ); if (env.network == 'localhost') { // Setup small polling interval on localhost to speed up tests. 
@@ -454,7 +461,7 @@ export async function sendTransfers( nonce: txNonce, gasPrice }); - reporter?.debug(`Inititated ERC20 transfer with nonce: ${tx.nonce}`); + reporter?.debug(`Inititated ERC20 transfer with nonce: ${txNonce}`); // @ts-ignore return tx.then((tx) => { reporter?.debug(`Sent ERC20 transfer tx: ${tx.hash}, nonce: ${tx.nonce}`); diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts index fa363ef58c09..77ed4dd85cc4 100644 --- a/core/tests/ts-integration/src/matchers/transaction.ts +++ b/core/tests/ts-integration/src/matchers/transaction.ts @@ -47,7 +47,7 @@ export async function toBeAccepted( } export async function toBeReverted( - txPromise: Promise, + txPromise: zksync.types.TransactionResponse | Promise, modifiers: MatcherModifier[] = [], additionalInfo?: string ) { @@ -85,7 +85,43 @@ export async function toBeReverted( } } -export async function toBeRejected(txPromise: Promise, errorSubstring?: string, additionalInfo?: string) { +export async function toBeRevertedEthCall( + txPromise: Promise, + revertReason?: string, + encodedRevertReason?: string, + additionalInfo?: string +) { + return await toBeRejectedWithPrefix( + txPromise, + 'call revert exception; VM Exception while processing transaction: reverted with reason string "', + revertReason, + encodedRevertReason, + additionalInfo + ); +} + +export async function toBeRevertedEstimateGas( + txPromise: Promise, + revertReason?: string, + encodedRevertReason?: string, + additionalInfo?: string +) { + return await toBeRejectedWithPrefix( + txPromise, + 'execution reverted: ', + revertReason, + encodedRevertReason, + additionalInfo + ); +} + +async function toBeRejectedWithPrefix( + txPromise: Promise, + prefix: string, + errorSubstring?: string, + dataSubstring?: string, + additionalInfo?: string +) { try { const tx = await txPromise; // Unlike with `toBeReverted` test, we don't even need to wait for the transaction to be executed. @@ -102,7 +138,8 @@ export async function toBeRejected(txPromise: Promise, errorSubstring?: str } catch (error: any) { if (errorSubstring) { // We expect thrown exception to always have the `message` field. - if (!error.message || !error.message.includes(errorSubstring)) { + let fullErrorSubstring = `${prefix}${errorSubstring}`; + if (!error.message || !error.message.includes(fullErrorSubstring)) { const message = new TestMessage() .matcherHint('.toBeRejected') .line('Transaction was expected to be rejected by the API server with the following message:') @@ -116,10 +153,29 @@ export async function toBeRejected(txPromise: Promise, errorSubstring?: str } } + if (dataSubstring) { + // We expect thrown exception to always have the `data` field. + if (!error.message || !error.message.includes(dataSubstring)) { + const message = new TestMessage() + .matcherHint('.toBeRejected') + .line('Transaction was expected to be rejected by the API server with the following data:') + .expected(dataSubstring) + .line("but it wasn't detected. Received error:") + .received(error) + .additional(additionalInfo) + .build(); + + return fail(message); + } + } return pass(); } } +export async function toBeRejected(txPromise: Promise, errorSubstring?: string, additionalInfo?: string) { + return await toBeRejectedWithPrefix(txPromise, '', errorSubstring, undefined, additionalInfo); +} + // Local helper to mark transaction test as passed. 
function pass() { const message = diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index cf5ddb9949cc..22ebb55a3d90 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,15 +1,19 @@ import * as zksync from 'zksync-web3'; import * as ethers from 'ethers'; +import { Reporter } from './reporter'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. */ export class RetryProvider extends zksync.Provider { + private reporter?: Reporter; constructor( url?: string | ethers.ethers.utils.ConnectionInfo | undefined, - network?: ethers.ethers.providers.Networkish | undefined + network?: ethers.ethers.providers.Networkish | undefined, + reporter?: Reporter | undefined ) { super(url, network); + this.reporter = reporter; } override async send(method: string, params: any): Promise { @@ -18,7 +22,7 @@ export class RetryProvider extends zksync.Provider { const result = await super.send(method, params); // If we obtained result not from the first attempt, print a warning. if (retry != 0) { - console.log(`Request for method ${method} took ${retry} retries to succeed`); + this.reporter?.debug(`Request for method ${method} took ${retry} retries to succeed`); } return result; } catch (err: any) { diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 5dbb7b8fe896..8f59288ba5c3 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -49,7 +49,6 @@ export class TestMaster { if (!suiteWalletPK) { throw new Error(`Wallet for ${suiteName} suite was not provided`); } - this.l1Provider = new ethers.providers.JsonRpcProvider(this.env.l1NodeUrl); this.l2Provider = new RetryProvider({ url: this.env.l2NodeUrl, diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts new file mode 100644 index 000000000000..e8e860c4ec19 --- /dev/null +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -0,0 +1,76 @@ +/** + * This suite contains tests checking default ERC-20 contract behavior. 
+ */ + +import { TestMaster } from '../../src'; +import { Token } from '../../src/types'; + +import * as zksync from 'zksync-web3'; +import { ethers } from 'ethers'; +import { BOOTLOADER_FORMAL_ADDRESS } from 'zksync-web3/build/src/utils'; + +describe('Debug methods', () => { + let testMaster: TestMaster; + let alice: zksync.Wallet; + let bob: zksync.Wallet; + let tokenDetails: Token; + let aliceErc20: zksync.Contract; + + beforeAll(async () => { + testMaster = TestMaster.getInstance(__filename); + alice = testMaster.mainAccount(); + bob = testMaster.newEmptyAccount(); + + tokenDetails = testMaster.environment().erc20Token; + aliceErc20 = new zksync.Contract(tokenDetails.l2Address, zksync.utils.IERC20, alice); + }); + + test('Debug sending erc20 token in a block', async () => { + const value = ethers.BigNumber.from(200); + await aliceErc20.transfer(bob.address, value).then((tx: any) => tx.wait()); + let tx = await aliceErc20.transfer(bob.address, value); + let receipt = await tx.wait(); + let blockCallTrace = await testMaster + .mainAccount() + .provider.send('debug_traceBlockByNumber', [receipt.blockNumber.toString(16)]); + let expected = { + error: null, + from: ethers.constants.AddressZero, + gas: expect.any(String), + gasUsed: expect.any(String), + input: expect.any(String), + output: '0x', + revertReason: null, + to: BOOTLOADER_FORMAL_ADDRESS, + type: 'Call', + value: expect.any(String), + calls: expect.any(Array) + }; + for (let i = 0; i < blockCallTrace.length; i++) { + expect(blockCallTrace[i]).toEqual({ result: expected }); + } + expected = { + error: null, + from: ethers.constants.AddressZero, + gas: expect.any(String), + gasUsed: expect.any(String), + input: `0xa9059cbb000000000000000000000000${bob.address + .slice(2, 42) + .toLowerCase()}00000000000000000000000000000000000000000000000000000000000000${value + .toHexString() + .slice(2, 4)}`, + output: '0x', + revertReason: null, + to: BOOTLOADER_FORMAL_ADDRESS, + type: 'Call', + value: '0x0', + calls: expect.any(Array) + }; + let txCallTrace = await testMaster.mainAccount().provider.send('debug_traceTransaction', [tx.hash]); + expect(txCallTrace).toEqual(expected); + }); + + afterAll(async () => { + await testMaster.deinitialize(); + }); +}); diff --git a/core/tests/ts-integration/tests/api/explorer.test.ts b/core/tests/ts-integration/tests/api/explorer.test.ts index c2b17c8c47ff..12335ae4bd63 100644 --- a/core/tests/ts-integration/tests/api/explorer.test.ts +++ b/core/tests/ts-integration/tests/api/explorer.test.ts @@ -2,6 +2,7 @@ import { TestMaster } from '../../src/index'; import * as zksync from 'zksync-web3'; import * as ethers from 'ethers'; import fetch from 'node-fetch'; +import fs from 'fs'; import { anyTransaction, deployContract, @@ -32,6 +33,9 @@ const HEX_VALUE_REGEX = /^0x[\da-fA-F]*$/; // Regular expression to match ISO dates. 
const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; +const ZKSOLC_VERSION = 'v1.3.10'; +const SOLC_VERSION = '0.8.16'; + describe('Tests for the Explorer API', () => { let testMaster: TestMaster; let alice: zksync.Wallet; @@ -64,7 +68,7 @@ describe('Tests for the Explorer API', () => { l1TxCount: expect.any(Number), l2TxCount: expect.any(Number), hash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), - status: 'sealed', + status: expect.stringMatching(/sealed|verified/), timestamp: expect.any(Number) }); @@ -116,7 +120,7 @@ describe('Tests for the Explorer API', () => { number: expect.any(Number), l1TxCount: expect.any(Number), l2TxCount: expect.any(Number), - status: 'sealed', + status: expect.stringMatching(/sealed|verified/), timestamp: expect.any(Number) }); @@ -170,7 +174,8 @@ describe('Tests for the Explorer API', () => { default_aa: expect.stringMatching(HASH_REGEX) }, l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number) + l2FairGasPrice: expect.any(Number), + operatorAddress: expect.stringMatching(/^0x[\da-f]{40}$/) }); expect(apiBlock.number).toEqual(tx.blockNumber); expect(apiBlock.rootHash).toEqual(tx.blockHash); @@ -210,7 +215,8 @@ describe('Tests for the Explorer API', () => { default_aa: expect.stringMatching(HASH_REGEX) }, l1GasPrice: expect.any(Number), - l2FairGasPrice: expect.any(Number) + l2FairGasPrice: expect.any(Number), + operatorAddress: expect.stringMatching(/^0x[\da-f]{40}$/) }); } }); @@ -339,6 +345,7 @@ describe('Tests for the Explorer API', () => { isL1Originated: false, initiatorAddress: alice.address.toLowerCase(), receivedAt: expect.stringMatching(DATE_REGEX), + miniblockTimestamp: expect.any(Number), balanceChanges: expect.any(Array), erc20Transfers: expect.any(Array), data: { @@ -373,6 +380,40 @@ describe('Tests for the Explorer API', () => { } }); + test('Should test /transaction endpoint for L1->L2', async () => { + if (testMaster.isFastMode()) { + // This test requires an L1->L2 transaction to be included, which may be time consuming on stage. 
+ return; + } + + const amount = 1; + const txHandle = await alice.deposit({ to: alice.address, amount, token: erc20.l1Address, approveERC20: true }); + const tx = await txHandle.wait(); + + const apiTx = await query(`/transaction/${tx.transactionHash}`); + expect(apiTx).toMatchObject({ + transactionHash: tx.transactionHash, + blockNumber: tx.blockNumber, + blockHash: tx.blockHash, + indexInBlock: expect.any(Number), + status: expect.stringMatching(/included|verified/), + fee: ethers.utils.hexValue(tx.gasUsed.mul(tx.effectiveGasPrice)), + isL1Originated: true, + initiatorAddress: expect.stringMatching(HEX_VALUE_REGEX), + receivedAt: expect.stringMatching(DATE_REGEX), + miniblockTimestamp: expect.any(Number), + balanceChanges: expect.any(Array), + erc20Transfers: expect.any(Array), + data: { + calldata: expect.stringMatching(HEX_VALUE_REGEX), + contractAddress: expect.stringMatching(ADDRESS_REGEX), + factoryDeps: expect.any(Array), + value: expect.stringMatching(HEX_VALUE_REGEX) + }, + logs: expect.any(Array) + }); + }); + test('Should test /transactions endpoint', async () => { const amount = 1; const bob = testMaster.newEmptyAccount(); @@ -407,6 +448,7 @@ describe('Tests for the Explorer API', () => { isL1Originated: false, initiatorAddress: alice.address.toLowerCase(), receivedAt: expect.stringMatching(DATE_REGEX), + miniblockTimestamp: expect.any(Number), balanceChanges: expect.any(Array), erc20Transfers: expect.any(Array), data: { @@ -568,8 +610,8 @@ describe('Tests for the Explorer API', () => { contractAddress: counterContract.address, contractName: 'contracts/counter/counter.sol:Counter', sourceCode: getContractSource('counter/counter.sol'), - compilerZksolcVersion: 'v1.3.7', - compilerSolcVersion: '0.8.16', + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, optimizationUsed: true, constructorArguments, isSystem: true @@ -599,7 +641,8 @@ describe('Tests for the Explorer API', () => { 'contracts/create/Foo.sol': { content: getContractSource('create/Foo.sol') } }, settings: { - optimizer: { enabled: true } + optimizer: { enabled: true }, + isSystem: true } }; @@ -610,17 +653,47 @@ describe('Tests for the Explorer API', () => { contractName: 'contracts/create/create.sol:Import', sourceCode: standardJsonInput, codeFormat: 'solidity-standard-json-input', - compilerZksolcVersion: 'v1.3.7', - compilerSolcVersion: '0.8.16', + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, optimizationUsed: true, - constructorArguments, - isSystem: true + constructorArguments }; let requestId = await query('/contract_verification', undefined, requestBody); await expectVerifyRequestToSucceed(requestId, importContract.address); }); + test('should test yul contract verification', async () => { + if (process.env.RUN_CONTRACT_VERIFICATION_TEST != 'true') { + // Contract verification test is not requested to run. 
+ return; + } + const contractPath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/Empty.yul`; + const sourceCode = fs.readFileSync(contractPath, 'utf8'); + + const bytecodePath = `${process.env.ZKSYNC_HOME}/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/Empty.yul.zbin`; + const bytecode = fs.readFileSync(bytecodePath); + + const contractFactory = new zksync.ContractFactory([], bytecode, alice); + const deployTx = await contractFactory.deploy(); + const contractAddress = (await deployTx.deployed()).address; + + const requestBody = { + contractAddress, + contractName: 'Empty', + sourceCode, + codeFormat: 'yul-single-file', + compilerZksolcVersion: ZKSOLC_VERSION, + compilerSolcVersion: SOLC_VERSION, + optimizationUsed: true, + constructorArguments: '0x', + isSystem: true + }; + let requestId = await query('/contract_verification', undefined, requestBody); + + await expectVerifyRequestToSucceed(requestId, contractAddress); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index c800884c8783..c8e53bed9ae2 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -105,6 +105,21 @@ describe('web3 API compatibility tests', () => { const blockDetails = await alice.provider.getBlockDetails(1); const block = await alice.provider.getBlock(1); expect(blockDetails.rootHash).toEqual(block.hash); + expect(blockDetails.l1BatchNumber).toEqual(block.l1BatchNumber); + // zks_getL1BatchDetails + const batchDetails = await alice.provider.getL1BatchDetails(block.l1BatchNumber); + expect(batchDetails.number).toEqual(block.l1BatchNumber); + // zks_estimateFee + const response = await alice.provider.send('zks_estimateFee', [ + { from: alice.address, to: alice.address, value: '0x1' } + ]); + const expectedResponse = { + gas_limit: expect.stringMatching(HEX_VALUE_REGEX), + gas_per_pubdata_limit: expect.stringMatching(HEX_VALUE_REGEX), + max_fee_per_gas: expect.stringMatching(HEX_VALUE_REGEX), + max_priority_fee_per_gas: expect.stringMatching(HEX_VALUE_REGEX) + }; + expect(response).toMatchObject(expectedResponse); }); test('Should check the network version', async () => { @@ -599,6 +614,101 @@ describe('web3 API compatibility tests', () => { expect(poorBob.estimateGas({ value: 1, to: alice.address })).toBeRejected('insufficient balance for transfer'); }); + test('Should check API returns correct block for every tag', async () => { + const earliestBlock = await alice.provider.send('eth_getBlockByNumber', ['earliest', true]); + expect(+earliestBlock.number!).toEqual(0); + const committedBlock = await alice.provider.send('eth_getBlockByNumber', ['committed', true]); + expect(+committedBlock.number!).toEqual(expect.any(Number)); + const finalizedBlock = await alice.provider.send('eth_getBlockByNumber', ['finalized', true]); + expect(+finalizedBlock.number!).toEqual(expect.any(Number)); + const latestBlock = await alice.provider.send('eth_getBlockByNumber', ['latest', true]); + expect(+latestBlock.number!).toEqual(expect.any(Number)); + const pendingBlock = await alice.provider.send('eth_getBlockByNumber', ['pending', true]); + expect(pendingBlock).toEqual(null); + }); + + test('Should check sendRawTransaction returns GasPerPubDataLimitZero with 0 gas_per_pubdata_limit', async () => { + const gasPrice = await alice.provider.getGasPrice(); + const chainId = (await 
alice.provider.getNetwork()).chainId; + const address = zksync.Wallet.createRandom().address; + const senderNonce = await alice.getTransactionCount(); + const tx: ethers.providers.TransactionRequest = { + to: address, + from: alice.address, + nonce: senderNonce, + gasLimit: ethers.BigNumber.from(300000), + data: '0x', + value: 0, + chainId, + type: 113, + maxPriorityFeePerGas: gasPrice, + maxFeePerGas: gasPrice, + customData: { + gasPerPubdata: '0' + } + }; + + await expect(alice.sendTransaction(tx)).toBeRejected('gas per pub data limit is zero'); + }); + + test('Should check getLogs works with address/topics in filter', async () => { + // We're sending a transfer from the wallet, so we'll use a new account to make event unique. + let uniqueRecipient = testMaster.newEmptyAccount().address; + const tx = await alice.transfer({ + to: uniqueRecipient, + amount: 1, + token: l2Token + }); + const receipt = await tx.wait(); + const logs = await alice.provider.getLogs({ + fromBlock: receipt.blockNumber, + toBlock: receipt.blockNumber, + address: l2Token, + topics: [ + '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef', + ethers.utils.hexZeroPad(alice.address, 32), + ethers.utils.hexZeroPad(uniqueRecipient, 32) + ] + }); + expect(logs).toHaveLength(1); + expect(logs[0].transactionHash).toEqual(tx.hash); + }); + + test('Should check getLogs endpoint works properly with blocks', async () => { + const earliestLogs = alice.provider.send('eth_getLogs', [ + { + fromBlock: 'earliest', + // we have to set this parameter to avoid `Query returned more than 10000 results. Try with this block range` error + toBlock: '1' + } + ]); + await expect(earliestLogs).resolves.not.toThrow(); + + const committedLogs = alice.provider.send('eth_getLogs', [ + { + fromBlock: 'committed', + address: alice.address + } + ]); + await expect(committedLogs).resolves.not.toThrow(); + + const finalizedLogs = alice.provider.send('eth_getLogs', [ + { + fromBlock: 'finalized', + address: alice.address + } + ]); + await expect(finalizedLogs).resolves.not.toThrow(); + + const latestLogs = alice.provider.send('eth_getLogs', [ + { + fromBlock: 'latest', + address: alice.address + } + ]); + await expect(latestLogs).resolves.not.toThrow(); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index 103be2c468a7..e06c7ae97229 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -98,6 +98,8 @@ describe('Smart contract behavior checks', () => { const infiniteLoop = await deployContract(alice, contracts.infinite, []); // Test eth_call first + // await expect(infiniteLoop.callStatic.infiniteLoop()).toBeRejected('cannot estimate transaction: out of gas'); + // ...and then an actual transaction await expect(infiniteLoop.infiniteLoop({ gasLimit: 1_000_000 })).toBeReverted([]); }); @@ -198,8 +200,16 @@ describe('Smart contract behavior checks', () => { test('Should return correct error during fee estimation', async () => { const errorContract = await deployContract(alice, contracts.error, []); - await expect(errorContract.estimateGas.require_long()).toBeRejected('longlonglong'); - await expect(errorContract.require_long()).toBeRejected('longlonglong'); + await expect(errorContract.estimateGas.require_long()).toBeRevertedEstimateGas('longlonglong'); + await expect(errorContract.require_long()).toBeRevertedEthCall('longlonglong'); + await 
expect(errorContract.estimateGas.new_error()).toBeRevertedEstimateGas( + undefined, + '0x157bea60000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000046461746100000000000000000000000000000000000000000000000000000000' + ); + await expect(errorContract.callStatic.new_error()).toBeRevertedEthCall( + undefined, + '0x157bea60000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000046461746100000000000000000000000000000000000000000000000000000000' + ); }); test('Should check block properties for tx execution', async () => { diff --git a/core/tests/ts-integration/tests/custom-account.test.ts b/core/tests/ts-integration/tests/custom-account.test.ts index a488215fe2c1..0103b9ea7e49 100644 --- a/core/tests/ts-integration/tests/custom-account.test.ts +++ b/core/tests/ts-integration/tests/custom-account.test.ts @@ -219,12 +219,14 @@ describe('Tests for the custom account behavior', () => { const transfer = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); const nonce = await alice.provider.getTransactionCount(badCustomAccount.address); - // Create a *promise* that would await for the rejection. - // Even though we use `toBeReverted` matcher, we'll check that it's actually rejected based on the nonce. - // However, we use `await` on the `sendTransaction` to make sure that tx is past the API server checks. - const rejectionCheckPromise = expect( - await sendCustomAccountTransaction(transfer, alice.provider, badCustomAccount.address, undefined, nonce + 1) - ).toBeReverted(); + // Not using .toBeReverted matcher here in part because of BFT-170 + const delayedTx = await sendCustomAccountTransaction( + transfer, + alice.provider, + badCustomAccount.address, + undefined, + nonce + 1 + ); // Increase nonce and set flag to do many calculations during validation. const validationGasLimit = +process.env.CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT!; @@ -233,7 +235,18 @@ describe('Tests for the custom account behavior', () => { sendCustomAccountTransaction(tx, alice.provider, badCustomAccount.address, undefined, nonce) ).toBeAccepted(); - await rejectionCheckPromise; + try { + const delayedTxReceipt = await delayedTx.wait(); + fail( + 'Transaction was expected to be reverted, but it succeeded. Receipt:' + + JSON.stringify(delayedTxReceipt, null, 2) + ); + } catch (e: any) { + // We expect the transaction to fail in the state-keeper + expect(e.receipt.status).toBe(0); + expect(e.receipt.blockNumber).toBeNull(); + expect(e.receipt.blockHash).toBeNull(); + } }); afterAll(async () => { diff --git a/core/tests/ts-integration/tests/custom-erc20-bridge.test.ts b/core/tests/ts-integration/tests/custom-erc20-bridge.test.ts new file mode 100644 index 000000000000..ef26fd9165cd --- /dev/null +++ b/core/tests/ts-integration/tests/custom-erc20-bridge.test.ts @@ -0,0 +1,103 @@ +/** + * This suite contains tests checking the behavior of custom bridges. 
+ */ + +import { TestMaster } from '../src/index'; +import { Token } from '../src/types'; +import { spawn as _spawn } from 'child_process'; + +import * as zksync from 'zksync-web3'; +import { scaledGasPrice } from '../src/helpers'; +import { + L1ERC20BridgeFactory, + TransparentUpgradeableProxyFactory, + AllowListFactory +} from 'l1-zksync-contracts/typechain'; +import { sleep } from 'zk/build/utils'; + +describe('Tests for the custom bridge behavior', () => { + let testMaster: TestMaster; + let alice: zksync.Wallet; + let bob: zksync.Wallet; + let tokenDetails: Token; + + beforeAll(() => { + testMaster = TestMaster.getInstance(__filename); + alice = testMaster.mainAccount(); + bob = testMaster.newEmptyAccount(); + tokenDetails = testMaster.environment().erc20Token; + }); + + test('Should deploy custom bridge', async () => { + let balance = await alice.getBalanceL1(); + let transferTx = await alice._signerL1().sendTransaction({ + to: bob.address, + value: balance.div(2) + }); + await transferTx.wait(); + + let allowList = new AllowListFactory(alice._signerL1()); + let allowListContract = await allowList.deploy(alice.address); + await allowListContract.deployTransaction.wait(2); + + // load the l1bridge contract + let l1bridgeFactory = new L1ERC20BridgeFactory(alice._signerL1()); + const gasPrice = await scaledGasPrice(alice); + + let l1Bridge = await l1bridgeFactory.deploy( + process.env.CONTRACTS_DIAMOND_PROXY_ADDR!, + allowListContract.address + ); + await l1Bridge.deployTransaction.wait(2); + let l1BridgeProxyFactory = new TransparentUpgradeableProxyFactory(alice._signerL1()); + let l1BridgeProxy = await l1BridgeProxyFactory.deploy(l1Bridge.address, bob.address, '0x'); + const amount = 1000; // 1 wei is enough. + await l1BridgeProxy.deployTransaction.wait(2); + + const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; + const baseCommandL1 = isLocalSetup ? `yarn --cwd /contracts/ethereum` : `cd $ZKSYNC_HOME && yarn l1-contracts`; + let args = `--private-key ${alice.privateKey} --erc20-bridge ${l1BridgeProxy.address}`; + let command = `${baseCommandL1} initialize-bridges ${args}`; + await spawn(command); + await sleep(2); + + await allowListContract.setAccessMode(l1BridgeProxy.address, 2); + let l1bridge2 = new L1ERC20BridgeFactory(alice._signerL1()).attach(l1BridgeProxy.address); + let l2TokenAddress = await l1bridge2.callStatic.l2TokenAddress(tokenDetails.l1Address); + const initialBalanceL1 = await alice.getBalanceL1(tokenDetails.l1Address); + const initialBalanceL2 = await alice.getBalance(l2TokenAddress); + let tx = await alice.deposit({ + token: tokenDetails.l1Address, + amount, + approveERC20: true, + approveOverrides: { + gasPrice + }, + overrides: { + gasPrice + }, + bridgeAddress: l1BridgeProxy.address + }); + + await tx.wait(); + await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.bnToBeEq(initialBalanceL1.sub(amount)); + await expect(alice.getBalance(l2TokenAddress)).resolves.bnToBeEq(initialBalanceL2.add(amount)); + }); + + afterAll(async () => { + await testMaster.deinitialize(); + }); +}); + +// executes a command in a new shell +// but pipes data to parent's stdout/stderr +export function spawn(command: string) { + command = command.replace(/\n/g, ' '); + const child = _spawn(command, { stdio: 'inherit', shell: true }); + return new Promise((resolve, reject) => { + child.on('error', reject); + child.on('close', (code) => { + code == 0 ? 
resolve(code) : reject(`Child process exited with code ${code}`); + }); + }); +} diff --git a/core/tests/ts-integration/tests/erc20.test.ts b/core/tests/ts-integration/tests/erc20.test.ts index b5ffb8f24537..f38b24c1d780 100644 --- a/core/tests/ts-integration/tests/erc20.test.ts +++ b/core/tests/ts-integration/tests/erc20.test.ts @@ -197,6 +197,44 @@ describe('ERC20 contract checks', () => { await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.bnToBeEq(initialBalance); }); + test('Can perform a deposit with precalculated max value', async () => { + const maxAmount = await alice.getBalanceL1(tokenDetails.l1Address); + + // Approving the needed allowance to ensure that the user has enough funds. + await (await alice.approveERC20(tokenDetails.l1Address, maxAmount)).wait(); + + const depositFee = await alice.getFullRequiredDepositFee({ + token: tokenDetails.l1Address + }); + const l1Fee = depositFee.l1GasLimit.mul(depositFee.maxFeePerGas! || depositFee.gasPrice!); + const l2Fee = depositFee.baseCost; + + const aliceETHBalance = await alice.getBalanceL1(); + if (aliceETHBalance.lt(l1Fee.add(l2Fee))) { + throw new Error('Not enough ETH to perform a deposit'); + } + + const l2ERC20BalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ + { wallet: alice, change: maxAmount } + ]); + + const overrides: ethers.Overrides = depositFee.gasPrice + ? { gasPrice: depositFee.gasPrice } + : { + maxFeePerGas: depositFee.maxFeePerGas, + maxPriorityFeePerGas: depositFee.maxPriorityFeePerGas + }; + overrides.gasLimit = depositFee.l1GasLimit; + const depositOp = await alice.deposit({ + token: tokenDetails.l1Address, + amount: maxAmount, + l2GasLimit: depositFee.l2GasLimit, + overrides + }); + + await expect(depositOp).toBeAccepted([l2ERC20BalanceChange]); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/ether.test.ts b/core/tests/ts-integration/tests/ether.test.ts index 8cecaff17c5c..4709f349c356 100644 --- a/core/tests/ts-integration/tests/ether.test.ts +++ b/core/tests/ts-integration/tests/ether.test.ts @@ -7,7 +7,7 @@ import { shouldChangeETHBalances, shouldOnlyTakeFee } from '../src/modifiers/bal import { checkReceipt } from '../src/modifiers/receipt-check'; import * as zksync from 'zksync-web3'; -import { BigNumber } from 'ethers'; +import { BigNumber, Overrides } from 'ethers'; import { scaledGasPrice } from '../src/helpers'; const ETH_ADDRESS = zksync.utils.ETH_ADDRESS; @@ -29,7 +29,6 @@ describe('ETH token checks', () => { // Unfortunately, since fee is taken in ETH, we must calculate the L1 ETH balance diff explicitly. const l1EthBalanceBefore = await alice.getBalanceL1(); - // No need to check fee as the L1->L2 are free for now const l2ethBalanceChange = await shouldChangeETHBalances([{ wallet: alice, change: amount }], { l1ToL2: true }); @@ -180,6 +179,37 @@ describe('ETH token checks', () => { await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted(); }); + test('Can perform a deposit with precalculated max value', async () => { + const depositFee = await alice.getFullRequiredDepositFee({ + token: ETH_ADDRESS + }); + const l1Fee = depositFee.l1GasLimit.mul(depositFee.maxFeePerGas! || depositFee.gasPrice!); + const l2Fee = depositFee.baseCost; + + const maxAmount = (await alice.getBalanceL1()).sub(l1Fee).sub(l2Fee); + + const l2ethBalanceChange = await shouldChangeETHBalances([{ wallet: alice, change: maxAmount }], { + l1ToL2: true + }); + + const overrides: Overrides = depositFee.gasPrice + ? 
{ gasPrice: depositFee.gasPrice } + : { + maxFeePerGas: depositFee.maxFeePerGas, + maxPriorityFeePerGas: depositFee.maxPriorityFeePerGas + }; + overrides.gasLimit = depositFee.l1GasLimit; + + const depositOp = await alice.deposit({ + token: ETH_ADDRESS, + amount: maxAmount, + l2GasLimit: depositFee.l2GasLimit, + overrides + }); + + await expect(depositOp).toBeAccepted([l2ethBalanceChange]); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index a4f0b0e9628d..17ad74349825 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -166,7 +166,7 @@ async function updateReport( } async function killServerAndWaitForShutdown(provider: zksync.Provider) { - await utils.exec('pkill zksync_server'); + await utils.exec('pkill -9 zksync_server'); // Wait until it's really stopped. let iter = 0; while (iter < 30) { diff --git a/core/tests/ts-integration/tests/l1.test.ts b/core/tests/ts-integration/tests/l1.test.ts index 44bd543519fe..d719350e4c24 100644 --- a/core/tests/ts-integration/tests/l1.test.ts +++ b/core/tests/ts-integration/tests/l1.test.ts @@ -374,6 +374,13 @@ function getOverheadForTransaction( blockOverheadForTransaction = overheadForLength; } + // The overhead for possible published public data + // let maxPubdataInTx = ceilDiv(bodyGasLimit, gasPricePerPubdata); + // let overheadForPublicData = ceilDiv(maxPubdataInTx.mul(maxBlockOverhead), MAX_PUBDATA_PER_BLOCK); + // if (overheadForPublicData.gt(blockOverheadForTransaction)) { + // blockOverheadForTransaction = overheadForPublicData; + // } + // The overhead for gas that could be used to use single-instance circuits let overheadForSingleInstanceCircuits = ceilDiv(bodyGasLimit.mul(maxBlockOverhead), L2_TX_MAX_GAS_LIMIT); if (overheadForSingleInstanceCircuits.gt(blockOverheadForTransaction)) { diff --git a/core/tests/ts-integration/tests/mempool.test.ts b/core/tests/ts-integration/tests/mempool.test.ts index d6c40bdcaacc..1fb3e19e38b2 100644 --- a/core/tests/ts-integration/tests/mempool.test.ts +++ b/core/tests/ts-integration/tests/mempool.test.ts @@ -99,19 +99,31 @@ describe('Tests for the mempool behavior', () => { const poorBob = testMaster.newEmptyAccount(); const nonce = 0; // No transactions from this account were sent. - const gasForTransfer = await alice.estimateGas({ to: alice.address }); + const gasLimit = await alice.estimateGas({ to: alice.address }); const gasPrice = await alice.provider.getGasPrice(); - const fund = gasForTransfer.mul(gasPrice).mul(13).div(10); + const fund = gasLimit.mul(gasPrice).mul(13).div(10); await alice.sendTransaction({ to: poorBob.address, value: fund }).then((tx) => tx.wait()); - // Create a *promise* that would await for the rejection. - // Even though we use `toBeReverted` matcher, we'll check that it's actually rejected based on the nonce. - // However, we use `await` on the `sendTransaction` to make sure that tx is past the API server checks. 
- const rejectionCheckPromise = expect( - await poorBob.sendTransaction({ to: poorBob.address, nonce: nonce + 1 }) - ).toBeReverted(); + // Not using .toBeReverted matcher here in part because of BFT-170 + const delayedTx = await poorBob.sendTransaction({ + to: poorBob.address, + nonce: nonce + 1 + }); + await expect(poorBob.sendTransaction({ to: poorBob.address, nonce })).toBeAccepted(); - await rejectionCheckPromise; + + try { + const delayedTxReceipt = await delayedTx.wait(); + fail( + 'Transaction was expected to be reverted, but it succeeded. Receipt:' + + JSON.stringify(delayedTxReceipt, null, 2) + ); + } catch (e: any) { + // We expect the transaction to fail in the state-keeper + expect(e.receipt.status).toBe(0); + expect(e.receipt.blockNumber).toBeNull(); + expect(e.receipt.blockHash).toBeNull(); + } // Now check that there is only one executed transaction for the account. await expect(poorBob.getTransactionCount()).resolves.toEqual(1); diff --git a/core/tests/ts-integration/tests/paymaster.test.ts b/core/tests/ts-integration/tests/paymaster.test.ts index 56d1cbf83ddc..573e4f9adcb3 100644 --- a/core/tests/ts-integration/tests/paymaster.test.ts +++ b/core/tests/ts-integration/tests/paymaster.test.ts @@ -3,7 +3,7 @@ */ import { TestMaster } from '../src/index'; import * as zksync from 'zksync-web3'; -import { Provider, Wallet, utils } from 'zksync-web3'; +import { Provider, Wallet, utils, Contract } from 'zksync-web3'; import * as ethers from 'ethers'; import { deployContract, getTestContract } from '../src/helpers'; import { L2_ETH_PER_ACCOUNT } from '../src/context-owner'; @@ -11,6 +11,9 @@ import { checkReceipt } from '../src/modifiers/receipt-check'; import { extractFee } from '../src/modifiers/balance-checker'; import { TestMessage } from '../src/matchers/matcher-helpers'; import { Address } from 'zksync-web3/build/src/types'; +import * as hre from 'hardhat'; +import { Deployer } from '@matterlabs/hardhat-zksync-deploy'; +import { ZkSyncArtifact } from '@matterlabs/hardhat-zksync-deploy/dist/types'; const contracts = { customPaymaster: getTestContract('CustomPaymaster') @@ -225,6 +228,64 @@ describe('Paymaster tests', () => { ).toBeRejected('Paymaster validation error'); }); + it('Should deploy nonce-check paymaster and not fail validation', async function () { + const deployer = new Deployer(hre, alice); + const paymaster = await deployPaymaster(deployer); + const token = testMaster.environment().erc20Token; + + await ( + await deployer.zkWallet.sendTransaction({ + to: paymaster.address, + value: ethers.utils.parseEther('0.01') + }) + ).wait(); + + const paymasterParams = utils.getPaymasterParams(paymaster.address, { + type: 'ApprovalBased', + token: token.l2Address, + minimalAllowance: ethers.BigNumber.from(1), + innerInput: new Uint8Array() + }); + + let bob = testMaster.newEmptyAccount(); + + let aliceTx = await alice.transfer({ + to: bob.address, + amount: 100, + token: token.l2Address + }); + + await aliceTx.wait(); + + let bobTx = bob.transfer({ + to: alice.address, + amount: 1, + token: token.l2Address, + overrides: { + customData: { + gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, + paymasterParams + } + } + }); + + await expect(bobTx).toBeRejected('Nonce is zerooo'); + + const aliceTx2 = alice.transfer({ + to: alice.address, + amount: 1, + token: token.l2Address, + overrides: { + customData: { + gasPerPubdata: utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, + paymasterParams + } + } + }); + + await expect(aliceTx2).toBeAccepted(); + }); + afterAll(async () => { await 
testMaster.deinitialize(); }); @@ -382,3 +443,8 @@ async function sendTxWithTestPaymasterParams( const signedTx = await sender.signTransaction(tx); return await web3Provider.sendTransaction(signedTx); } + +async function deployPaymaster(deployer: Deployer): Promise { + const artifactPay = getTestContract('Paymaster'); + return await deployer.deploy(artifactPay as ZkSyncArtifact); +} diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index e29f0bd44c15..5927bdefe8f6 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -193,7 +193,7 @@ describe('System behavior checks', () => { const amount = 1; // Fund bob's account. - await alice.transfer({ amount, to: bob.address, token: l2Token }); + await alice.transfer({ amount, to: bob.address, token: l2Token }).then((tx) => tx.wait()); await alice .transfer({ amount: L2_ETH_PER_ACCOUNT.div(8), to: bob.address, token: zksync.utils.ETH_ADDRESS }) .then((tx) => tx.wait()); @@ -311,6 +311,22 @@ describe('System behavior checks', () => { } }); + test('should accept transaction with duplicated factory dep', async () => { + const bytecode = contracts.counter.bytecode; + // We need some bytecodes that weren't deployed before to test behavior properly. + const dep1 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); + const dep2 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); + const dep3 = ethers.utils.hexConcat([bytecode, ethers.utils.randomBytes(64)]); + await expect( + alice.sendTransaction({ + to: alice.address, + customData: { + factoryDeps: [dep2, dep1, dep3, dep3, dep1, dep2] + } + }) + ).toBeAccepted(); + }); + it('should reject transaction with huge gas limit', async () => { await expect( alice.sendTransaction({ to: alice.address, gasLimit: ethers.BigNumber.from(2).pow(32) }) diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts index de67acafab1c..9a15e4516aab 100644 --- a/core/tests/ts-integration/typings/jest.d.ts +++ b/core/tests/ts-integration/typings/jest.d.ts @@ -96,6 +96,32 @@ export declare global { * @param additionalInfo Optional message to be included if test fails. */ toBeRejected(errorSubstring?: string, additionalInfo?: string): Promise; + /** + * Checks that eth_call is rejected by the API server. + * Does NOT support `.not` modifier. Use `toBeAccepted` instead. + * + * @param revertReason Optional revert reason of eth_call. + * @param encodedRevertReason Optional RLP encoded revert reason. + * @param additionalInfo Optional message to be included if test fails. + */ + toBeRevertedEthCall( + revertReason?: string, + encodedRevertReason?: string, + additionalInfo?: string + ): Promise; + /** + * Checks that eth_estimateGas is rejected by the API server. + * Does NOT support `.not` modifier. Use `toBeAccepted` instead. + * + * @param revertReason Optional revert reason of eth_call. + * @param encodedRevertReason Optional RLP encoded revert reason. + * @param additionalInfo Optional message to be included if test fails. + */ + toBeRevertedEstimateGas( + revertReason?: string, + encodedRevertReason?: string, + additionalInfo?: string + ): Promise; } } } diff --git a/core/tests/ts-integration/yarn.lock b/core/tests/ts-integration/yarn.lock deleted file mode 100644 index 43cd4fea1a4b..000000000000 --- a/core/tests/ts-integration/yarn.lock +++ /dev/null @@ -1,3092 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
-# yarn lockfile v1 - - -"@ampproject/remapping@^2.1.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d" - integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w== - dependencies: - "@jridgewell/gen-mapping" "^0.1.0" - "@jridgewell/trace-mapping" "^0.3.9" - -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a" - integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q== - dependencies: - "@babel/highlight" "^7.18.6" - -"@babel/compat-data@^7.19.1": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.19.1.tgz#72d647b4ff6a4f82878d184613353af1dd0290f9" - integrity sha512-72a9ghR0gnESIa7jBN53U32FOVCEoztyIlKaNoU05zRhEecduGK9L9c3ww7Mp06JiR+0ls0GBPFJQwwtjn9ksg== - -"@babel/core@^7.11.6", "@babel/core@^7.12.3": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.19.1.tgz#c8fa615c5e88e272564ace3d42fbc8b17bfeb22b" - integrity sha512-1H8VgqXme4UXCRv7/Wa1bq7RVymKOzC7znjyFM8KiEzwFqcKUKYNoQef4GhdklgNvoBXyW4gYhuBNCM5o1zImw== - dependencies: - "@ampproject/remapping" "^2.1.0" - "@babel/code-frame" "^7.18.6" - "@babel/generator" "^7.19.0" - "@babel/helper-compilation-targets" "^7.19.1" - "@babel/helper-module-transforms" "^7.19.0" - "@babel/helpers" "^7.19.0" - "@babel/parser" "^7.19.1" - "@babel/template" "^7.18.10" - "@babel/traverse" "^7.19.1" - "@babel/types" "^7.19.0" - convert-source-map "^1.7.0" - debug "^4.1.0" - gensync "^1.0.0-beta.2" - json5 "^2.2.1" - semver "^6.3.0" - -"@babel/generator@^7.19.0", "@babel/generator@^7.7.2": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.19.0.tgz#785596c06425e59334df2ccee63ab166b738419a" - integrity sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg== - dependencies: - "@babel/types" "^7.19.0" - "@jridgewell/gen-mapping" "^0.3.2" - jsesc "^2.5.1" - -"@babel/helper-compilation-targets@^7.19.1": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.19.1.tgz#7f630911d83b408b76fe584831c98e5395d7a17c" - integrity sha512-LlLkkqhCMyz2lkQPvJNdIYU7O5YjWRgC2R4omjCTpZd8u8KMQzZvX4qce+/BluN1rcQiV7BoGUpmQ0LeHerbhg== - dependencies: - "@babel/compat-data" "^7.19.1" - "@babel/helper-validator-option" "^7.18.6" - browserslist "^4.21.3" - semver "^6.3.0" - -"@babel/helper-environment-visitor@^7.18.9": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz#0c0cee9b35d2ca190478756865bb3528422f51be" - integrity sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg== - -"@babel/helper-function-name@^7.19.0": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz#941574ed5390682e872e52d3f38ce9d1bef4648c" - integrity sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w== - dependencies: - "@babel/template" "^7.18.10" - "@babel/types" "^7.19.0" - -"@babel/helper-hoist-variables@^7.18.6": - version "7.18.6" - resolved 
"https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678" - integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-module-imports@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz#1e3ebdbbd08aad1437b428c50204db13c5a3ca6e" - integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-module-transforms@^7.19.0": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.19.0.tgz#309b230f04e22c58c6a2c0c0c7e50b216d350c30" - integrity sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ== - dependencies: - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-module-imports" "^7.18.6" - "@babel/helper-simple-access" "^7.18.6" - "@babel/helper-split-export-declaration" "^7.18.6" - "@babel/helper-validator-identifier" "^7.18.6" - "@babel/template" "^7.18.10" - "@babel/traverse" "^7.19.0" - "@babel/types" "^7.19.0" - -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.8.0": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.19.0.tgz#4796bb14961521f0f8715990bee2fb6e51ce21bf" - integrity sha512-40Ryx7I8mT+0gaNxm8JGTZFUITNqdLAgdg0hXzeVZxVD6nFsdhQvip6v8dqkRHzsz1VFpFAaOCHNn0vKBL7Czw== - -"@babel/helper-simple-access@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz#d6d8f51f4ac2978068df934b569f08f29788c7ea" - integrity sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-split-export-declaration@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075" - integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-string-parser@^7.18.10": - version "7.18.10" - resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz#181f22d28ebe1b3857fa575f5c290b1aaf659b56" - integrity sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw== - -"@babel/helper-validator-identifier@^7.18.6": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2" - integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w== - -"@babel/helper-validator-option@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8" - integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw== - 
-"@babel/helpers@^7.19.0": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.19.0.tgz#f30534657faf246ae96551d88dd31e9d1fa1fc18" - integrity sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg== - dependencies: - "@babel/template" "^7.18.10" - "@babel/traverse" "^7.19.0" - "@babel/types" "^7.19.0" - -"@babel/highlight@^7.18.6": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" - integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== - dependencies: - "@babel/helper-validator-identifier" "^7.18.6" - chalk "^2.0.0" - js-tokens "^4.0.0" - -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.18.10", "@babel/parser@^7.19.1": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.19.1.tgz#6f6d6c2e621aad19a92544cc217ed13f1aac5b4c" - integrity sha512-h7RCSorm1DdTVGJf3P2Mhj3kdnkmF/EiysUkzS2TdgAYqyjFdMQJbVuXOBej2SBJaXan/lIVtT6KkGbyyq753A== - -"@babel/plugin-syntax-async-generators@^7.8.4": - version "7.8.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" - integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-bigint@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz#4c9a6f669f5d0cdf1b90a1671e9a146be5300cea" - integrity sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-class-properties@^7.8.3": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" - integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== - dependencies: - "@babel/helper-plugin-utils" "^7.12.13" - -"@babel/plugin-syntax-import-meta@^7.8.3": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz#ee601348c370fa334d2207be158777496521fd51" - integrity sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - -"@babel/plugin-syntax-json-strings@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" - integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-jsx@^7.7.2": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz#a8feef63b010150abd97f1649ec296e849943ca0" - integrity sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/plugin-syntax-logical-assignment-operators@^7.8.3": - version "7.10.4" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" - integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - -"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" - integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-numeric-separator@^7.8.3": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97" - integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - -"@babel/plugin-syntax-object-rest-spread@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" - integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-catch-binding@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" - integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-chaining@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" - integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-top-level-await@^7.8.3": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" - integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw== - dependencies: - "@babel/helper-plugin-utils" "^7.14.5" - -"@babel/plugin-syntax-typescript@^7.7.2": - version "7.18.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.18.6.tgz#1c09cd25795c7c2b8a4ba9ae49394576d4133285" - integrity sha512-mAWAuq4rvOepWCBid55JuRNvpTNf2UGVgoz4JV0fXEKolsVZDzsa4NqCef758WZJj/GDu0gVGItjKFiClTAmZA== - dependencies: - "@babel/helper-plugin-utils" "^7.18.6" - -"@babel/template@^7.18.10", "@babel/template@^7.3.3": - version "7.18.10" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.10.tgz#6f9134835970d1dbf0835c0d100c9f38de0c5e71" - integrity sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA== - dependencies: - "@babel/code-frame" "^7.18.6" - "@babel/parser" 
"^7.18.10" - "@babel/types" "^7.18.10" - -"@babel/traverse@^7.19.0", "@babel/traverse@^7.19.1", "@babel/traverse@^7.7.2": - version "7.19.1" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.19.1.tgz#0fafe100a8c2a603b4718b1d9bf2568d1d193347" - integrity sha512-0j/ZfZMxKukDaag2PtOPDbwuELqIar6lLskVPPJDjXMXjfLb1Obo/1yjxIGqqAJrmfaTIY3z2wFLAQ7qSkLsuA== - dependencies: - "@babel/code-frame" "^7.18.6" - "@babel/generator" "^7.19.0" - "@babel/helper-environment-visitor" "^7.18.9" - "@babel/helper-function-name" "^7.19.0" - "@babel/helper-hoist-variables" "^7.18.6" - "@babel/helper-split-export-declaration" "^7.18.6" - "@babel/parser" "^7.19.1" - "@babel/types" "^7.19.0" - debug "^4.1.0" - globals "^11.1.0" - -"@babel/types@^7.0.0", "@babel/types@^7.18.10", "@babel/types@^7.18.6", "@babel/types@^7.19.0", "@babel/types@^7.3.0", "@babel/types@^7.3.3": - version "7.19.0" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.19.0.tgz#75f21d73d73dc0351f3368d28db73465f4814600" - integrity sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA== - dependencies: - "@babel/helper-string-parser" "^7.18.10" - "@babel/helper-validator-identifier" "^7.18.6" - to-fast-properties "^2.0.0" - -"@bcoe/v8-coverage@^0.2.3": - version "0.2.3" - resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" - integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== - -"@cspotcode/source-map-support@^0.8.0": - version "0.8.1" - resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" - integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== - dependencies: - "@jridgewell/trace-mapping" "0.3.9" - -"@ethersproject/abi@5.7.0", "@ethersproject/abi@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.7.0.tgz#b3f3e045bbbeed1af3947335c247ad625a44e449" - integrity sha512-351ktp42TiRcYB3H1OP8yajPeAQstMW/yCFokj/AthP9bLHzQFPlOrxOcwYEDkUAICmOHljvN4K39OMTMUa9RA== - dependencies: - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/abstract-provider@5.7.0", "@ethersproject/abstract-provider@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/abstract-provider/-/abstract-provider-5.7.0.tgz#b0a8550f88b6bf9d51f90e4795d48294630cb9ef" - integrity sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/web" "^5.7.0" - -"@ethersproject/abstract-signer@5.7.0", "@ethersproject/abstract-signer@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/abstract-signer/-/abstract-signer-5.7.0.tgz#13f4f32117868452191a4649723cb086d2b596b2" - integrity sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ== - dependencies: - 
"@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - -"@ethersproject/address@5.7.0", "@ethersproject/address@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/address/-/address-5.7.0.tgz#19b56c4d74a3b0a46bfdbb6cfcc0a153fc697f37" - integrity sha512-9wYhYt7aghVGo758POM5nqcOMaE168Q6aRLJZwUmiqSrAungkG74gSSeKEIR7ukixesdRZGPgVqme6vmxs1fkA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - -"@ethersproject/base64@5.7.0", "@ethersproject/base64@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/base64/-/base64-5.7.0.tgz#ac4ee92aa36c1628173e221d0d01f53692059e1c" - integrity sha512-Dr8tcHt2mEbsZr/mwTPIQAf3Ai0Bks/7gTw9dSqk1mQvhW3XvRlmDJr/4n+wg1JmCl16NZue17CDh8xb/vZ0sQ== - dependencies: - "@ethersproject/bytes" "^5.7.0" - -"@ethersproject/basex@5.7.0", "@ethersproject/basex@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/basex/-/basex-5.7.0.tgz#97034dc7e8938a8ca943ab20f8a5e492ece4020b" - integrity sha512-ywlh43GwZLv2Voc2gQVTKBoVQ1mti3d8HK5aMxsfu/nRDnMmNqaSJ3r3n85HBByT8OpoY96SXM1FogC533T4zw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - -"@ethersproject/bignumber@5.7.0", "@ethersproject/bignumber@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/bignumber/-/bignumber-5.7.0.tgz#e2f03837f268ba655ffba03a57853e18a18dc9c2" - integrity sha512-n1CAdIHRWjSucQO3MC1zPSVgV/6dy/fjL9pMrPP9peL+QxEg9wOsVqwD4+818B6LUEtaXzVHQiuivzRoxPxUGw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - bn.js "^5.2.1" - -"@ethersproject/bytes@5.7.0", "@ethersproject/bytes@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/bytes/-/bytes-5.7.0.tgz#a00f6ea8d7e7534d6d87f47188af1148d71f155d" - integrity sha512-nsbxwgFXWh9NyYWo+U8atvmMsSdKJprTcICAkvbBffT75qDocbuggBU0SJiVK2MuTrp0q+xvLkTnGMPK1+uA9A== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/constants@5.7.0", "@ethersproject/constants@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/constants/-/constants-5.7.0.tgz#df80a9705a7e08984161f09014ea012d1c75295e" - integrity sha512-DHI+y5dBNvkpYUMiRQyxRBYBefZkJfo70VUkUAsRjcPs47muV9evftfZ0PJVCXYbAiCgght0DtcF9srFQmIgWA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - -"@ethersproject/contracts@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/contracts/-/contracts-5.7.0.tgz#c305e775abd07e48aa590e1a877ed5c316f8bd1e" - integrity sha512-5GJbzEU3X+d33CdfPhcyS+z8MzsTrBGk/sc+G+59+tPa9yFkl6HQ9D6L0QMgNTA9q8dT0XKxxkyp883XsQvbbg== - dependencies: - "@ethersproject/abi" "^5.7.0" - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - -"@ethersproject/hash@5.7.0", "@ethersproject/hash@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/hash/-/hash-5.7.0.tgz#eb7aca84a588508369562e16e514b539ba5240a7" - integrity 
sha512-qX5WrQfnah1EFnO5zJv1v46a8HW0+E5xuBBDTwMFZLuVTx0tbU2kkx15NqdjxecrLGatQN9FGQKpb1FKdHCt+g== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/base64" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/hdnode@5.7.0", "@ethersproject/hdnode@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/hdnode/-/hdnode-5.7.0.tgz#e627ddc6b466bc77aebf1a6b9e47405ca5aef9cf" - integrity sha512-OmyYo9EENBPPf4ERhR7oj6uAtUAhYGqOnIS+jE5pTXvdKBS99ikzq1E7Iv0ZQZ5V36Lqx1qZLeak0Ra16qpeOg== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/json-wallets@5.7.0", "@ethersproject/json-wallets@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/json-wallets/-/json-wallets-5.7.0.tgz#5e3355287b548c32b368d91014919ebebddd5360" - integrity sha512-8oee5Xgu6+RKgJTkvEMl2wDgSPSAQ9MB/3JYjFV9jlKvcYHUXZC+cQp0njgmxdHkYWn8s6/IqIZYm0YWCjO/0g== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - aes-js "3.0.0" - scrypt-js "3.0.1" - -"@ethersproject/keccak256@5.7.0", "@ethersproject/keccak256@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/keccak256/-/keccak256-5.7.0.tgz#3186350c6e1cd6aba7940384ec7d6d9db01f335a" - integrity sha512-2UcPboeL/iW+pSg6vZ6ydF8tCnv3Iu/8tUmLLzWWGzxWKFFqOBQFLo6uLUv6BDrLgCDfN28RJ/wtByx+jZ4KBg== - dependencies: - "@ethersproject/bytes" "^5.7.0" - js-sha3 "0.8.0" - -"@ethersproject/logger@5.7.0", "@ethersproject/logger@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/logger/-/logger-5.7.0.tgz#6ce9ae168e74fecf287be17062b590852c311892" - integrity sha512-0odtFdXu/XHtjQXJYA3u9G0G8btm0ND5Cu8M7i5vhEcE8/HmF4Lbdqanwyv4uQTr2tx6b7fQRmgLrsnpQlmnig== - -"@ethersproject/networks@5.7.1", "@ethersproject/networks@^5.7.0": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/networks/-/networks-5.7.1.tgz#118e1a981d757d45ccea6bb58d9fd3d9db14ead6" - integrity sha512-n/MufjFYv3yFcUyfhnXotyDlNdFb7onmkSy8aQERi2PjNcnWQ66xXxa3XlS8nCcA8aJKJjIIMNJTC7tu80GwpQ== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/pbkdf2@5.7.0", "@ethersproject/pbkdf2@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/pbkdf2/-/pbkdf2-5.7.0.tgz#d2267d0a1f6e123f3771007338c47cccd83d3102" - integrity sha512-oR/dBRZR6GTyaofd86DehG72hY6NpAjhabkhxgr3X2FpJtJuodEl2auADWBZfhDHgVCbu3/H/Ocq2uC6dpNjjw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - -"@ethersproject/properties@5.7.0", 
"@ethersproject/properties@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/properties/-/properties-5.7.0.tgz#a6e12cb0439b878aaf470f1902a176033067ed30" - integrity sha512-J87jy8suntrAkIZtecpxEPxY//szqr1mlBaYlQ0r4RCaiD2hjheqF9s1LVE8vVuJCXisjIP+JgtK/Do54ej4Sw== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/providers@5.7.1": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/providers/-/providers-5.7.1.tgz#b0799b616d5579cd1067a8ebf1fc1ec74c1e122c" - integrity sha512-vZveG/DLyo+wk4Ga1yx6jSEHrLPgmTt+dFv0dv8URpVCRf0jVhalps1jq/emN/oXnMRsC7cQgAF32DcXLL7BPQ== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/base64" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/web" "^5.7.0" - bech32 "1.1.4" - ws "7.4.6" - -"@ethersproject/random@5.7.0", "@ethersproject/random@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/random/-/random-5.7.0.tgz#af19dcbc2484aae078bb03656ec05df66253280c" - integrity sha512-19WjScqRA8IIeWclFme75VMXSBvi4e6InrUNuaR4s5pTF2qNhcGdCUwdxUVGtDDqC00sDLCO93jPQoDUH4HVmQ== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/rlp@5.7.0", "@ethersproject/rlp@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/rlp/-/rlp-5.7.0.tgz#de39e4d5918b9d74d46de93af80b7685a9c21304" - integrity sha512-rBxzX2vK8mVF7b0Tol44t5Tb8gomOHkj5guL+HhzQ1yBh/ydjGnpw6at+X6Iw0Kp3OzzzkcKp8N9r0W4kYSs9w== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/sha2@5.7.0", "@ethersproject/sha2@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/sha2/-/sha2-5.7.0.tgz#9a5f7a7824ef784f7f7680984e593a800480c9fb" - integrity sha512-gKlH42riwb3KYp0reLsFTokByAKoJdgFCwI+CCiX/k+Jm2mbNs6oOaCjYQSlI1+XBVejwH2KrmCbMAT/GnRDQw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - hash.js "1.1.7" - -"@ethersproject/signing-key@5.7.0", "@ethersproject/signing-key@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/signing-key/-/signing-key-5.7.0.tgz#06b2df39411b00bc57c7c09b01d1e41cf1b16ab3" - integrity sha512-MZdy2nL3wO0u7gkB4nA/pEf8lu1TlFswPNmy8AiYkfKTdO6eXBJyUdmHO/ehm/htHw9K/qF8ujnTyUAD+Ry54Q== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - bn.js "^5.2.1" - elliptic "6.5.4" - hash.js "1.1.7" - -"@ethersproject/solidity@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/solidity/-/solidity-5.7.0.tgz#5e9c911d8a2acce2a5ebb48a5e2e0af20b631cb8" - integrity sha512-HmabMd2Dt/raavyaGukF4XxizWKhKQ24DoLtdNbBmNKUOPqwjsKQSdV9GQtj9CBEea9DlzETlVER1gYeXXBGaA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - 
"@ethersproject/strings" "^5.7.0" - -"@ethersproject/strings@5.7.0", "@ethersproject/strings@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/strings/-/strings-5.7.0.tgz#54c9d2a7c57ae8f1205c88a9d3a56471e14d5ed2" - integrity sha512-/9nu+lj0YswRNSH0NXYqrh8775XNyEdUQAuf3f+SmOrnVewcJ5SBNAjF7lpgehKi4abvNNXyf+HX86czCdJ8Mg== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/transactions@5.7.0", "@ethersproject/transactions@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/transactions/-/transactions-5.7.0.tgz#91318fc24063e057885a6af13fdb703e1f993d3b" - integrity sha512-kmcNicCp1lp8qanMTC3RIikGgoJ80ztTyvtsFvCYpSCfkjhD0jZ2LOrnbcuxuToLIUYYf+4XwD1rP+B/erDIhQ== - dependencies: - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - -"@ethersproject/units@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/units/-/units-5.7.0.tgz#637b563d7e14f42deeee39245275d477aae1d8b1" - integrity sha512-pD3xLMy3SJu9kG5xDGI7+xhTEmGXlEqXU4OfNapmfnxLVY4EMSSRp7j1k7eezutBPH7RBN/7QPnwR7hzNlEFeg== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/wallet@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wallet/-/wallet-5.7.0.tgz#4e5d0790d96fe21d61d38fb40324e6c7ef350b2d" - integrity sha512-MhmXlJXEJFBFVKrDLB4ZdDzxcBxQ3rLyCkhNqVu3CDYvR97E+8r01UgrI+TI99Le+aYm/in/0vp86guJuM7FCA== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/json-wallets" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/web@5.7.1", "@ethersproject/web@^5.7.0": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/web/-/web-5.7.1.tgz#de1f285b373149bee5928f4eb7bcb87ee5fbb4ae" - integrity sha512-Gueu8lSvyjBWL4cYsWsjh6MtMwM0+H4HvqFPZfB6dV8ctbP9zFAO73VG1cMWae0FLPCtz0peKPpZY8/ugJJX2w== - dependencies: - "@ethersproject/base64" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/wordlists@5.7.0", "@ethersproject/wordlists@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wordlists/-/wordlists-5.7.0.tgz#8fb2c07185d68c3e09eb3bfd6e779ba2774627f5" - integrity sha512-S2TFNJNfHWVHNE6cNDjbVlZ6MgE17MIxMbMg2zv3wn+3XSJGosL1m9ZVv3GXCf/2ymSsQ+hRI5IzoMJTG6aoVA== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@istanbuljs/load-nyc-config@^1.0.0": - version "1.1.0" - resolved 
"https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" - integrity sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ== - dependencies: - camelcase "^5.3.1" - find-up "^4.1.0" - get-package-type "^0.1.0" - js-yaml "^3.13.1" - resolve-from "^5.0.0" - -"@istanbuljs/schema@^0.1.2": - version "0.1.3" - resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" - integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== - -"@jest/console@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.0.3.tgz#a222ab87e399317a89db88a58eaec289519e807a" - integrity sha512-cGg0r+klVHSYnfE977S9wmpuQ9L+iYuYgL+5bPXiUlUynLLYunRxswEmhBzvrSKGof5AKiHuTTmUKAqRcDY9dg== - dependencies: - "@jest/types" "^29.0.3" - "@types/node" "*" - chalk "^4.0.0" - jest-message-util "^29.0.3" - jest-util "^29.0.3" - slash "^3.0.0" - -"@jest/core@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.0.3.tgz#ba22a9cbd0c7ba36e04292e2093c547bf53ec1fd" - integrity sha512-1d0hLbOrM1qQE3eP3DtakeMbKTcXiXP3afWxqz103xPyddS2NhnNghS7MaXx1dcDt4/6p4nlhmeILo2ofgi8cQ== - dependencies: - "@jest/console" "^29.0.3" - "@jest/reporters" "^29.0.3" - "@jest/test-result" "^29.0.3" - "@jest/transform" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - ansi-escapes "^4.2.1" - chalk "^4.0.0" - ci-info "^3.2.0" - exit "^0.1.2" - graceful-fs "^4.2.9" - jest-changed-files "^29.0.0" - jest-config "^29.0.3" - jest-haste-map "^29.0.3" - jest-message-util "^29.0.3" - jest-regex-util "^29.0.0" - jest-resolve "^29.0.3" - jest-resolve-dependencies "^29.0.3" - jest-runner "^29.0.3" - jest-runtime "^29.0.3" - jest-snapshot "^29.0.3" - jest-util "^29.0.3" - jest-validate "^29.0.3" - jest-watcher "^29.0.3" - micromatch "^4.0.4" - pretty-format "^29.0.3" - slash "^3.0.0" - strip-ansi "^6.0.0" - -"@jest/environment@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.0.3.tgz#7745ec30a954e828e8cc6df6a13280d3b51d8f35" - integrity sha512-iKl272NKxYNQNqXMQandAIwjhQaGw5uJfGXduu8dS9llHi8jV2ChWrtOAVPnMbaaoDhnI3wgUGNDvZgHeEJQCA== - dependencies: - "@jest/fake-timers" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - jest-mock "^29.0.3" - -"@jest/expect-utils@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.0.3.tgz#f5bb86f5565bf2dacfca31ccbd887684936045b2" - integrity sha512-i1xUkau7K/63MpdwiRqaxgZOjxYs4f0WMTGJnYwUKubsNRZSeQbLorS7+I4uXVF9KQ5r61BUPAUMZ7Lf66l64Q== - dependencies: - jest-get-type "^29.0.0" - -"@jest/expect@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.0.3.tgz#9dc7c46354eeb7a348d73881fba6402f5fdb2c30" - integrity sha512-6W7K+fsI23FQ01H/BWccPyDZFrnU9QlzDcKOjrNVU5L8yUORFAJJIpmyxWPW70+X624KUNqzZwPThPMX28aXEQ== - dependencies: - expect "^29.0.3" - jest-snapshot "^29.0.3" - -"@jest/fake-timers@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.0.3.tgz#ad5432639b715d45a86a75c47fd75019bc36b22c" - integrity sha512-tmbUIo03x0TdtcZCESQ0oQSakPCpo7+s6+9mU19dd71MptkP4zCwoeZqna23//pgbhtT1Wq02VmA9Z9cNtvtCQ== - dependencies: - "@jest/types" "^29.0.3" - "@sinonjs/fake-timers" "^9.1.2" - "@types/node" "*" - jest-message-util "^29.0.3" - jest-mock "^29.0.3" - jest-util 
"^29.0.3" - -"@jest/globals@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.0.3.tgz#681950c430fdc13ff9aa89b2d8d572ac0e4a1bf5" - integrity sha512-YqGHT65rFY2siPIHHFjuCGUsbzRjdqkwbat+Of6DmYRg5shIXXrLdZoVE/+TJ9O1dsKsFmYhU58JvIbZRU1Z9w== - dependencies: - "@jest/environment" "^29.0.3" - "@jest/expect" "^29.0.3" - "@jest/types" "^29.0.3" - jest-mock "^29.0.3" - -"@jest/reporters@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.0.3.tgz#735f110e08b44b38729d8dbbb74063bdf5aba8a5" - integrity sha512-3+QU3d4aiyOWfmk1obDerie4XNCaD5Xo1IlKNde2yGEi02WQD+ZQD0i5Hgqm1e73sMV7kw6pMlCnprtEwEVwxw== - dependencies: - "@bcoe/v8-coverage" "^0.2.3" - "@jest/console" "^29.0.3" - "@jest/test-result" "^29.0.3" - "@jest/transform" "^29.0.3" - "@jest/types" "^29.0.3" - "@jridgewell/trace-mapping" "^0.3.15" - "@types/node" "*" - chalk "^4.0.0" - collect-v8-coverage "^1.0.0" - exit "^0.1.2" - glob "^7.1.3" - graceful-fs "^4.2.9" - istanbul-lib-coverage "^3.0.0" - istanbul-lib-instrument "^5.1.0" - istanbul-lib-report "^3.0.0" - istanbul-lib-source-maps "^4.0.0" - istanbul-reports "^3.1.3" - jest-message-util "^29.0.3" - jest-util "^29.0.3" - jest-worker "^29.0.3" - slash "^3.0.0" - string-length "^4.0.1" - strip-ansi "^6.0.0" - terminal-link "^2.0.0" - v8-to-istanbul "^9.0.1" - -"@jest/schemas@^29.0.0": - version "29.0.0" - resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.0.0.tgz#5f47f5994dd4ef067fb7b4188ceac45f77fe952a" - integrity sha512-3Ab5HgYIIAnS0HjqJHQYZS+zXc4tUmTmBH3z83ajI6afXp8X3ZtdLX+nXx+I7LNkJD7uN9LAVhgnjDgZa2z0kA== - dependencies: - "@sinclair/typebox" "^0.24.1" - -"@jest/source-map@^29.0.0": - version "29.0.0" - resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.0.0.tgz#f8d1518298089f8ae624e442bbb6eb870ee7783c" - integrity sha512-nOr+0EM8GiHf34mq2GcJyz/gYFyLQ2INDhAylrZJ9mMWoW21mLBfZa0BUVPPMxVYrLjeiRe2Z7kWXOGnS0TFhQ== - dependencies: - "@jridgewell/trace-mapping" "^0.3.15" - callsites "^3.0.0" - graceful-fs "^4.2.9" - -"@jest/test-result@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.0.3.tgz#b03d8ef4c58be84cd5d5d3b24d4b4c8cabbf2746" - integrity sha512-vViVnQjCgTmbhDKEonKJPtcFe9G/CJO4/Np4XwYJah+lF2oI7KKeRp8t1dFvv44wN2NdbDb/qC6pi++Vpp0Dlg== - dependencies: - "@jest/console" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/istanbul-lib-coverage" "^2.0.0" - collect-v8-coverage "^1.0.0" - -"@jest/test-sequencer@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.0.3.tgz#0681061ad21fb8e293b49c4fdf7e631ca79240ba" - integrity sha512-Hf4+xYSWZdxTNnhDykr8JBs0yBN/nxOXyUQWfotBUqqy0LF9vzcFB0jm/EDNZCx587znLWTIgxcokW7WeZMobQ== - dependencies: - "@jest/test-result" "^29.0.3" - graceful-fs "^4.2.9" - jest-haste-map "^29.0.3" - slash "^3.0.0" - -"@jest/transform@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.0.3.tgz#9eb1fed2072a0354f190569807d1250572fb0970" - integrity sha512-C5ihFTRYaGDbi/xbRQRdbo5ddGtI4VSpmL6AIcZxdhwLbXMa7PcXxxqyI91vGOFHnn5aVM3WYnYKCHEqmLVGzg== - dependencies: - "@babel/core" "^7.11.6" - "@jest/types" "^29.0.3" - "@jridgewell/trace-mapping" "^0.3.15" - babel-plugin-istanbul "^6.1.1" - chalk "^4.0.0" - convert-source-map "^1.4.0" - fast-json-stable-stringify "^2.1.0" - graceful-fs "^4.2.9" - jest-haste-map "^29.0.3" - jest-regex-util "^29.0.0" - jest-util "^29.0.3" - micromatch "^4.0.4" - pirates "^4.0.4" - 
slash "^3.0.0" - write-file-atomic "^4.0.1" - -"@jest/types@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.0.3.tgz#0be78fdddb1a35aeb2041074e55b860561c8ef63" - integrity sha512-coBJmOQvurXjN1Hh5PzF7cmsod0zLIOXpP8KD161mqNlroMhLcwpODiEzi7ZsRl5Z/AIuxpeNm8DCl43F4kz8A== - dependencies: - "@jest/schemas" "^29.0.0" - "@types/istanbul-lib-coverage" "^2.0.0" - "@types/istanbul-reports" "^3.0.0" - "@types/node" "*" - "@types/yargs" "^17.0.8" - chalk "^4.0.0" - -"@jridgewell/gen-mapping@^0.1.0": - version "0.1.1" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996" - integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w== - dependencies: - "@jridgewell/set-array" "^1.0.0" - "@jridgewell/sourcemap-codec" "^1.4.10" - -"@jridgewell/gen-mapping@^0.3.2": - version "0.3.2" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" - integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== - dependencies: - "@jridgewell/set-array" "^1.0.1" - "@jridgewell/sourcemap-codec" "^1.4.10" - "@jridgewell/trace-mapping" "^0.3.9" - -"@jridgewell/resolve-uri@^3.0.3": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78" - integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w== - -"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" - integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== - -"@jridgewell/sourcemap-codec@^1.4.10": - version "1.4.14" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24" - integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== - -"@jridgewell/trace-mapping@0.3.9": - version "0.3.9" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" - integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== - dependencies: - "@jridgewell/resolve-uri" "^3.0.3" - "@jridgewell/sourcemap-codec" "^1.4.10" - -"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.15", "@jridgewell/trace-mapping@^0.3.9": - version "0.3.15" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz#aba35c48a38d3fd84b37e66c9c0423f9744f9774" - integrity sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g== - dependencies: - "@jridgewell/resolve-uri" "^3.0.3" - "@jridgewell/sourcemap-codec" "^1.4.10" - -"@sinclair/typebox@^0.24.1": - version "0.24.42" - resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.24.42.tgz#a74b608d494a1f4cc079738e050142a678813f52" - integrity sha512-d+2AtrHGyWek2u2ITF0lHRIv6Tt7X0dEHW+0rP+5aDCEjC3fiN2RBjrLD0yU0at52BcZbRGxLbAtXiR0hFCjYw== - -"@sinonjs/commons@^1.7.0": - version "1.8.3" - resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.3.tgz#3802ddd21a50a949b6721ddd72da36e67e7f1b2d" - 
integrity sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ== - dependencies: - type-detect "4.0.8" - -"@sinonjs/fake-timers@^9.1.2": - version "9.1.2" - resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-9.1.2.tgz#4eaab737fab77332ab132d396a3c0d364bd0ea8c" - integrity sha512-BPS4ynJW/o92PUR4wgriz2Ud5gpST5vz6GQfMixEDK0Z8ZCUv2M7SkBLykH56T++Xs+8ln9zTGbOvNGIe02/jw== - dependencies: - "@sinonjs/commons" "^1.7.0" - -"@tsconfig/node10@^1.0.7": - version "1.0.9" - resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2" - integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA== - -"@tsconfig/node12@^1.0.7": - version "1.0.11" - resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" - integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== - -"@tsconfig/node14@^1.0.0": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" - integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== - -"@tsconfig/node16@^1.0.2": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.3.tgz#472eaab5f15c1ffdd7f8628bd4c4f753995ec79e" - integrity sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ== - -"@types/babel__core@^7.1.14": - version "7.1.19" - resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.19.tgz#7b497495b7d1b4812bdb9d02804d0576f43ee460" - integrity sha512-WEOTgRsbYkvA/KCsDwVEGkd7WAr1e3g31VHQ8zy5gul/V1qKullU/BU5I68X5v7V3GnB9eotmom4v5a5gjxorw== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - "@types/babel__generator" "*" - "@types/babel__template" "*" - "@types/babel__traverse" "*" - -"@types/babel__generator@*": - version "7.6.4" - resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.4.tgz#1f20ce4c5b1990b37900b63f050182d28c2439b7" - integrity sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg== - dependencies: - "@babel/types" "^7.0.0" - -"@types/babel__template@*": - version "7.4.1" - resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.1.tgz#3d1a48fd9d6c0edfd56f2ff578daed48f36c8969" - integrity sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - -"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": - version "7.18.1" - resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.18.1.tgz#ce5e2c8c272b99b7a9fd69fa39f0b4cd85028bd9" - integrity sha512-FSdLaZh2UxaMuLp9lixWaHq/golWTRWOnRsAXzDTDSDOQLuZb1nsdCt6pJSPWSEQt2eFZ2YVk3oYhn+1kLMeMA== - dependencies: - "@babel/types" "^7.3.0" - -"@types/bn.js@^4.11.3": - version "4.11.6" - resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-4.11.6.tgz#c306c70d9358aaea33cd4eda092a742b9505967c" - integrity sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg== - dependencies: - "@types/node" "*" - -"@types/graceful-fs@^4.1.3": - version "4.1.5" - resolved 
"https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.5.tgz#21ffba0d98da4350db64891f92a9e5db3cdb4e15" - integrity sha512-anKkLmZZ+xm4p8JWBf4hElkM4XR+EZeA2M9BAkkTldmcyDY4mbdIJnRghDJH3Ov5ooY7/UAoENtmdMSkaAd7Cw== - dependencies: - "@types/node" "*" - -"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": - version "2.0.4" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz#8467d4b3c087805d63580480890791277ce35c44" - integrity sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g== - -"@types/istanbul-lib-report@*": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#c14c24f18ea8190c118ee7562b7ff99a36552686" - integrity sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg== - dependencies: - "@types/istanbul-lib-coverage" "*" - -"@types/istanbul-reports@^3.0.0": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz#9153fe98bba2bd565a63add9436d6f0d7f8468ff" - integrity sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw== - dependencies: - "@types/istanbul-lib-report" "*" - -"@types/jest@^29.0.3": - version "29.0.3" - resolved "https://registry.yarnpkg.com/@types/jest/-/jest-29.0.3.tgz#b61a5ed100850686b8d3c5e28e3a1926b2001b59" - integrity sha512-F6ukyCTwbfsEX5F2YmVYmM5TcTHy1q9P5rWlRbrk56KyMh3v9xRGUO3aa8+SkvMi0SHXtASJv1283enXimC0Og== - dependencies: - expect "^29.0.0" - pretty-format "^29.0.0" - -"@types/node-fetch@^2.5.7": - version "2.6.2" - resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.2.tgz#d1a9c5fd049d9415dce61571557104dec3ec81da" - integrity sha512-DHqhlq5jeESLy19TYhLakJ07kNumXWjcDdxXsLUMJZ6ue8VZJj4kLPQVE/2mdHh3xZziNF1xppu5lwmS53HR+A== - dependencies: - "@types/node" "*" - form-data "^3.0.0" - -"@types/node@*": - version "18.7.18" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.18.tgz#633184f55c322e4fb08612307c274ee6d5ed3154" - integrity sha512-m+6nTEOadJZuTPkKR/SYK3A2d7FZrgElol9UP1Kae90VVU4a6mxnPuLiIW1m4Cq4gZ/nWb9GrdVXJCoCazDAbg== - -"@types/node@^14.14.5": - version "14.18.29" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.18.29.tgz#a0c58d67a42f8953c13d32f0acda47ed26dfce40" - integrity sha512-LhF+9fbIX4iPzhsRLpK5H7iPdvW8L4IwGciXQIOEcuF62+9nw/VQVsOViAOOGxY3OlOKGLFv0sWwJXdwQeTn6A== - -"@types/pbkdf2@^3.0.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@types/pbkdf2/-/pbkdf2-3.1.0.tgz#039a0e9b67da0cdc4ee5dab865caa6b267bb66b1" - integrity sha512-Cf63Rv7jCQ0LaL8tNXmEyqTHuIJxRdlS5vMh1mj5voN4+QFhVZnlZruezqpWYDiJ8UTzhP0VmeLXCmBk66YrMQ== - dependencies: - "@types/node" "*" - -"@types/prettier@^2.1.5": - version "2.7.0" - resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.0.tgz#ea03e9f0376a4446f44797ca19d9c46c36e352dc" - integrity sha512-RI1L7N4JnW5gQw2spvL7Sllfuf1SaHdrZpCHiBlCXjIlufi1SMNnbu2teze3/QE67Fg2tBlH7W+mi4hVNk4p0A== - -"@types/secp256k1@^4.0.1": - version "4.0.3" - resolved "https://registry.yarnpkg.com/@types/secp256k1/-/secp256k1-4.0.3.tgz#1b8e55d8e00f08ee7220b4d59a6abe89c37a901c" - integrity sha512-Da66lEIFeIz9ltsdMZcpQvmrmmoqrfju8pm1BH8WbYjZSwUgCwXLb9C+9XYogwBITnbsSaMdVPb2ekf7TV+03w== - dependencies: - "@types/node" "*" - -"@types/stack-utils@^2.0.0": - version "2.0.1" - resolved 
"https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.1.tgz#20f18294f797f2209b5f65c8e3b5c8e8261d127c" - integrity sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw== - -"@types/yargs-parser@*": - version "21.0.0" - resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.0.tgz#0c60e537fa790f5f9472ed2776c2b71ec117351b" - integrity sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA== - -"@types/yargs@^17.0.8": - version "17.0.12" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.12.tgz#0745ff3e4872b4ace98616d4b7e37ccbd75f9526" - integrity sha512-Nz4MPhecOFArtm81gFQvQqdV7XYCrWKx5uUt6GNHredFHn1i2mtWqXTON7EPXMtNi1qjtjEM/VCHDhcHsAMLXQ== - dependencies: - "@types/yargs-parser" "*" - -acorn-walk@^8.1.1: - version "8.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" - integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== - -acorn@^8.4.1: - version "8.8.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.0.tgz#88c0187620435c7f6015803f5539dae05a9dbea8" - integrity sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w== - -aes-js@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" - integrity sha512-H7wUZRn8WpTq9jocdxQ2c8x2sKo9ZVmzfRE13GiNJXfp7NcKYEdvl3vspKjXox6RIG2VtaRe4JFvxG4rqp2Zuw== - -ansi-escapes@^4.2.1: - version "4.3.2" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" - integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== - dependencies: - type-fest "^0.21.3" - -ansi-regex@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" - integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== - -ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== - dependencies: - color-convert "^1.9.0" - -ansi-styles@^4.0.0, ansi-styles@^4.1.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" - integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== - dependencies: - color-convert "^2.0.1" - -ansi-styles@^5.0.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" - integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== - -anymatch@^3.0.3: - version "3.1.2" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" - integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== - dependencies: - normalize-path "^3.0.0" - picomatch "^2.0.4" - -arg@^4.1.0: - version "4.1.3" - resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" - integrity 
sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== - -argparse@^1.0.7: - version "1.0.10" - resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" - integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== - dependencies: - sprintf-js "~1.0.2" - -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== - -babel-jest@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.0.3.tgz#64e156a47a77588db6a669a88dedff27ed6e260f" - integrity sha512-ApPyHSOhS/sVzwUOQIWJmdvDhBsMG01HX9z7ogtkp1TToHGGUWFlnXJUIzCgKPSfiYLn3ibipCYzsKSURHEwLg== - dependencies: - "@jest/transform" "^29.0.3" - "@types/babel__core" "^7.1.14" - babel-plugin-istanbul "^6.1.1" - babel-preset-jest "^29.0.2" - chalk "^4.0.0" - graceful-fs "^4.2.9" - slash "^3.0.0" - -babel-plugin-istanbul@^6.1.1: - version "6.1.1" - resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz#fa88ec59232fd9b4e36dbbc540a8ec9a9b47da73" - integrity sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@istanbuljs/load-nyc-config" "^1.0.0" - "@istanbuljs/schema" "^0.1.2" - istanbul-lib-instrument "^5.0.4" - test-exclude "^6.0.0" - -babel-plugin-jest-hoist@^29.0.2: - version "29.0.2" - resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.0.2.tgz#ae61483a829a021b146c016c6ad39b8bcc37c2c8" - integrity sha512-eBr2ynAEFjcebVvu8Ktx580BD1QKCrBG1XwEUTXJe285p9HA/4hOhfWCFRQhTKSyBV0VzjhG7H91Eifz9s29hg== - dependencies: - "@babel/template" "^7.3.3" - "@babel/types" "^7.3.3" - "@types/babel__core" "^7.1.14" - "@types/babel__traverse" "^7.0.6" - -babel-preset-current-node-syntax@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz#b4399239b89b2a011f9ddbe3e4f401fc40cff73b" - integrity sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ== - dependencies: - "@babel/plugin-syntax-async-generators" "^7.8.4" - "@babel/plugin-syntax-bigint" "^7.8.3" - "@babel/plugin-syntax-class-properties" "^7.8.3" - "@babel/plugin-syntax-import-meta" "^7.8.3" - "@babel/plugin-syntax-json-strings" "^7.8.3" - "@babel/plugin-syntax-logical-assignment-operators" "^7.8.3" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - "@babel/plugin-syntax-numeric-separator" "^7.8.3" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" - "@babel/plugin-syntax-top-level-await" "^7.8.3" - -babel-preset-jest@^29.0.2: - version "29.0.2" - resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.0.2.tgz#e14a7124e22b161551818d89e5bdcfb3b2b0eac7" - integrity sha512-BeVXp7rH5TK96ofyEnHjznjLMQ2nAeDJ+QzxKnHAAMs0RgrQsCywjAN8m4mOm5Di0pxU//3AoEeJJrerMH5UeA== - dependencies: - babel-plugin-jest-hoist "^29.0.2" - babel-preset-current-node-syntax "^1.0.0" - -balanced-match@^1.0.0: - version "1.0.2" - resolved 
"https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" - integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== - -base-x@^3.0.2: - version "3.0.9" - resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.9.tgz#6349aaabb58526332de9f60995e548a53fe21320" - integrity sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ== - dependencies: - safe-buffer "^5.0.1" - -bech32@1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/bech32/-/bech32-1.1.4.tgz#e38c9f37bf179b8eb16ae3a772b40c356d4832e9" - integrity sha512-s0IrSOzLlbvX7yp4WBfPITzpAU8sqQcpsmwXDiKwrG4r491vwCO/XpejasRNl0piBMe/DvP4Tz0mIS/X1DPJBQ== - -blakejs@^1.1.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/blakejs/-/blakejs-1.2.1.tgz#5057e4206eadb4a97f7c0b6e197a505042fc3814" - integrity sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ== - -bn.js@^4.11.0, bn.js@^4.11.8, bn.js@^4.11.9: - version "4.12.0" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88" - integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA== - -bn.js@^5.2.0, bn.js@^5.2.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.2.1.tgz#0bc527a6a0d18d0aa8d5b0538ce4a77dccfa7b70" - integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ== - -brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== - dependencies: - balanced-match "^1.0.0" - concat-map "0.0.1" - -braces@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== - dependencies: - fill-range "^7.0.1" - -brorand@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" - integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w== - -browserify-aes@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" - integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== - dependencies: - buffer-xor "^1.0.3" - cipher-base "^1.0.0" - create-hash "^1.1.0" - evp_bytestokey "^1.0.3" - inherits "^2.0.1" - safe-buffer "^5.0.1" - -browserslist@^4.21.3: - version "4.21.4" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.4.tgz#e7496bbc67b9e39dd0f98565feccdcb0d4ff6987" - integrity sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw== - dependencies: - caniuse-lite "^1.0.30001400" - electron-to-chromium "^1.4.251" - node-releases "^2.0.6" - update-browserslist-db "^1.0.9" - -bs-logger@0.x: - version "0.2.6" - resolved "https://registry.yarnpkg.com/bs-logger/-/bs-logger-0.2.6.tgz#eb7d365307a72cf974cc6cda76b68354ad336bd8" - integrity 
sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog== - dependencies: - fast-json-stable-stringify "2.x" - -bs58@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/bs58/-/bs58-4.0.1.tgz#be161e76c354f6f788ae4071f63f34e8c4f0a42a" - integrity sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw== - dependencies: - base-x "^3.0.2" - -bs58check@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/bs58check/-/bs58check-2.1.2.tgz#53b018291228d82a5aa08e7d796fdafda54aebfc" - integrity sha512-0TS1jicxdU09dwJMNZtVAfzPi6Q6QeN0pM1Fkzrjn+XYHvzMKPU3pHVpva+769iNVSfIYWf7LJ6WR+BuuMf8cA== - dependencies: - bs58 "^4.0.0" - create-hash "^1.1.0" - safe-buffer "^5.1.2" - -bser@2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" - integrity sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ== - dependencies: - node-int64 "^0.4.0" - -buffer-from@^1.0.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" - integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== - -buffer-xor@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" - integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ== - -callsites@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" - integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== - -camelcase@^5.3.1: - version "5.3.1" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" - integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== - -camelcase@^6.2.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" - integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== - -caniuse-lite@^1.0.30001400: - version "1.0.30001409" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001409.tgz#6135da9dcab34cd9761d9cdb12a68e6740c5e96e" - integrity sha512-V0mnJ5dwarmhYv8/MzhJ//aW68UpvnQBXv8lJ2QUsvn2pHcmAuNtu8hQEDz37XnA1iE+lRR9CIfGWWpgJ5QedQ== - -chalk@^2.0.0: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - -chalk@^4.0.0: - version "4.1.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" - integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - -char-regex@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" - integrity 
sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw== - -ci-info@^3.2.0: - version "3.4.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.4.0.tgz#b28484fd436cbc267900364f096c9dc185efb251" - integrity sha512-t5QdPT5jq3o262DOQ8zA6E1tlH2upmUc4Hlvrbx1pGYJuiiHl7O7rvVNI+l8HTVhd/q3Qc9vqimkNk5yiXsAug== - -cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" - integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q== - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -cjs-module-lexer@^1.0.0: - version "1.2.2" - resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz#9f84ba3244a512f3a54e5277e8eef4c489864e40" - integrity sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA== - -cliui@^7.0.2: - version "7.0.4" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" - integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== - dependencies: - string-width "^4.2.0" - strip-ansi "^6.0.0" - wrap-ansi "^7.0.0" - -co@^4.6.0: - version "4.6.0" - resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" - integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ== - -collect-v8-coverage@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz#cc2c8e94fc18bbdffe64d6534570c8a673b27f59" - integrity sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg== - -color-convert@^1.9.0: - version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" - integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== - dependencies: - color-name "1.1.3" - -color-convert@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" - integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== - dependencies: - color-name "~1.1.4" - -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" - integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== - -color-name@~1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" - integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== - -combined-stream@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -concat-map@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity 
sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== - -convert-source-map@^1.4.0, convert-source-map@^1.6.0, convert-source-map@^1.7.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369" - integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA== - dependencies: - safe-buffer "~5.1.1" - -create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" - integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg== - dependencies: - cipher-base "^1.0.1" - inherits "^2.0.1" - md5.js "^1.3.4" - ripemd160 "^2.0.1" - sha.js "^2.4.0" - -create-hmac@^1.1.4, create-hmac@^1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" - integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg== - dependencies: - cipher-base "^1.0.3" - create-hash "^1.1.0" - inherits "^2.0.1" - ripemd160 "^2.0.0" - safe-buffer "^5.0.1" - sha.js "^2.4.8" - -create-require@^1.1.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" - integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== - -cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== - dependencies: - path-key "^3.1.0" - shebang-command "^2.0.0" - which "^2.0.1" - -debug@^4.1.0, debug@^4.1.1: - version "4.3.4" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" - integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== - dependencies: - ms "2.1.2" - -dedent@^0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c" - integrity sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA== - -deepmerge@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" - integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== - -detect-newline@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" - integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== - -diff-sequences@^29.0.0: - version "29.0.0" - resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.0.0.tgz#bae49972ef3933556bcb0800b72e8579d19d9e4f" - integrity 
sha512-7Qe/zd1wxSDL4D/X/FPjOMB+ZMDt71W94KYaq05I2l0oQqgXgs7s4ftYYmV38gBSrPz2vcygxfs1xn0FT+rKNA== - -diff@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" - integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== - -electron-to-chromium@^1.4.251: - version "1.4.257" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.257.tgz#895dc73c6bb58d1235dc80879ecbca0bcba96e2c" - integrity sha512-C65sIwHqNnPC2ADMfse/jWTtmhZMII+x6ADI9gENzrOiI7BpxmfKFE84WkIEl5wEg+7+SfIkwChDlsd1Erju2A== - -elliptic@6.5.4, elliptic@^6.5.2, elliptic@^6.5.4: - version "6.5.4" - resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb" - integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ== - dependencies: - bn.js "^4.11.9" - brorand "^1.1.0" - hash.js "^1.0.0" - hmac-drbg "^1.0.1" - inherits "^2.0.4" - minimalistic-assert "^1.0.1" - minimalistic-crypto-utils "^1.0.1" - -emittery@^0.10.2: - version "0.10.2" - resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.10.2.tgz#902eec8aedb8c41938c46e9385e9db7e03182933" - integrity sha512-aITqOwnLanpHLNXZJENbOgjUBeHocD+xsSJmNrjovKBW5HbSpW3d1pEls7GFQPUWXiwG9+0P4GtHfEqC/4M0Iw== - -emoji-regex@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" - integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== - -error-ex@^1.3.1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" - integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== - dependencies: - is-arrayish "^0.2.1" - -escalade@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" - integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== - -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== - -escape-string-regexp@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344" - integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== - -esprima@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" - integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== - -ethereum-cryptography@^0.1.3: - version "0.1.3" - resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-0.1.3.tgz#8d6143cfc3d74bf79bbd8edecdf29e4ae20dd191" - integrity sha512-w8/4x1SGGzc+tO97TASLja6SLd3fRIK2tLVcV2Gx4IB21hE19atll5Cq9o3d0ZmAYC/8aw0ipieTSiekAea4SQ== - dependencies: - "@types/pbkdf2" "^3.0.0" - "@types/secp256k1" "^4.0.1" - blakejs "^1.1.0" - browserify-aes "^1.2.0" - bs58check "^2.1.2" - create-hash "^1.2.0" - create-hmac "^1.1.7" - hash.js "^1.1.7" - 
keccak "^3.0.0" - pbkdf2 "^3.0.17" - randombytes "^2.1.0" - safe-buffer "^5.1.2" - scrypt-js "^3.0.0" - secp256k1 "^4.0.1" - setimmediate "^1.0.5" - -ethereumjs-abi@^0.6.8: - version "0.6.8" - resolved "https://registry.yarnpkg.com/ethereumjs-abi/-/ethereumjs-abi-0.6.8.tgz#71bc152db099f70e62f108b7cdfca1b362c6fcae" - integrity sha512-Tx0r/iXI6r+lRsdvkFDlut0N08jWMnKRZ6Gkq+Nmw75lZe4e6o3EkSnkaBP5NF6+m5PTGAr9JP43N3LyeoglsA== - dependencies: - bn.js "^4.11.8" - ethereumjs-util "^6.0.0" - -ethereumjs-util@^6.0.0: - version "6.2.1" - resolved "https://registry.yarnpkg.com/ethereumjs-util/-/ethereumjs-util-6.2.1.tgz#fcb4e4dd5ceacb9d2305426ab1a5cd93e3163b69" - integrity sha512-W2Ktez4L01Vexijrm5EB6w7dg4n/TgpoYU4avuT5T3Vmnw/eCRtiBrJfQYS/DCSvDIOLn2k57GcHdeBcgVxAqw== - dependencies: - "@types/bn.js" "^4.11.3" - bn.js "^4.11.0" - create-hash "^1.1.2" - elliptic "^6.5.2" - ethereum-cryptography "^0.1.3" - ethjs-util "0.1.6" - rlp "^2.2.3" - -ethers@~5.7.0: - version "5.7.1" - resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.1.tgz#48c83a44900b5f006eb2f65d3ba6277047fd4f33" - integrity sha512-5krze4dRLITX7FpU8J4WscXqADiKmyeNlylmmDLbS95DaZpBhDe2YSwRQwKXWNyXcox7a3gBgm/MkGXV1O1S/Q== - dependencies: - "@ethersproject/abi" "5.7.0" - "@ethersproject/abstract-provider" "5.7.0" - "@ethersproject/abstract-signer" "5.7.0" - "@ethersproject/address" "5.7.0" - "@ethersproject/base64" "5.7.0" - "@ethersproject/basex" "5.7.0" - "@ethersproject/bignumber" "5.7.0" - "@ethersproject/bytes" "5.7.0" - "@ethersproject/constants" "5.7.0" - "@ethersproject/contracts" "5.7.0" - "@ethersproject/hash" "5.7.0" - "@ethersproject/hdnode" "5.7.0" - "@ethersproject/json-wallets" "5.7.0" - "@ethersproject/keccak256" "5.7.0" - "@ethersproject/logger" "5.7.0" - "@ethersproject/networks" "5.7.1" - "@ethersproject/pbkdf2" "5.7.0" - "@ethersproject/properties" "5.7.0" - "@ethersproject/providers" "5.7.1" - "@ethersproject/random" "5.7.0" - "@ethersproject/rlp" "5.7.0" - "@ethersproject/sha2" "5.7.0" - "@ethersproject/signing-key" "5.7.0" - "@ethersproject/solidity" "5.7.0" - "@ethersproject/strings" "5.7.0" - "@ethersproject/transactions" "5.7.0" - "@ethersproject/units" "5.7.0" - "@ethersproject/wallet" "5.7.0" - "@ethersproject/web" "5.7.1" - "@ethersproject/wordlists" "5.7.0" - -ethjs-util@0.1.6: - version "0.1.6" - resolved "https://registry.yarnpkg.com/ethjs-util/-/ethjs-util-0.1.6.tgz#f308b62f185f9fe6237132fb2a9818866a5cd536" - integrity sha512-CUnVOQq7gSpDHZVVrQW8ExxUETWrnrvXYvYz55wOU8Uj4VCgw56XC2B/fVqQN+f7gmrnRHSLVnFAwsCuNwji8w== - dependencies: - is-hex-prefixed "1.0.0" - strip-hex-prefix "1.0.0" - -evp_bytestokey@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" - integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA== - dependencies: - md5.js "^1.3.4" - safe-buffer "^5.1.1" - -execa@^5.0.0: - version "5.1.1" - resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" - integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== - dependencies: - cross-spawn "^7.0.3" - get-stream "^6.0.0" - human-signals "^2.1.0" - is-stream "^2.0.0" - merge-stream "^2.0.0" - npm-run-path "^4.0.1" - onetime "^5.1.2" - signal-exit "^3.0.3" - strip-final-newline "^2.0.0" - -exit@^0.1.2: - version "0.1.2" - resolved 
"https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" - integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ== - -expect@^29.0.0, expect@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/expect/-/expect-29.0.3.tgz#6be65ddb945202f143c4e07c083f4f39f3bd326f" - integrity sha512-t8l5DTws3212VbmPL+tBFXhjRHLmctHB0oQbL8eUc6S7NzZtYUhycrFO9mkxA0ZUC6FAWdNi7JchJSkODtcu1Q== - dependencies: - "@jest/expect-utils" "^29.0.3" - jest-get-type "^29.0.0" - jest-matcher-utils "^29.0.3" - jest-message-util "^29.0.3" - jest-util "^29.0.3" - -fast-json-stable-stringify@2.x, fast-json-stable-stringify@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" - integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== - -fb-watchman@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.2.tgz#e9524ee6b5c77e9e5001af0f85f3adbb8623255c" - integrity sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA== - dependencies: - bser "2.1.1" - -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== - dependencies: - to-regex-range "^5.0.1" - -find-up@^4.0.0, find-up@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" - integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== - dependencies: - locate-path "^5.0.0" - path-exists "^4.0.0" - -form-data@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f" - integrity sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.8" - mime-types "^2.1.12" - -fs.realpath@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== - -fsevents@^2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" - integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== - -function-bind@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" - integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== - -gensync@^1.0.0-beta.2: - version "1.0.0-beta.2" - resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" - integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== - -get-caller-file@^2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" - integrity 
sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== - -get-package-type@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" - integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== - -get-stream@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" - integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== - -glob@^7.1.3, glob@^7.1.4: - version "7.2.3" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" - integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^3.1.1" - once "^1.3.0" - path-is-absolute "^1.0.0" - -globals@^11.1.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== - -graceful-fs@^4.2.9: - version "4.2.10" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" - integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== - -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== - -has-flag@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" - integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== - -has@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" - integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== - dependencies: - function-bind "^1.1.1" - -hash-base@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.1.0.tgz#55c381d9e06e1d2997a883b4a3fddfe7f0d3af33" - integrity sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA== - dependencies: - inherits "^2.0.4" - readable-stream "^3.6.0" - safe-buffer "^5.2.0" - -hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" - integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA== - dependencies: - inherits "^2.0.3" - minimalistic-assert "^1.0.1" - -hmac-drbg@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" - integrity sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg== - dependencies: - hash.js "^1.0.3" - minimalistic-assert "^1.0.0" - minimalistic-crypto-utils "^1.0.1" - -html-escaper@^2.0.0: - version "2.0.2" - resolved 
"https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" - integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== - -human-signals@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" - integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== - -import-local@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.1.0.tgz#b4479df8a5fd44f6cdce24070675676063c95cb4" - integrity sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg== - dependencies: - pkg-dir "^4.2.0" - resolve-cwd "^3.0.0" - -imurmurhash@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" - integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== - -inflight@^1.0.4: - version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== - dependencies: - once "^1.3.0" - wrappy "1" - -inherits@2, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" - integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== - -is-arrayish@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" - integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== - -is-core-module@^2.9.0: - version "2.10.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.10.0.tgz#9012ede0a91c69587e647514e1d5277019e728ed" - integrity sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg== - dependencies: - has "^1.0.3" - -is-fullwidth-code-point@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" - integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== - -is-generator-fn@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" - integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ== - -is-hex-prefixed@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-hex-prefixed/-/is-hex-prefixed-1.0.0.tgz#7d8d37e6ad77e5d127148913c573e082d777f554" - integrity sha512-WvtOiug1VFrE9v1Cydwm+FnXd3+w9GaeVUss5W4v/SLy3UW00vP+6iNF2SdnfiBoLy4bTqVdkftNGTUeOFVsbA== - -is-number@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" - integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== - -is-stream@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" 
- integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== - -isexe@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== - -istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz#189e7909d0a39fa5a3dfad5b03f71947770191d3" - integrity sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw== - -istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.0.tgz#31d18bdd127f825dd02ea7bfdfd906f8ab840e9f" - integrity sha512-6Lthe1hqXHBNsqvgDzGO6l03XNeu3CrG4RqQ1KM9+l5+jNGpEJfIELx1NS3SEHmJQA8np/u+E4EPRKRiu6m19A== - dependencies: - "@babel/core" "^7.12.3" - "@babel/parser" "^7.14.7" - "@istanbuljs/schema" "^0.1.2" - istanbul-lib-coverage "^3.2.0" - semver "^6.3.0" - -istanbul-lib-report@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6" - integrity sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw== - dependencies: - istanbul-lib-coverage "^3.0.0" - make-dir "^3.0.0" - supports-color "^7.1.0" - -istanbul-lib-source-maps@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz#895f3a709fcfba34c6de5a42939022f3e4358551" - integrity sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw== - dependencies: - debug "^4.1.1" - istanbul-lib-coverage "^3.0.0" - source-map "^0.6.1" - -istanbul-reports@^3.1.3: - version "3.1.5" - resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.5.tgz#cc9a6ab25cb25659810e4785ed9d9fb742578bae" - integrity sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w== - dependencies: - html-escaper "^2.0.0" - istanbul-lib-report "^3.0.0" - -jest-changed-files@^29.0.0: - version "29.0.0" - resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.0.0.tgz#aa238eae42d9372a413dd9a8dadc91ca1806dce0" - integrity sha512-28/iDMDrUpGoCitTURuDqUzWQoWmOmOKOFST1mi2lwh62X4BFf6khgH3uSuo1e49X/UDjuApAj3w0wLOex4VPQ== - dependencies: - execa "^5.0.0" - p-limit "^3.1.0" - -jest-circus@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.0.3.tgz#90faebc90295291cfc636b27dbd82e3bfb9e7a48" - integrity sha512-QeGzagC6Hw5pP+df1+aoF8+FBSgkPmraC1UdkeunWh0jmrp7wC0Hr6umdUAOELBQmxtKAOMNC3KAdjmCds92Zg== - dependencies: - "@jest/environment" "^29.0.3" - "@jest/expect" "^29.0.3" - "@jest/test-result" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - chalk "^4.0.0" - co "^4.6.0" - dedent "^0.7.0" - is-generator-fn "^2.0.0" - jest-each "^29.0.3" - jest-matcher-utils "^29.0.3" - jest-message-util "^29.0.3" - jest-runtime "^29.0.3" - jest-snapshot "^29.0.3" - jest-util "^29.0.3" - p-limit "^3.1.0" - pretty-format "^29.0.3" - slash "^3.0.0" - stack-utils "^2.0.3" - -jest-cli@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.0.3.tgz#fd8f0ef363a7a3d9c53ef62e0651f18eeffa77b9" - 
integrity sha512-aUy9Gd/Kut1z80eBzG10jAn6BgS3BoBbXyv+uXEqBJ8wnnuZ5RpNfARoskSrTIy1GY4a8f32YGuCMwibtkl9CQ== - dependencies: - "@jest/core" "^29.0.3" - "@jest/test-result" "^29.0.3" - "@jest/types" "^29.0.3" - chalk "^4.0.0" - exit "^0.1.2" - graceful-fs "^4.2.9" - import-local "^3.0.2" - jest-config "^29.0.3" - jest-util "^29.0.3" - jest-validate "^29.0.3" - prompts "^2.0.1" - yargs "^17.3.1" - -jest-config@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.0.3.tgz#c2e52a8f5adbd18de79f99532d8332a19e232f13" - integrity sha512-U5qkc82HHVYe3fNu2CRXLN4g761Na26rWKf7CjM8LlZB3In1jadEkZdMwsE37rd9RSPV0NfYaCjHdk/gu3v+Ew== - dependencies: - "@babel/core" "^7.11.6" - "@jest/test-sequencer" "^29.0.3" - "@jest/types" "^29.0.3" - babel-jest "^29.0.3" - chalk "^4.0.0" - ci-info "^3.2.0" - deepmerge "^4.2.2" - glob "^7.1.3" - graceful-fs "^4.2.9" - jest-circus "^29.0.3" - jest-environment-node "^29.0.3" - jest-get-type "^29.0.0" - jest-regex-util "^29.0.0" - jest-resolve "^29.0.3" - jest-runner "^29.0.3" - jest-util "^29.0.3" - jest-validate "^29.0.3" - micromatch "^4.0.4" - parse-json "^5.2.0" - pretty-format "^29.0.3" - slash "^3.0.0" - strip-json-comments "^3.1.1" - -jest-diff@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.0.3.tgz#41cc02409ad1458ae1bf7684129a3da2856341ac" - integrity sha512-+X/AIF5G/vX9fWK+Db9bi9BQas7M9oBME7egU7psbn4jlszLFCu0dW63UgeE6cs/GANq4fLaT+8sGHQQ0eCUfg== - dependencies: - chalk "^4.0.0" - diff-sequences "^29.0.0" - jest-get-type "^29.0.0" - pretty-format "^29.0.3" - -jest-docblock@^29.0.0: - version "29.0.0" - resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.0.0.tgz#3151bcc45ed7f5a8af4884dcc049aee699b4ceae" - integrity sha512-s5Kpra/kLzbqu9dEjov30kj1n4tfu3e7Pl8v+f8jOkeWNqM6Ds8jRaJfZow3ducoQUrf2Z4rs2N5S3zXnb83gw== - dependencies: - detect-newline "^3.0.0" - -jest-each@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.0.3.tgz#7ef3157580b15a609d7ef663dd4fc9b07f4e1299" - integrity sha512-wILhZfESURHHBNvPMJ0lZlYZrvOQJxAo3wNHi+ycr90V7M+uGR9Gh4+4a/BmaZF0XTyZsk4OiYEf3GJN7Ltqzg== - dependencies: - "@jest/types" "^29.0.3" - chalk "^4.0.0" - jest-get-type "^29.0.0" - jest-util "^29.0.3" - pretty-format "^29.0.3" - -jest-environment-node@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.0.3.tgz#293804b1e0fa5f0e354dacbe510655caa478a3b2" - integrity sha512-cdZqRCnmIlTXC+9vtvmfiY/40Cj6s2T0czXuq1whvQdmpzAnj4sbqVYuZ4zFHk766xTTJ+Ij3uUqkk8KCfXoyg== - dependencies: - "@jest/environment" "^29.0.3" - "@jest/fake-timers" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - jest-mock "^29.0.3" - jest-util "^29.0.3" - -jest-get-type@^29.0.0: - version "29.0.0" - resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.0.0.tgz#843f6c50a1b778f7325df1129a0fd7aa713aef80" - integrity sha512-83X19z/HuLKYXYHskZlBAShO7UfLFXu/vWajw9ZNJASN32li8yHMaVGAQqxFW1RCFOkB7cubaL6FaJVQqqJLSw== - -jest-haste-map@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.0.3.tgz#d7f3f7180f558d760eacc5184aac5a67f20ef939" - integrity sha512-uMqR99+GuBHo0RjRhOE4iA6LmsxEwRdgiIAQgMU/wdT2XebsLDz5obIwLZm/Psj+GwSEQhw9AfAVKGYbh2G55A== - dependencies: - "@jest/types" "^29.0.3" - "@types/graceful-fs" "^4.1.3" - "@types/node" "*" - anymatch "^3.0.3" - fb-watchman "^2.0.0" - graceful-fs "^4.2.9" - jest-regex-util "^29.0.0" - jest-util "^29.0.3" - 
jest-worker "^29.0.3" - micromatch "^4.0.4" - walker "^1.0.8" - optionalDependencies: - fsevents "^2.3.2" - -jest-leak-detector@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.0.3.tgz#e85cf3391106a7a250850b6766b508bfe9c7bc6f" - integrity sha512-YfW/G63dAuiuQ3QmQlh8hnqLDe25WFY3eQhuc/Ev1AGmkw5zREblTh7TCSKLoheyggu6G9gxO2hY8p9o6xbaRQ== - dependencies: - jest-get-type "^29.0.0" - pretty-format "^29.0.3" - -jest-matcher-utils@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.0.3.tgz#b8305fd3f9e27cdbc210b21fc7dbba92d4e54560" - integrity sha512-RsR1+cZ6p1hDV4GSCQTg+9qjeotQCgkaleIKLK7dm+U4V/H2bWedU3RAtLm8+mANzZ7eDV33dMar4pejd7047w== - dependencies: - chalk "^4.0.0" - jest-diff "^29.0.3" - jest-get-type "^29.0.0" - pretty-format "^29.0.3" - -jest-message-util@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.0.3.tgz#f0254e1ffad21890c78355726202cc91d0a40ea8" - integrity sha512-7T8JiUTtDfppojosORAflABfLsLKMLkBHSWkjNQrjIltGoDzNGn7wEPOSfjqYAGTYME65esQzMJxGDjuLBKdOg== - dependencies: - "@babel/code-frame" "^7.12.13" - "@jest/types" "^29.0.3" - "@types/stack-utils" "^2.0.0" - chalk "^4.0.0" - graceful-fs "^4.2.9" - micromatch "^4.0.4" - pretty-format "^29.0.3" - slash "^3.0.0" - stack-utils "^2.0.3" - -jest-mock@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.0.3.tgz#4f0093f6a9cb2ffdb9c44a07a3912f0c098c8de9" - integrity sha512-ort9pYowltbcrCVR43wdlqfAiFJXBx8l4uJDsD8U72LgBcetvEp+Qxj1W9ZYgMRoeAo+ov5cnAGF2B6+Oth+ww== - dependencies: - "@jest/types" "^29.0.3" - "@types/node" "*" - -jest-pnp-resolver@^1.2.2: - version "1.2.2" - resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz#b704ac0ae028a89108a4d040b3f919dfddc8e33c" - integrity sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w== - -jest-regex-util@^29.0.0: - version "29.0.0" - resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.0.0.tgz#b442987f688289df8eb6c16fa8df488b4cd007de" - integrity sha512-BV7VW7Sy0fInHWN93MMPtlClweYv2qrSCwfeFWmpribGZtQPWNvRSq9XOVgOEjU1iBGRKXUZil0o2AH7Iy9Lug== - -jest-resolve-dependencies@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.0.3.tgz#f23a54295efc6374b86b198cf8efed5606d6b762" - integrity sha512-KzuBnXqNvbuCdoJpv8EanbIGObk7vUBNt/PwQPPx2aMhlv/jaXpUJsqWYRpP/0a50faMBY7WFFP8S3/CCzwfDw== - dependencies: - jest-regex-util "^29.0.0" - jest-snapshot "^29.0.3" - -jest-resolve@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.0.3.tgz#329a3431e3b9eb6629a2cd483e9bed95b26827b9" - integrity sha512-toVkia85Y/BPAjJasTC9zIPY6MmVXQPtrCk8SmiheC4MwVFE/CMFlOtMN6jrwPMC6TtNh8+sTMllasFeu1wMPg== - dependencies: - chalk "^4.0.0" - graceful-fs "^4.2.9" - jest-haste-map "^29.0.3" - jest-pnp-resolver "^1.2.2" - jest-util "^29.0.3" - jest-validate "^29.0.3" - resolve "^1.20.0" - resolve.exports "^1.1.0" - slash "^3.0.0" - -jest-runner@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.0.3.tgz#2e47fe1e8777aea9b8970f37e8f83630b508fb87" - integrity sha512-Usu6VlTOZlCZoNuh3b2Tv/yzDpKqtiNAetG9t3kJuHfUyVMNW7ipCCJOUojzKkjPoaN7Bl1f7Buu6PE0sGpQxw== - dependencies: - "@jest/console" "^29.0.3" - "@jest/environment" "^29.0.3" - "@jest/test-result" "^29.0.3" - 
"@jest/transform" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - chalk "^4.0.0" - emittery "^0.10.2" - graceful-fs "^4.2.9" - jest-docblock "^29.0.0" - jest-environment-node "^29.0.3" - jest-haste-map "^29.0.3" - jest-leak-detector "^29.0.3" - jest-message-util "^29.0.3" - jest-resolve "^29.0.3" - jest-runtime "^29.0.3" - jest-util "^29.0.3" - jest-watcher "^29.0.3" - jest-worker "^29.0.3" - p-limit "^3.1.0" - source-map-support "0.5.13" - -jest-runtime@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.0.3.tgz#5a823ec5902257519556a4e5a71a868e8fd788aa" - integrity sha512-12gZXRQ7ozEeEHKTY45a+YLqzNDR/x4c//X6AqwKwKJPpWM8FY4vwn4VQJOcLRS3Nd1fWwgP7LU4SoynhuUMHQ== - dependencies: - "@jest/environment" "^29.0.3" - "@jest/fake-timers" "^29.0.3" - "@jest/globals" "^29.0.3" - "@jest/source-map" "^29.0.0" - "@jest/test-result" "^29.0.3" - "@jest/transform" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - chalk "^4.0.0" - cjs-module-lexer "^1.0.0" - collect-v8-coverage "^1.0.0" - glob "^7.1.3" - graceful-fs "^4.2.9" - jest-haste-map "^29.0.3" - jest-message-util "^29.0.3" - jest-mock "^29.0.3" - jest-regex-util "^29.0.0" - jest-resolve "^29.0.3" - jest-snapshot "^29.0.3" - jest-util "^29.0.3" - slash "^3.0.0" - strip-bom "^4.0.0" - -jest-snapshot@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.0.3.tgz#0a024706986a915a6eefae74d7343069d2fc8eef" - integrity sha512-52q6JChm04U3deq+mkQ7R/7uy7YyfVIrebMi6ZkBoDJ85yEjm/sJwdr1P0LOIEHmpyLlXrxy3QP0Zf5J2kj0ew== - dependencies: - "@babel/core" "^7.11.6" - "@babel/generator" "^7.7.2" - "@babel/plugin-syntax-jsx" "^7.7.2" - "@babel/plugin-syntax-typescript" "^7.7.2" - "@babel/traverse" "^7.7.2" - "@babel/types" "^7.3.3" - "@jest/expect-utils" "^29.0.3" - "@jest/transform" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/babel__traverse" "^7.0.6" - "@types/prettier" "^2.1.5" - babel-preset-current-node-syntax "^1.0.0" - chalk "^4.0.0" - expect "^29.0.3" - graceful-fs "^4.2.9" - jest-diff "^29.0.3" - jest-get-type "^29.0.0" - jest-haste-map "^29.0.3" - jest-matcher-utils "^29.0.3" - jest-message-util "^29.0.3" - jest-util "^29.0.3" - natural-compare "^1.4.0" - pretty-format "^29.0.3" - semver "^7.3.5" - -jest-util@^29.0.0, jest-util@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.0.3.tgz#06d1d77f9a1bea380f121897d78695902959fbc0" - integrity sha512-Q0xaG3YRG8QiTC4R6fHjHQPaPpz9pJBEi0AeOE4mQh/FuWOijFjGXMMOfQEaU9i3z76cNR7FobZZUQnL6IyfdQ== - dependencies: - "@jest/types" "^29.0.3" - "@types/node" "*" - chalk "^4.0.0" - ci-info "^3.2.0" - graceful-fs "^4.2.9" - picomatch "^2.2.3" - -jest-validate@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.0.3.tgz#f9521581d7344685428afa0a4d110e9c519aeeb6" - integrity sha512-OebiqqT6lK8cbMPtrSoS3aZP4juID762lZvpf1u+smZnwTEBCBInan0GAIIhv36MxGaJvmq5uJm7dl5gVt+Zrw== - dependencies: - "@jest/types" "^29.0.3" - camelcase "^6.2.0" - chalk "^4.0.0" - jest-get-type "^29.0.0" - leven "^3.1.0" - pretty-format "^29.0.3" - -jest-watcher@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.0.3.tgz#8e220d1cc4f8029875e82015d084cab20f33d57f" - integrity sha512-tQX9lU91A+9tyUQKUMp0Ns8xAcdhC9fo73eqA3LFxP2bSgiF49TNcc+vf3qgGYYK9qRjFpXW9+4RgF/mbxyOOw== - dependencies: - "@jest/test-result" "^29.0.3" - "@jest/types" "^29.0.3" - "@types/node" "*" - ansi-escapes "^4.2.1" - chalk "^4.0.0" - emittery 
"^0.10.2" - jest-util "^29.0.3" - string-length "^4.0.1" - -jest-worker@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.0.3.tgz#c2ba0aa7e41eec9eb0be8e8a322ae6518df72647" - integrity sha512-Tl/YWUugQOjoTYwjKdfJWkSOfhufJHO5LhXTSZC3TRoQKO+fuXnZAdoXXBlpLXKGODBL3OvdUasfDD4PcMe6ng== - dependencies: - "@types/node" "*" - merge-stream "^2.0.0" - supports-color "^8.0.0" - -jest@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/jest/-/jest-29.0.3.tgz#5227a0596d30791b2649eea347e4aa97f734944d" - integrity sha512-ElgUtJBLgXM1E8L6K1RW1T96R897YY/3lRYqq9uVcPWtP2AAl/nQ16IYDh/FzQOOQ12VEuLdcPU83mbhG2C3PQ== - dependencies: - "@jest/core" "^29.0.3" - "@jest/types" "^29.0.3" - import-local "^3.0.2" - jest-cli "^29.0.3" - -js-sha3@0.8.0: - version "0.8.0" - resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840" - integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q== - -js-tokens@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" - integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== - -js-yaml@^3.13.1: - version "3.14.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" - integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - -jsesc@^2.5.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" - integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== - -json-parse-even-better-errors@^2.3.0: - version "2.3.1" - resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" - integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== - -json5@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.1.tgz#655d50ed1e6f95ad1a3caababd2b0efda10b395c" - integrity sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA== - -keccak@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/keccak/-/keccak-3.0.2.tgz#4c2c6e8c54e04f2670ee49fa734eb9da152206e0" - integrity sha512-PyKKjkH53wDMLGrvmRGSNWgmSxZOUqbnXwKL9tmgbFYA1iAYqW21kfR7mZXV0MlESiefxQQE9X9fTa3X+2MPDQ== - dependencies: - node-addon-api "^2.0.0" - node-gyp-build "^4.2.0" - readable-stream "^3.6.0" - -kleur@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" - integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== - -leven@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" - integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== - -lines-and-columns@^1.1.6: - version "1.2.4" - resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" - integrity 
sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== - -locate-path@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" - integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== - dependencies: - p-locate "^4.1.0" - -lodash.memoize@4.x: - version "4.1.2" - resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" - integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag== - -lru-cache@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" - integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== - dependencies: - yallist "^4.0.0" - -make-dir@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" - integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== - dependencies: - semver "^6.0.0" - -make-error@1.x, make-error@^1.1.1: - version "1.3.6" - resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" - integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== - -makeerror@1.0.12: - version "1.0.12" - resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.12.tgz#3e5dd2079a82e812e983cc6610c4a2cb0eaa801a" - integrity sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg== - dependencies: - tmpl "1.0.5" - -md5.js@^1.3.4: - version "1.3.5" - resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" - integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg== - dependencies: - hash-base "^3.0.0" - inherits "^2.0.1" - safe-buffer "^5.1.2" - -merge-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" - integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== - -micromatch@^4.0.4: - version "4.0.5" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== - dependencies: - braces "^3.0.2" - picomatch "^2.3.1" - -mime-db@1.52.0: - version "1.52.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" - integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== - -mime-types@^2.1.12: - version "2.1.35" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - -mimic-fn@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" - integrity 
sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== - -minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" - integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== - -minimalistic-crypto-utils@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" - integrity sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg== - -minimatch@^3.0.4, minimatch@^3.1.1: - version "3.1.2" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" - integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== - dependencies: - brace-expansion "^1.1.7" - -ms@2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" - integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== - -natural-compare@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" - integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== - -node-addon-api@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-2.0.2.tgz#432cfa82962ce494b132e9d72a15b29f71ff5d32" - integrity sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA== - -node-fetch@^2.6.1: - version "2.6.7" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" - integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== - dependencies: - whatwg-url "^5.0.0" - -node-gyp-build@^4.2.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.5.0.tgz#7a64eefa0b21112f89f58379da128ac177f20e40" - integrity sha512-2iGbaQBV+ITgCz76ZEjmhUKAKVf7xfY1sRl4UiKQspfZMH2h06SyhNsnSVy50cwkFQDGLyif6m/6uFXHkOZ6rg== - -node-int64@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" - integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== - -node-releases@^2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.6.tgz#8a7088c63a55e493845683ebf3c828d8c51c5503" - integrity sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg== - -normalize-path@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" - integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== - -npm-run-path@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" - integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== - dependencies: - path-key "^3.0.0" - 
-once@^1.3.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== - dependencies: - wrappy "1" - -onetime@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" - integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== - dependencies: - mimic-fn "^2.1.0" - -p-limit@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" - integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== - dependencies: - p-try "^2.0.0" - -p-limit@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" - integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== - dependencies: - yocto-queue "^0.1.0" - -p-locate@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" - integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== - dependencies: - p-limit "^2.2.0" - -p-try@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" - integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== - -parse-json@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" - integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== - dependencies: - "@babel/code-frame" "^7.0.0" - error-ex "^1.3.1" - json-parse-even-better-errors "^2.3.0" - lines-and-columns "^1.1.6" - -path-exists@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" - integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== - -path-is-absolute@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== - -path-key@^3.0.0, path-key@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" - integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== - -path-parse@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" - integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== - -pbkdf2@^3.0.17: - version "3.1.2" - resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.2.tgz#dd822aa0887580e52f1a039dc3eda108efae3075" - integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA== - dependencies: - create-hash "^1.1.2" - create-hmac "^1.1.4" - ripemd160 "^2.0.1" - safe-buffer "^5.0.1" - 
sha.js "^2.4.8" - -picocolors@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" - integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== - -picomatch@^2.0.4, picomatch@^2.2.3, picomatch@^2.3.1: - version "2.3.1" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" - integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== - -pirates@^4.0.4: - version "4.0.5" - resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.5.tgz#feec352ea5c3268fb23a37c702ab1699f35a5f3b" - integrity sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ== - -pkg-dir@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" - integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== - dependencies: - find-up "^4.0.0" - -pretty-format@^29.0.0, pretty-format@^29.0.3: - version "29.0.3" - resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.0.3.tgz#23d5f8cabc9cbf209a77d49409d093d61166a811" - integrity sha512-cHudsvQr1K5vNVLbvYF/nv3Qy/F/BcEKxGuIeMiVMRHxPOO1RxXooP8g/ZrwAp7Dx+KdMZoOc7NxLHhMrP2f9Q== - dependencies: - "@jest/schemas" "^29.0.0" - ansi-styles "^5.0.0" - react-is "^18.0.0" - -prompts@^2.0.1: - version "2.4.2" - resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" - integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== - dependencies: - kleur "^3.0.3" - sisteransi "^1.0.5" - -randombytes@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" - integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== - dependencies: - safe-buffer "^5.1.0" - -react-is@^18.0.0: - version "18.2.0" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b" - integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== - -readable-stream@^3.6.0: - version "3.6.0" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" - integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== - dependencies: - inherits "^2.0.3" - string_decoder "^1.1.1" - util-deprecate "^1.0.1" - -require-directory@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" - integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== - -resolve-cwd@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d" - integrity sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg== - dependencies: - resolve-from "^5.0.0" - -resolve-from@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" - integrity 
sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== - -resolve.exports@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-1.1.0.tgz#5ce842b94b05146c0e03076985d1d0e7e48c90c9" - integrity sha512-J1l+Zxxp4XK3LUDZ9m60LRJF/mAe4z6a4xyabPHk7pvK5t35dACV32iIjJDFeWZFfZlO29w6SZ67knR0tHzJtQ== - -resolve@^1.20.0: - version "1.22.1" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177" - integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw== - dependencies: - is-core-module "^2.9.0" - path-parse "^1.0.7" - supports-preserve-symlinks-flag "^1.0.0" - -ripemd160@^2.0.0, ripemd160@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" - integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== - dependencies: - hash-base "^3.0.0" - inherits "^2.0.1" - -rlp@^2.2.3: - version "2.2.7" - resolved "https://registry.yarnpkg.com/rlp/-/rlp-2.2.7.tgz#33f31c4afac81124ac4b283e2bd4d9720b30beaf" - integrity sha512-d5gdPmgQ0Z+AklL2NVXr/IoSjNZFfTVvQWzL/AM2AOcSzYP2xjlb0AC8YyCLc41MSNf6P6QVtjgPdmVtzb+4lQ== - dependencies: - bn.js "^5.2.0" - -safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0: - version "5.2.1" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" - integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== - -safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -scrypt-js@3.0.1, scrypt-js@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" - integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== - -secp256k1@^4.0.1: - version "4.0.3" - resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-4.0.3.tgz#c4559ecd1b8d3c1827ed2d1b94190d69ce267303" - integrity sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA== - dependencies: - elliptic "^6.5.4" - node-addon-api "^2.0.0" - node-gyp-build "^4.2.0" - -semver@7.x, semver@^7.3.5: - version "7.3.7" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.7.tgz#12c5b649afdbf9049707796e22a4028814ce523f" - integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g== - dependencies: - lru-cache "^6.0.0" - -semver@^6.0.0, semver@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" - integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== - -setimmediate@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" - integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== - -sha.js@^2.4.0, sha.js@^2.4.8: - version "2.4.11" - resolved 
"https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" - integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -shebang-command@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" - integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== - dependencies: - shebang-regex "^3.0.0" - -shebang-regex@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" - integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== - -signal-exit@^3.0.3, signal-exit@^3.0.7: - version "3.0.7" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" - integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== - -sisteransi@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" - integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== - -slash@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" - integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== - -source-map-support@0.5.13: - version "0.5.13" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932" - integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w== - dependencies: - buffer-from "^1.0.0" - source-map "^0.6.0" - -source-map@^0.6.0, source-map@^0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" - integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== - -sprintf-js@~1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" - integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== - -stack-utils@^2.0.3: - version "2.0.5" - resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.5.tgz#d25265fca995154659dbbfba3b49254778d2fdd5" - integrity sha512-xrQcmYhOsn/1kX+Vraq+7j4oE2j/6BFscZ0etmYg81xuM8Gq0022Pxb8+IqgOFUIaxHs0KaSb7T1+OegiNrNFA== - dependencies: - escape-string-regexp "^2.0.0" - -string-length@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.2.tgz#a8a8dc7bd5c1a82b9b3c8b87e125f66871b6e57a" - integrity sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ== - dependencies: - char-regex "^1.0.2" - strip-ansi "^6.0.0" - -string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - 
emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string_decoder@^1.1.1: - version "1.3.0" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" - integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== - dependencies: - safe-buffer "~5.2.0" - -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - -strip-bom@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" - integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w== - -strip-final-newline@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" - integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== - -strip-hex-prefix@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/strip-hex-prefix/-/strip-hex-prefix-1.0.0.tgz#0c5f155fef1151373377de9dbb588da05500e36f" - integrity sha512-q8d4ue7JGEiVcypji1bALTos+0pWtyGlivAWyPuTkHzuTCJqrK9sWxYQZUq6Nq3cuyv3bm734IhHvHtGGURU6A== - dependencies: - is-hex-prefixed "1.0.0" - -strip-json-comments@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" - integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== - -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" - -supports-color@^7.0.0, supports-color@^7.1.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" - integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== - dependencies: - has-flag "^4.0.0" - -supports-color@^8.0.0: - version "8.1.1" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" - integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== - dependencies: - has-flag "^4.0.0" - -supports-hyperlinks@^2.0.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" - integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== - dependencies: - has-flag "^4.0.0" - supports-color "^7.0.0" - -supports-preserve-symlinks-flag@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" - integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== - -terminal-link@^2.0.0: 
- version "2.1.1" - resolved "https://registry.yarnpkg.com/terminal-link/-/terminal-link-2.1.1.tgz#14a64a27ab3c0df933ea546fba55f2d078edc994" - integrity sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ== - dependencies: - ansi-escapes "^4.2.1" - supports-hyperlinks "^2.0.0" - -test-exclude@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" - integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w== - dependencies: - "@istanbuljs/schema" "^0.1.2" - glob "^7.1.4" - minimatch "^3.0.4" - -tmpl@1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc" - integrity sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw== - -to-fast-properties@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" - integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== - -to-regex-range@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" - integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== - dependencies: - is-number "^7.0.0" - -tr46@~0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" - integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== - -ts-jest@^29.0.1: - version "29.0.1" - resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.0.1.tgz#3296b39d069dc55825ce1d059a9510b33c718b86" - integrity sha512-htQOHshgvhn93QLxrmxpiQPk69+M1g7govO1g6kf6GsjCv4uvRV0znVmDrrvjUrVCnTYeY4FBxTYYYD4airyJA== - dependencies: - bs-logger "0.x" - fast-json-stable-stringify "2.x" - jest-util "^29.0.0" - json5 "^2.2.1" - lodash.memoize "4.x" - make-error "1.x" - semver "7.x" - yargs-parser "^21.0.1" - -ts-node@^10.1.0: - version "10.9.1" - resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b" - integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw== - dependencies: - "@cspotcode/source-map-support" "^0.8.0" - "@tsconfig/node10" "^1.0.7" - "@tsconfig/node12" "^1.0.7" - "@tsconfig/node14" "^1.0.0" - "@tsconfig/node16" "^1.0.2" - acorn "^8.4.1" - acorn-walk "^8.1.1" - arg "^4.1.0" - create-require "^1.1.0" - diff "^4.0.1" - make-error "^1.1.1" - v8-compile-cache-lib "^3.0.1" - yn "3.1.1" - -type-detect@4.0.8: - version "4.0.8" - resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" - integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== - -type-fest@^0.21.3: - version "0.21.3" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" - integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== - -typescript@^4.3.5: - version "4.8.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.8.3.tgz#d59344522c4bc464a65a730ac695007fdb66dd88" - integrity 
sha512-goMHfm00nWPa8UvR/CPSvykqf6dVV8x/dp0c5mFTMTIu0u0FlGWRioyy7Nn0PGAdHxpJZnuO/ut+PpQ8UiHAig== - -update-browserslist-db@^1.0.9: - version "1.0.9" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.9.tgz#2924d3927367a38d5c555413a7ce138fc95fcb18" - integrity sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg== - dependencies: - escalade "^3.1.1" - picocolors "^1.0.0" - -util-deprecate@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" - integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== - -v8-compile-cache-lib@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" - integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== - -v8-to-istanbul@^9.0.1: - version "9.0.1" - resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.0.1.tgz#b6f994b0b5d4ef255e17a0d17dc444a9f5132fa4" - integrity sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w== - dependencies: - "@jridgewell/trace-mapping" "^0.3.12" - "@types/istanbul-lib-coverage" "^2.0.1" - convert-source-map "^1.6.0" - -walker@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" - integrity sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ== - dependencies: - makeerror "1.0.12" - -webidl-conversions@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" - integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== - -whatwg-url@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" - integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== - dependencies: - tr46 "~0.0.3" - webidl-conversions "^3.0.0" - -which@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" - integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== - dependencies: - isexe "^2.0.0" - -wrap-ansi@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== - -write-file-atomic@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd" - integrity sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg== - dependencies: - imurmurhash "^0.1.4" - signal-exit 
"^3.0.7" - -ws@7.4.6: - version "7.4.6" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" - integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== - -y18n@^5.0.5: - version "5.0.8" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" - integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== - -yallist@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" - integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== - -yargs-parser@^21.0.0, yargs-parser@^21.0.1: - version "21.1.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" - integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== - -yargs@^17.3.1: - version "17.5.1" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.5.1.tgz#e109900cab6fcb7fd44b1d8249166feb0b36e58e" - integrity sha512-t6YAJcxDkNX7NFYiVtKvWUz8l+PaKTLiL63mJYWR2GnHq2gjEWISzsLp9wg3aY36dY1j+gfIEL3pIF+XlJJfbA== - dependencies: - cliui "^7.0.2" - escalade "^3.1.1" - get-caller-file "^2.0.5" - require-directory "^2.1.1" - string-width "^4.2.3" - y18n "^5.0.5" - yargs-parser "^21.0.0" - -yn@3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" - integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== - -yocto-queue@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" - integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== - -"zksync-web3@link:../../../sdk/zksync-web3.js": - version "0.0.0" - uid "" diff --git a/core/tests/vm-benchmark/.gitignore b/core/tests/vm-benchmark/.gitignore new file mode 100644 index 000000000000..10e48e4e6b49 --- /dev/null +++ b/core/tests/vm-benchmark/.gitignore @@ -0,0 +1,3 @@ +perf.data +perf.data.old +perfbench.script diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml new file mode 100644 index 000000000000..1a980a34bb9d --- /dev/null +++ b/core/tests/vm-benchmark/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "vm-benchmark" +version = "0.1.0" +edition = "2021" + +[dependencies] +vm-benchmark-harness = {path = "harness"} +metrics-exporter-prometheus = "0.11" +metrics = "0.20" +tokio = "1" + +[dev-dependencies] +criterion = "0.4" +iai = "0.1" + +[[bench]] +name = "criterion" +harness = false + +[[bench]] +name = "diy_benchmark" +harness = false + +[[bench]] +name = "iai" +harness = false + +[[bin]] +name = "iai_results_to_prometheus" +path = "src/iai_results_to_prometheus.rs" + +[[bin]] +name = "compare_iai_results" +path = "src/compare_iai_results.rs" diff --git a/core/tests/vm-benchmark/README.md b/core/tests/vm-benchmark/README.md new file mode 100644 index 000000000000..4d66f287a707 --- /dev/null +++ b/core/tests/vm-benchmark/README.md @@ -0,0 +1,42 @@ +# Benchmarking the VM + +Currently all benchmarking happens on contract deployment bytecodes. These can execute arbitrary code, so that is +surprisingly useful. This library can be used to build more complex benchmarks, however. 
+ +## Benchmarking + +There are three different benchmarking tools available: + +```sh +cargo bench --bench criterion +cargo bench --bench diy_benchmark +cargo +nightly bench --bench iai +``` + +Criterion is the de-facto microbenchmarking tool for Rust. Run it, then optimize something and run the command again to +see if your changes have made a difference. + +The DIY benchmark works a bit better in noisy environments and is used to push benchmark data to Prometheus +automatically. + +IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it but it also doesn't measure exactly the +same thing as normal benchmarks. You need valgrind to be able to run it. + +You can add your own bytecodes to be benchmarked into the folder "deployment_benchmarks". For iai, you also need to add +them to "benches/iai.rs". + +## Profiling (Linux only) + +You can also use `sh perf.sh bytecode_file` to produce data that can be fed into the +[firefox profiler](profiler.firefox.com) for a specific bytecode. + +## Fuzzing + +There is a fuzzer using this library at core/lib/vm/fuzz. The fuzz.sh script located there starts a fuzzer which +attempts to make cover as much code as it can to ultimately produce a valid deployment bytecode. + +It has no chance of succeeding currently because the fuzzing speed drops to 10 executions/s easily. Optimizing the VM or +lowering the gas limit will help with that. + +The fuzzer has been useful for producing synthetic benchmark inputs. It may be a good tool for finding show transactions +with a certain gas limit, an empirical way of evaluating gas prices of instructions. diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs new file mode 100644 index 000000000000..c6c81d723365 --- /dev/null +++ b/core/tests/vm-benchmark/benches/criterion.rs @@ -0,0 +1,20 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; + +fn benches_in_folder(c: &mut Criterion) { + for path in std::fs::read_dir("deployment_benchmarks").unwrap() { + let path = path.unwrap().path(); + + let test_contract = std::fs::read(&path).expect("failed to read file"); + + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx(code); + + c.bench_function(path.file_name().unwrap().to_str().unwrap(), |b| { + b.iter(|| BenchmarkingVm::new().run_transaction(black_box(&tx))) + }); + } +} + +criterion_group!(benches, benches_in_folder); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs new file mode 100644 index 000000000000..02ea280a5675 --- /dev/null +++ b/core/tests/vm-benchmark/benches/diy_benchmark.rs @@ -0,0 +1,43 @@ +use criterion::black_box; +use std::time::{Duration, Instant}; +use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; + +fn main() { + let mut results = vec![]; + + for path in std::fs::read_dir("deployment_benchmarks").unwrap() { + let path = path.unwrap().path(); + + let test_contract = std::fs::read(&path).expect("failed to read file"); + + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx(code); + + let name = path.file_name().unwrap().to_str().unwrap(); + + println!("benchmarking: {}", name); + + let mut timings = vec![]; + let benchmark_start = Instant::now(); + while benchmark_start.elapsed() < Duration::from_secs(5) { + let 
start = Instant::now(); + BenchmarkingVm::new() + .run_transaction(black_box(&tx)) + .unwrap(); + timings.push(start.elapsed()); + } + + println!("{:?}", timings.iter().min().unwrap()); + results.push((name.to_owned(), timings)); + } + + if option_env!("PUSH_VM_BENCHMARKS_TO_PROMETHEUS").is_some() { + vm_benchmark::with_prometheus::with_prometheus(|| { + for (name, timings) in results { + for (i, timing) in timings.into_iter().enumerate() { + metrics::gauge!("vm_benchmark.timing", timing, "benchmark" => name.clone(), "run_no" => i.to_string()); + } + } + }); + } +} diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs new file mode 100644 index 000000000000..e45c9107f91a --- /dev/null +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -0,0 +1,31 @@ +use iai::black_box; +use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; + +fn run_bytecode(path: &str) { + let test_contract = std::fs::read(path).expect("failed to read file"); + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx(code); + + black_box(BenchmarkingVm::new().run_transaction(&tx).unwrap()); +} + +macro_rules! make_functions_and_main { + ($($file:ident,)+) => { + $( + fn $file() { + run_bytecode(concat!("deployment_benchmarks/", stringify!($file))) + } + )+ + + iai::main!($($file,)+); + }; +} + +make_functions_and_main!( + access_memory, + call_far, + decode_shl_sub, + deploy_simple_contract, + finish_eventful_frames, + write_and_decode, +); diff --git a/core/tests/vm-benchmark/deployment_benchmarks/access_memory b/core/tests/vm-benchmark/deployment_benchmarks/access_memory new file mode 100644 index 000000000000..28004624f8b3 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/access_memory differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks/call_far b/core/tests/vm-benchmark/deployment_benchmarks/call_far new file mode 100644 index 000000000000..65a57fbd0712 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/call_far differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub b/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub new file mode 100644 index 000000000000..87111ba6379b Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract b/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract new file mode 100644 index 000000000000..34d542387c27 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames b/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames new file mode 100644 index 000000000000..4105e320f238 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode b/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode new file mode 100644 index 000000000000..b1cda7d9430e Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode differ diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml new file mode 100644 index 000000000000..f13e2e866287 --- /dev/null +++ b/core/tests/vm-benchmark/harness/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = 
"vm-benchmark-harness" +version = "0.1.0" +edition = "2021" + +[dependencies] +vm = {path = "../../../lib/vm"} +zksync_types = {path = "../../../lib/types"} +zksync_state = {path = "../../../lib/state"} +zksync_utils = {path = "../../../lib/utils"} +zksync_config = {path = "../../../lib/config"} +zksync_contracts = {path = "../../../lib/contracts"} +zk_evm = {git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.3.2"} +once_cell = "1.17" +ouroboros = "0.15.6" diff --git a/core/tests/vm-benchmark/harness/src/in_memory_storage.rs b/core/tests/vm-benchmark/harness/src/in_memory_storage.rs new file mode 100644 index 000000000000..6fb1f73c8ab4 --- /dev/null +++ b/core/tests/vm-benchmark/harness/src/in_memory_storage.rs @@ -0,0 +1,63 @@ +use std::collections::HashMap; +use zksync_types::{ + get_code_key, get_system_context_init_logs, storage::ZkSyncReadStorage, + system_contracts::get_system_smart_contracts, L2ChainId, StorageKey, StorageLog, + StorageLogKind, StorageValue, H256, +}; +use zksync_utils::bytecode::hash_bytecode; + +/// An in-memory storage that contains the system contracts by default. +#[derive(Debug)] +pub struct InMemoryStorage { + data: HashMap, + deps: HashMap>, +} + +impl Default for InMemoryStorage { + fn default() -> Self { + let contracts = get_system_smart_contracts(); + let system_context_init_log = get_system_context_init_logs(L2ChainId(270)); + + let mut data = HashMap::new(); + for log in contracts + .iter() + .map(|contract| { + let deployer_code_key = get_code_key(contract.account_id.address()); + StorageLog::new_write_log(deployer_code_key, hash_bytecode(&contract.bytecode)) + }) + .chain(system_context_init_log) + { + if log.kind == StorageLogKind::Write { + data.insert(log.key, log.value); + } + } + + let mut deps = HashMap::new(); + + for contract in contracts { + deps.insert(hash_bytecode(&contract.bytecode), contract.bytecode); + } + + Self { data, deps } + } +} + +impl InMemoryStorage { + pub fn set_value(&mut self, key: StorageKey, value: StorageValue) { + self.data.insert(key, value); + } +} + +impl ZkSyncReadStorage for &InMemoryStorage { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + self.data.get(key).cloned().unwrap_or(H256::zero()) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.data.contains_key(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.deps.get(&hash).cloned() + } +} diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs new file mode 100644 index 000000000000..77b93b40d977 --- /dev/null +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -0,0 +1,163 @@ +use once_cell::sync::{Lazy, OnceCell}; +use ouroboros::self_referencing; +use vm::vm_with_bootloader::DerivedBlockContext; +use vm::vm_with_bootloader::TxExecutionMode::VerifyExecute; +use vm::vm_with_bootloader::{push_transaction_to_bootloader_memory, BlockContextMode}; +use vm::{HistoryEnabled, OracleTools, VmInstance}; +use zk_evm::block_properties::BlockProperties; +use zksync_config::constants::ethereum::MAX_GAS_PER_PUBDATA_BYTE; +use zksync_contracts::deployer_contract; +use zksync_state::storage_view::StorageView; +use zksync_types::ethabi::{encode, Token}; +use zksync_types::l2::L2Tx; +use zksync_types::utils::storage_key_for_eth_balance; +use zksync_types::{fee::Fee, Nonce, Transaction, H256, U256}; +use zksync_types::{L2ChainId, PackedEthSignature, CONTRACT_DEPLOYER_ADDRESS}; +use zksync_utils::bytecode::hash_bytecode; + +mod in_memory_storage; +use 
in_memory_storage::InMemoryStorage; + +/// Bytecodes have consist of an odd number of 32 byte words +/// This function "fixes" bytecodes of wrong length by cutting off their end. +pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { + let mut words = bytes.len() / 32; + if words == 0 { + return None; + } + + if words & 1 == 0 { + words -= 1; + } + Some(&bytes[..32 * words]) +} + +static STORAGE: Lazy = Lazy::new(|| { + let mut storage = InMemoryStorage::default(); + + // give PRIVATE_KEY some money + let my_addr = PackedEthSignature::address_from_private_key(&PRIVATE_KEY).unwrap(); + let key = storage_key_for_eth_balance(&my_addr); + storage.set_value(key, zksync_utils::u256_to_h256(U256([0, 0, 1, 0]))); + + storage +}); +static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { + deployer_contract() + .function("create") + .unwrap() + .short_signature() +}); +const PRIVATE_KEY: H256 = H256([42; 32]); +static BLOCK_PROPERTIES: OnceCell = OnceCell::new(); + +pub struct BenchmarkingVm<'a>(BenchmarkingVmInner<'a>); + +#[self_referencing] +struct BenchmarkingVmInner<'a> { + storage_view: StorageView<&'a InMemoryStorage>, + #[borrows(mut storage_view)] + oracle_tools: OracleTools<'this, false, HistoryEnabled>, + #[borrows(mut oracle_tools)] + #[not_covariant] + vm: Box>, +} + +impl BenchmarkingVm<'_> { + #[allow(clippy::new_without_default)] + pub fn new() -> Self { + let (block_context, block_properties) = vm::utils::create_test_block_params(); + let block_context = block_context.into(); + + let block_properties = BLOCK_PROPERTIES.get_or_init(|| block_properties); + + Self( + BenchmarkingVmInnerBuilder { + storage_view: StorageView::new(&*STORAGE), + oracle_tools_builder: |storage_view| { + vm::OracleTools::new(storage_view, HistoryEnabled) + }, + vm_builder: |oracle_tools| { + vm::vm_with_bootloader::init_vm( + oracle_tools, + BlockContextMode::NewBlock(block_context, Default::default()), + block_properties, + VerifyExecute, + &vm::utils::BASE_SYSTEM_CONTRACTS, + ) + }, + } + .build(), + ) + } + + pub fn run_transaction( + &mut self, + tx: &Transaction, + ) -> Result { + self.0.with_vm_mut(|vm| { + push_transaction_to_bootloader_memory(vm, tx, VerifyExecute, None); + vm.execute_next_tx(u32::MAX, false) + }) + } +} + +pub fn get_deploy_tx(code: &[u8]) -> Transaction { + let params = [ + Token::FixedBytes(vec![0u8; 32]), + Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::Bytes([].to_vec()), + ]; + let calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let (block_context, _) = vm::utils::create_test_block_params(); + let block_context: DerivedBlockContext = block_context.into(); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + calldata, + Nonce(0), + Fee { + gas_limit: U256::from(10000000u32), + max_fee_per_gas: U256::from(block_context.base_fee), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), + }, + U256::zero(), + L2ChainId(270), + &PRIVATE_KEY, + Some(vec![code.to_vec()]), // maybe not needed? 
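+        // (this argument is the transaction's factory deps list - the raw bytecode travels with the tx so the hash referenced in the calldata can be resolved)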
+ Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + + signed.into() +} + +#[cfg(test)] +mod tests { + use zksync_contracts::read_bytecode; + use zksync_types::tx::tx_execution_info::TxExecutionStatus::Success; + + use crate::*; + + #[test] + fn can_deploy_contract() { + let test_contract = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", + ); + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_deploy_tx(&test_contract)); + + match res { + Ok(x) => assert_eq!(x.status, Success), + Err(_) => panic!("should succeed"), + } + } +} diff --git a/core/tests/vm-benchmark/perf.sh b/core/tests/vm-benchmark/perf.sh new file mode 100644 index 000000000000..e3e35254509f --- /dev/null +++ b/core/tests/vm-benchmark/perf.sh @@ -0,0 +1,3 @@ +cargo build --profile perf && +perf record -F999 --call-graph dwarf,65528 ../../../target/perf/vm-benchmark $1 && +perf script -F +pid > perfbench.script diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/compare_iai_results.rs new file mode 100644 index 000000000000..535ba7faba0c --- /dev/null +++ b/core/tests/vm-benchmark/src/compare_iai_results.rs @@ -0,0 +1,33 @@ +use std::collections::HashMap; +use std::fs::File; +use std::io::BufReader; +use vm_benchmark::parse_iai::parse_iai; + +fn main() { + let args: [String; 2] = std::env::args() + .skip(1) + .take(2) + .collect::>() + .try_into() + .expect("expected two arguments"); + + let before = get_name_to_cycles(&args[0]); + let after = get_name_to_cycles(&args[1]); + + for (name, cycles) in before { + if let Some(&cycles2) = after.get(&name) { + let change = ((cycles2 as f64) - (cycles as f64)) / (cycles as f64); + if change.abs() > 0.02 { + println!("{} {:+.1}%", name, change * 100.0); + } + } + } +} + +fn get_name_to_cycles(filename: &str) -> HashMap { + parse_iai(BufReader::new( + File::open(filename).expect("failed to open file"), + )) + .map(|x| (x.name, x.cycles)) + .collect() +} diff --git a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs new file mode 100644 index 000000000000..dc3c8f6d98f7 --- /dev/null +++ b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs @@ -0,0 +1,17 @@ +use std::io::BufReader; +use vm_benchmark::parse_iai::IaiResult; + +fn main() { + let results: Vec = + vm_benchmark::parse_iai::parse_iai(BufReader::new(std::io::stdin())).collect(); + + vm_benchmark::with_prometheus::with_prometheus(|| { + for r in results { + metrics::gauge!("vm_cachegrind.instructions", r.instructions as f64, "benchmark" => r.name.clone()); + metrics::gauge!("vm_cachegrind.l1_accesses", r.l1_accesses as f64, "benchmark" => r.name.clone()); + metrics::gauge!("vm_cachegrind.l2_accesses", r.l2_accesses as f64, "benchmark" => r.name.clone()); + metrics::gauge!("vm_cachegrind.ram_accesses", r.ram_accesses as f64, "benchmark" => r.name.clone()); + metrics::gauge!("vm_cachegrind.cycles", r.cycles as f64, "benchmark" => r.name); + } + }) +} diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs new file mode 100644 index 000000000000..38cc311105b3 --- /dev/null +++ b/core/tests/vm-benchmark/src/lib.rs @@ -0,0 +1,2 @@ +pub mod parse_iai; +pub mod with_prometheus; diff --git a/core/tests/vm-benchmark/src/main.rs b/core/tests/vm-benchmark/src/main.rs new file mode 100644 index 000000000000..0d159ffa8d80 --- 
/dev/null +++ b/core/tests/vm-benchmark/src/main.rs @@ -0,0 +1,18 @@ +use vm_benchmark_harness::*; + +fn main() { + let test_contract = std::fs::read( + std::env::args() + .nth(1) + .expect("please provide an input file"), + ) + .expect("failed to read file"); + + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx(code); + + for _ in 0..100 { + let mut vm = BenchmarkingVm::new(); + vm.run_transaction(&tx).unwrap(); + } +} diff --git a/core/tests/vm-benchmark/src/parse_iai.rs b/core/tests/vm-benchmark/src/parse_iai.rs new file mode 100644 index 000000000000..61376b429a32 --- /dev/null +++ b/core/tests/vm-benchmark/src/parse_iai.rs @@ -0,0 +1,53 @@ +use std::io::BufRead; + +pub struct IaiResult { + pub name: String, + pub instructions: u64, + pub l1_accesses: u64, + pub l2_accesses: u64, + pub ram_accesses: u64, + pub cycles: u64, +} + +pub fn parse_iai(iai_output: R) -> impl Iterator { + IaiResultParser { + lines: iai_output.lines().map(|x| x.unwrap()), + } +} + +struct IaiResultParser> { + lines: I, +} + +impl> Iterator for IaiResultParser { + type Item = IaiResult; + + fn next(&mut self) -> Option { + self.lines.next().map(|name| { + let result = IaiResult { + name, + instructions: self.parse_stat(), + l1_accesses: self.parse_stat(), + l2_accesses: self.parse_stat(), + ram_accesses: self.parse_stat(), + cycles: self.parse_stat(), + }; + self.lines.next(); + result + }) + } +} + +impl> IaiResultParser { + fn parse_stat(&mut self) -> u64 { + let line = self.lines.next().unwrap(); + let number = line + .split(':') + .nth(1) + .unwrap() + .split_whitespace() + .next() + .unwrap(); + number.parse().unwrap() + } +} diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs new file mode 100644 index 000000000000..cd7b1e69d4ff --- /dev/null +++ b/core/tests/vm-benchmark/src/with_prometheus.rs @@ -0,0 +1,22 @@ +use metrics_exporter_prometheus::PrometheusBuilder; +use std::time::Duration; + +pub fn with_prometheus(f: F) { + println!("Pushing results to Prometheus"); + + let endpoint = + "http://vmagent.stage.matterlabs.corp/api/v1/import/prometheus/metrics/job/vm-benchmark"; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + PrometheusBuilder::new() + .with_push_gateway(endpoint, Duration::from_millis(100)) + .unwrap() + .install() + .unwrap(); + + f(); + + println!("Waiting for push to happen..."); + tokio::time::sleep(Duration::from_secs(1)).await; + }); +} diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 8355ea217084..57c49dcb6dc5 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -11,19 +11,6 @@ FROM matterlabs/zksolc:v1.2.0 as zksolc-v1-2-0 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget && rm -rf /var/lib/apt/lists/* -RUN mkdir -p /etc/zksolc-bin/v1.2.0 && cp /usr/local/bin/zksolc /etc/zksolc-bin/v1.2.0/ -RUN mkdir -p /etc/zksolc-bin/v1.2.1 \ - && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.2.1 \ - && cp zksolc-linux-amd64-musl-v1.2.1 /etc/zksolc-bin/v1.2.1/zksolc \ - && chmod +x /etc/zksolc-bin/v1.2.1/zksolc -RUN mkdir -p /etc/zksolc-bin/v1.2.2 \ - && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.2.2 \ - && cp zksolc-linux-amd64-musl-v1.2.2 /etc/zksolc-bin/v1.2.2/zksolc \ - && chmod +x /etc/zksolc-bin/v1.2.2/zksolc -RUN mkdir -p /etc/zksolc-bin/v1.2.3 \ - && wget 
https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.2.3 \ - && cp zksolc-linux-amd64-musl-v1.2.3 /etc/zksolc-bin/v1.2.3/zksolc \ - && chmod +x /etc/zksolc-bin/v1.2.3/zksolc RUN mkdir -p /etc/zksolc-bin/v1.3.0 \ && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.0 \ && cp zksolc-linux-amd64-musl-v1.3.0 /etc/zksolc-bin/v1.3.0/zksolc \ @@ -61,6 +48,21 @@ RUN mkdir -p /etc/zksolc-bin/v1.3.7 \ && cp zksolc-linux-amd64-musl-v1.3.7 /etc/zksolc-bin/v1.3.7/zksolc \ && chmod +x /etc/zksolc-bin/v1.3.7/zksolc +RUN mkdir -p /etc/zksolc-bin/v1.3.8 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.8 \ + && cp zksolc-linux-amd64-musl-v1.3.8 /etc/zksolc-bin/v1.3.8/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.8/zksolc + +RUN mkdir -p /etc/zksolc-bin/v1.3.9 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.9 \ + && cp zksolc-linux-amd64-musl-v1.3.9 /etc/zksolc-bin/v1.3.9/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.9/zksolc + +RUN mkdir -p /etc/zksolc-bin/v1.3.10 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.10 \ + && cp zksolc-linux-amd64-musl-v1.3.10 /etc/zksolc-bin/v1.3.10/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.10/zksolc + COPY docker/contract-verifier/install-all-solc.sh install-all-solc.sh RUN sh ./install-all-solc.sh diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile new file mode 100644 index 000000000000..ee7f8da4aa28 --- /dev/null +++ b/docker/external-node/Dockerfile @@ -0,0 +1,28 @@ +# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner +# Not expected to work locally + +# syntax=docker/dockerfile:experimental +FROM rust:1.67-buster as builder +RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* +WORKDIR /usr/src/zksync +COPY . . 
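+# CARGO_HOME is redirected below so the build picks up the crates that CI pre-downloaded into the context (see the note at the top of this file).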
+ +RUN CARGO_HOME=./cargo cargo build --release + +FROM debian:buster-slim +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +EXPOSE 3000 +EXPOSE 3031 +EXPOSE 3030 +COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin +COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ +COPY etc/system-contracts/contracts/artifacts/ /etc/system-contracts/contracts/artifacts/ +COPY etc/system-contracts/contracts/precompiles/artifacts/ /etc/system-contracts/contracts/precompiles/artifacts/ +COPY etc/system-contracts/artifacts-zk /etc/system-contracts/artifacts-zk +COPY contracts/ethereum/artifacts/ /contracts/ethereum/artifacts/ +COPY contracts/zksync/artifacts-zk/ /contracts/zksync/artifacts-zk/ +COPY etc/tokens/ /etc/tokens/ +COPY etc/ERC20/ /etc/ERC20/ +COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ + +ENTRYPOINT ["zksync_external_node"] diff --git a/docker/geth/Dockerfile b/docker/geth/Dockerfile index b52e640e62ab..721ceedc14b7 100644 --- a/docker/geth/Dockerfile +++ b/docker/geth/Dockerfile @@ -1,4 +1,4 @@ -FROM ethereum/client-go:latest +FROM ethereum/client-go:stable RUN mkdir -p /seed/keystore COPY password.sec /seed/ @@ -10,4 +10,4 @@ COPY keystore/UTC--2019-04-06T21-13-27.692266000Z--8a91dc2d28b689474298d91899f0c COPY geth-entry.sh /bin/ EXPOSE 8545 8546 30303 30303/udp -ENTRYPOINT [ "sh", "/bin/geth-entry.sh" ] \ No newline at end of file +ENTRYPOINT [ "sh", "/bin/geth-entry.sh" ] diff --git a/docker/geth/geth-entry.sh b/docker/geth/geth-entry.sh index df9cf6bfe583..cf299d95ea14 100755 --- a/docker/geth/geth-entry.sh +++ b/docker/geth/geth-entry.sh @@ -22,6 +22,7 @@ standard|fast|mainnet) ;; esac +mkdir -p /var/lib/geth/data cd /var/lib/geth/data DEV="$CONFIG"-dev.json @@ -40,7 +41,8 @@ exec geth --networkid 9 --mine --miner.threads 1 \ --http --http.addr "0.0.0.0" \ --http.corsdomain "*" --nat "any" --http.api eth,web3,personal,net \ --unlock 0 --password "./password.sec" --allow-insecure-unlock \ - --ws --ws.port 8546 \ + --ws --ws.addr "0.0.0.0" --ws.port 8546 \ --gcmode archive \ --ws.origins "*" --http.vhosts=* \ - --miner.gastarget=10000000 --miner.gaslimit=11000000 + --miner.gaslimit=11000000 \ + --miner.etherbase=0x8a91dc2d28b689474298d91899f0c1baf62cb85b diff --git a/docker/local-node/Dockerfile b/docker/local-node/Dockerfile index faf880d31afc..d6275b008404 100644 --- a/docker/local-node/Dockerfile +++ b/docker/local-node/Dockerfile @@ -20,10 +20,6 @@ RUN npm install -g yarn RUN wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.12%2Bcommit.f00d7308 \ && mv solc-linux-amd64-v0.8.12+commit.f00d7308 /usr/bin/solc \ && chmod +x /usr/bin/solc -# Obtain `zksolc` 1.1.5. 
-RUN wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.1.6 \ - && mv zksolc-linux-amd64-musl-v1.1.6 /usr/bin/zksolc \ - && chmod +x /usr/bin/zksolc # Copy required packages while preserving the folders structure from the repo # It's required because these packages use relative paths to the SDK diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index beab804514fe..440dd3fa318d 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -20,11 +20,11 @@ then # Override values for database URL and eth client in the toml config files # so they will be taken into account - sed -i 's!^database_url=.*$!database_url="'"$DATABASE_URL"'"!' /etc/env/dev/private.toml - sed -i 's!^web3_url=.*$!web3_url="'"$ETH_CLIENT_WEB3_URL"'"!' /etc/env/dev/eth_client.toml - sed -i 's!^path=.*$!path="/var/lib/zksync/data"!' /etc/env/dev/database.toml - sed -i 's!^state_keeper_db_path=.*$!state_keeper_db_path="/var/lib/zksync/data/state_keeper"!' /etc/env/dev/database.toml - sed -i 's!^merkle_tree_backup_path=.*$!merkle_tree_backup_path="/var/lib/zksync/data/backups"!' /etc/env/dev/database.toml + sed -i 's!^database_url=.*$!database_url="'"$DATABASE_URL"'"!' /etc/env/base/private.toml + sed -i 's!^web3_url=.*$!web3_url="'"$ETH_CLIENT_WEB3_URL"'"!' /etc/env/base/eth_client.toml + sed -i 's!^path=.*$!path="/var/lib/zksync/data"!' /etc/env/base/database.toml + sed -i 's!^state_keeper_db_path=.*$!state_keeper_db_path="/var/lib/zksync/data/state_keeper"!' /etc/env/base/database.toml + sed -i 's!^merkle_tree_backup_path=.*$!merkle_tree_backup_path="/var/lib/zksync/data/backups"!' /etc/env/base/database.toml # Switch zksolc compiler source from docker to binary sed -i "s!'docker'!'binary'!" /contracts/zksync/hardhat.config.ts @@ -42,4 +42,5 @@ fi # start server source /etc/env/dev.env +source /etc/env/.init.env zksync_server diff --git a/docker/prover-gar/Dockerfile b/docker/prover-gar/Dockerfile new file mode 100644 index 000000000000..71553650d19f --- /dev/null +++ b/docker/prover-gar/Dockerfile @@ -0,0 +1,7 @@ + +ARG PROVER_IMAGE + +FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2:2.0-$PROVER_IMAGE + +COPY keys/ /usr/src/setup-keys/ + diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index 8fe3c5252de3..a31fdb2303eb 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -35,6 +35,7 @@ COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/boot COPY etc/system-contracts/artifacts-zk /etc/system-contracts/artifacts-zk COPY contracts/ethereum/artifacts/ /contracts/ethereum/artifacts/ COPY contracts/zksync/artifacts-zk/ /contracts/zksync/artifacts-zk/ +COPY setup_2\^26.key /etc/ COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile new file mode 100644 index 000000000000..5ca517ca3e75 --- /dev/null +++ b/docker/witness-generator/Dockerfile @@ -0,0 +1,18 @@ +# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner +# Not expected to work locally + +# syntax=docker/dockerfile:experimental +FROM rust:1.67-buster as builder +RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* +WORKDIR /usr/src/zksync +COPY . . 
+ +RUN CARGO_HOME=./cargo cargo build --release + +FROM debian:buster-slim +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ + +COPY --from=builder /usr/src/zksync/target/release/zksync_witness_generator /usr/bin/ +ENTRYPOINT ["zksync_witness_generator"] diff --git a/docs/advanced/01_initialization.md b/docs/advanced/01_initialization.md new file mode 100644 index 000000000000..673d06630e0e --- /dev/null +++ b/docs/advanced/01_initialization.md @@ -0,0 +1,149 @@ +# zkSync deeper dive + +The goal of this doc is to show you some more details of how zkSync works internally. + +Please go through dev_setup.md and development.md first (those commands do all the heavy lifting of starting the components of +the system). + +Now let's take a look at what's inside: + +### Initialization (zk init) + +Let's take a deeper look into what `zk init` does. + +#### zk tool + +`zk` itself is implemented in typescript (you can see the code in the `infrastructure` directory). If you change anything +there, make sure to run `zk` (which compiles this code) before re-running `zk init`. + +#### zk init + +As a first step, it gets the docker images for postgres and geth. + +Geth (one of the Ethereum clients) will be used to set up our own copy of the L1 chain (which our local zkSync will use). + +Postgres is one of the two databases used by zkSync (the other one is RocksDB). Currently most of the data is +stored in postgres (blocks, transactions etc) - while RocksDB only stores the state (Tree & Map) - and it is used by the VM. + +Then we compile the JS packages (these include our web3 sdk, tools and testing infrastructure). + +Then the L1 & L2 contracts. + +And now we're ready to start setting up the system. + +#### Postgres + +First - the postgres database: you'll be able to see something like + +``` +DATABASE_URL = postgres://postgres@localhost/zksync_local +``` + +After which we set up the schema (lots of lines with `Applied XX`). + +You can try connecting to postgres now, to see what's inside: + +``` +psql postgres://postgres@localhost/zksync_local +``` + +(and then commands like `\dt` to see the tables, `\d TABLE_NAME` to see the schema, and `select * from XX` to see the +contents). + +As our network has just started, the database will be quite empty. + +You can see the schema for the database in the [dal/README.md](../../core/lib/dal/README.md) +document. + +#### Docker + +We're running two things in docker: + +- a postgres (that we've covered above) +- a geth (that is the L1 Ethereum chain). + +Let's see if they are running: + +``` +docker container ls +``` + +and then we can look at the Geth logs: + +``` +docker logs zksync-2-dev_geth_1 +``` + +Where zksync-2-dev_geth_1 is the container name that we got from the first command. + +If everything goes well, you should see that L1 blocks are being produced. + +#### Server + +Now we can start the main server: + +``` +zk server +``` + +This will actually run a cargo binary (`zksync_server`). + +The server will wait for new transactions to generate the blocks (these can be sent via JSON RPC, but the server also +listens on the logs from the L1 contract - as things like token bridging etc come from there). + +Currently we don't send any transactions there (so the logs might be empty).
+ +But you should see some initial blocks in postgres: + +``` +select * from miniblocks; +``` + +#### Our L1 (geth) + +Let's finish this article by taking a look at our L1: + +``` +docker container exec -it zksync-2-dev_geth_1 geth attach http://localhost:8545 +``` + +The command above will start a shell - and you can check that you're a (localnet) crypto trillionaire by running: + +``` +eth.getBalance(personal.listAccounts[0]) +``` + +**Note:** This geth shell is running the official Ethereum JSON RPC with Geth-specific extensions documented at +[Ethereum Geth](https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-eth) + +In order to communicate with L2 (our zkSync) - we have to deploy multiple contracts onto L1 (our local geth-created +Ethereum). You can look at the `deployL1.log` file - to see the list of contracts that were deployed and their addresses. + +The first thing in the file is the deployer/governor wallet - this is the account that can change, freeze and unfreeze the +contracts (basically the owner). You can also verify (using the getBalance method above) that it has a lot of tokens. + +Then, there are a bunch of contracts (CREATE2_FACTORY, DIAMOND_PROXY, L1_ALLOW_LIST etc.) - for each one, the file +contains the address. + +You can quickly verify that they were really deployed, by calling: + +``` +eth.getCode("XXXX") +``` + +Where XXXX is the address from the file. + +The most important one of them is CONTRACTS_DIAMOND_PROXY_ADDR (which acts as a 'load balancer/router' for the others - and +this is the contract that our server is 'listening' on). + +## Summary + +Ok - so let's sum up what we have: + +- a postgres running in docker (the main database) +- a local instance of Ethereum (geth running in docker) + - which also has a bunch of 'magic' contracts deployed + - and two accounts with lots of tokens +- and a server process + +In the next article, we'll start playing with the system (bridging tokens etc). diff --git a/docs/advanced/README.md b/docs/advanced/README.md new file mode 100644 index 000000000000..65f2e38a1713 --- /dev/null +++ b/docs/advanced/README.md @@ -0,0 +1,14 @@ +# Advanced documentation + +This documentation is aimed at advanced users who are interested in developing zkSync Era itself (rather than just +the contracts on top) - and would like to understand how the system works internally. + +The documents in this directory are not meant to be a full specification, but they give you a rough understanding of the +system internals. + +Suggested order of reading: + +- 01_initialization +- deposit +- withdrawal +- contracts diff --git a/docs/advanced/advanced_debugging.md b/docs/advanced/advanced_debugging.md new file mode 100644 index 000000000000..a5f51c698871 --- /dev/null +++ b/docs/advanced/advanced_debugging.md @@ -0,0 +1,175 @@ +# Advanced debugging + +## Debugging backend in vscode + +Our backend takes configuration from environment variables, so before starting a debugging session, we must make sure that +they are properly set.
+ +You should create the following file in your `$workspaceFolder/.vscode/` called `prelaunch.py`: + +```python +import os +import lldb + +# Read the .env file and store the key-value pairs in a array with format ["key=value"] +env_array = [] +with open(os.path.join("etc/env/.init.env")) as f: + for line in f: + if line.strip() and line.strip()[0] != "#": + env_array.append(line.strip()) + +with open(os.path.join("etc/env/dev.env")) as f: + for line in f: + if line.strip() and line.strip()[0] != "#": + env_array.append(line.strip()) + +target = lldb.debugger.GetSelectedTarget() + +launch_info = target.GetLaunchInfo() +launch_info.SetEnvironmentEntries(env_array, True) +target.SetLaunchInfo(launch_info) +``` + +This file will load environment variables from `.init.env` and `dev.env` before starting the binary (notice that we do +this in a particular order, as values in dev.env should be overwriting the ones in .init.env). + +Afterwards you need to add something like this to your launch.json: + +``` + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'zksync_server' DEV ENV", + "cargo": { + "args": [ + "build", + "--bin=zksync_server", + "--package=zksync_core" + ], + "filter": { + "name": "zksync_server", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}", + "preRunCommands": [ + "command script import ${workspaceFolder}/.vscode/prelaunch.py" + ] + }, + ... + ] +``` + +## Debugging contracts in vscode (using hardhat) + +Assuming that you created project in hardhat, that you'd normally test with `hardhat test` - you also also test it with +vscode (which is super powerful - especially as you can have both binaries' debug sessions running in VSCode at the same +time). + +in package.json, make sure to have: + +```json +"scripts": { + //... + "test": "hardhat test", + //... + } +``` + +and then in VSCode's launch.json: + +```json +{ + "version": "0.2.0", + "configurations": [ + { + "type": "node", + "request": "launch", + "name": "Launch Program", + "console": "integratedTerminal", + "runtimeExecutable": "yarn", + "runtimeArgs": ["test", "${workspaceFolder}/test/try.js"] + } + ] +} +``` + +where `test/try.js` is your test code. + +## Performance analysis of rust binaries (flame graphs) + +If you'd like to analyze the CPU performance of your rust binary, you can use 'perf' to compute the flame graphs. + +First - run the binary with perf enabled (this will make the binary a little bit slower): + +``` +sudo perf record -F500 --call-graph=dwarf,65528 /path/to/binary --other --flags +``` + +(you can also connect to already running binary - by providing its PID with `-p` option) + +When you're done collecting records, you have to convert them into flame-graph friendly format, by running: + +``` +sudo perf script -F +pid > perfbench.script +``` + +This will create the perfbench.script file, that you can later upload to and see the +detailed flame graph. + +## Debugging/understanding/tracing zkEVM assembly + +Currently this is quite a complex process, but we're working on making it a little bit smoother. + +You start by installing the 'compiler-tester' repo (see its README.md instructions for details) - it is quite heavy as +it needs the LLVM etc etc. + +Afterwards, you can look at one of the tests (for example +[tests/solidity/simple/default.sol](https://github.com/matter-labs/era-compiler-tests/blob/main/solidity/simple/default.sol)). + +```solidity +//! { "cases": [ { +//! "name": "first", +//! "inputs": [ +//! { +//! 
"method": "first", +//! "calldata": [ +//! ] +//! } +//! ], +//! "expected": [ +//! "42" +//! ] +//! }, ] } + +// SPDX-License-Identifier: MIT + +pragma solidity >=0.4.16; + +contract Test { + function first() public pure returns (uint64) { + uint64 result = 42; + return result; + } +} + +``` + +As you can see - it is self-contained - it has the solidity code at the bottom, and the top comments are used to define +the test case - and expected result. + +You can run it by calling: + +```shell +cargo run --release --bin compiler-tester -- -DT \ + --path='tests/solidity/simple/default.sol' \ + --mode='Y+M3B3 0.8.19' +``` + +And then collect the detailed tracing information from trace directory. You'll notice that you have 2 files for each +test - one covering the deployment, and one covering the test run. + +You can take test run one and upload it to [our debugger](https://explorer.zksync.io/tools/debugger) to see detailed +zkAssembler and state of memory, heap, stack and registers at each execution step. diff --git a/docs/advanced/blocks_and_batches.md b/docs/advanced/blocks_and_batches.md new file mode 100644 index 000000000000..1de21435d8ba --- /dev/null +++ b/docs/advanced/blocks_and_batches.md @@ -0,0 +1,85 @@ +# Blocks & Batches - How we package transactions + +In this article, we will explore the processing of transactions, how we group them into blocks, what it means to "seal" +a block, and why it is important to have rollbacks in our virtual machine (VM). + +At the basic level, we have individual transactions. However, to execute them more efficiently, we group them together +into blocks & batches + +## L1 Batch vs L2 Block (a.k.a MiniBlock) vs Transaction + +To help visualize the concept, here are two images: + +![Block layout][block_layout] + +You can refer to the Block layout image to see how the blocks are organized. It provides a graphical representation of +how transactions are arranged within the blocks and the arrangement of L2 blocks within L1 "batches." + +![Explorer example][explorer_example] + +### L2 blocks (aka Miniblocks) + +Currently, the L2 blocks do not have a major role in the system, until we transition to a decentralized sequencer. We +introduced them mainly as a "compatibility feature" to accommodate various tools, such as Metamask, which expect a block +that changes frequently. This allows these tools to provide feedback to users, confirming that their transaction has +been added. + +As of now, an L2 block is created every 2 seconds (controlled by StateKeeper's config `miniblock_commit_deadline_ms`), +and it includes all the transactions received during that time period. This periodic creation of L2 blocks ensures that +transactions are processed and included in the blocks regularly. + +### L1 batches + +L1 batches play a crucial role because they serve as the fundamental unit for generating proofs. From the perspective of +the virtual machine (VM), each L1 batch represents the execution of a single program, specifically the Bootloader. The +Bootloader internally processes all the transactions belonging to that particular batch. Therefore, the L1 batch serves +as the container for executing the program and handling the transactions within it. + +#### So how large can L1 batch be + +Most blockchains use factors like time and gas usage to determine when a block should be closed or sealed. However, our +case is a bit more complex because we also need to consider prover capacity and limits related to publishing to L1. 
+ +The decision of when to seal the block is handled by the code in the [conditional_sealer][conditional_sealer] module. It +maintains a list of `SealCriterion` and at the time of writing this article, [we have 9 reasons to seal the +block][reasons_for_sealing], which include: + +- Transaction slots limit (currently set to 750 transactions in `StateKeeper`'s config - `transaction_slots`). +- Gas limit (currently set to `MAX_L2_TX_GAS_LIMIT` = 80M). +- Published data limit (as each L1 batch must publish information about the changed slots to L1, so all the changes must + fit within the L1 transaction limit, currently set to `MAX_PUBDATA_PER_L1_BATCH`= 120k). +- zkEVM Geometry limits - For certain operations like merklelization, there is a maximum number of circuits that can be + included in a single L1 batch. If this limit is exceeded, we wouldn't be able to generate the proof. + +We also have a `TimeoutCriterion` - but it is not enabled. + +However, these sealing criteria pose a significant challenge because it is difficult to predict in advance whether +adding a given transaction to the current batch will exceed the limits or not. This unpredictability adds complexity to +the process of determining when to seal the block. + +#### What if a transaction doesn't fit + +To handle situations where a transaction exceeds the limits of the currently active L1 batch, we employ a "try and +rollback" approach. This means that we attempt to add the transaction to the active L1 batch, and if we receive a +`ExcludeAndSeal` response indicating that it doesn't fit, we roll back the virtual machine (VM) to the state before the +transaction was attempted. + +Implementing this approach introduces a significant amount of complexity in the `oracles` (also known as interfaces) of +the VM. These oracles need to support snapshotting and rolling back operations to ensure consistency when handling +transactions that don't fit. + +In a separate article, we will delve into more details about how these oracles and the VM work, providing a +comprehensive understanding of their functionality and interactions. + +[block_layout]: + https://user-images.githubusercontent.com/128217157/236494232-aeed380c-78f6-4fda-ab2a-8de26c1089ff.png + 'block layout' +[explorer_example]: + https://user-images.githubusercontent.com/128217157/236500717-165470ad-30b8-4ad6-97ed-fc29c8eb1fe0.png + 'explorer example' +[conditional_sealer]: + https://github.com/matter-labs/zksync-2-dev/blob/1ef7fd03c1cbd175dc9be1309ec7698d91d90571/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs#L21 + 'Conditional Sealer' +[reasons_for_sealing]: + https://github.com/matter-labs/zksync-2-dev/blob/1ef7fd03c1cbd175dc9be1309ec7698d91d90571/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs#L119 + 'Reasons for Sealing' diff --git a/docs/advanced/contracts.md b/docs/advanced/contracts.md new file mode 100644 index 000000000000..ddbce2d423bf --- /dev/null +++ b/docs/advanced/contracts.md @@ -0,0 +1,95 @@ +# zkSync contracts + +Now that we know how to bridge tokens back and forth, let's talk about running things on zkSync. + +We have a bunch of great tutorials (like this one ) that +you can follow to get the exact code & command line calls to create the contracts - so in this article, let's focus on +how things differ between zkSync and Ethereum. + +**Note** Before reading this article, I'd recommend doing the hardhat tutorial above. 
+
+## Ethereum flow
+
+In the case of Ethereum, you start by writing the contract code in Solidity, then you compile it with `solc`, and you
+get the EVM bytecode, the deployment bytecode (which is a function that should return the bytecode itself) and the ABI
+(interface).
+
+Afterwards, you send the deployment bytecode to the 0x000 address on Ethereum, which does some magic (executes the
+deployment bytecode, which should contain the constructor etc.) and puts the contract under an address that is
+generated based on your account id and a nonce.
+
+From this moment on, you can send transactions to this new address (and most tools will ask you to provide the ABI, so
+that they can set the proper function arguments).
+
+All the bytecode is run on the EVM (Ethereum Virtual Machine) - which has a stack, access to memory and storage, and a
+bunch of opcodes.
+
+## zkSync flow
+
+The main part (and the main cost) of zkSync is the proving system. In order to make proving as fast as possible, we're
+running a somewhat different virtual machine (zkEVM) - it has a slightly different set of opcodes, and also contains a
+bunch of registers. More details on this will be covered in future articles.
+
+Having a different VM means that we must have a separate compiler [zk-solc](https://github.com/matter-labs/zksolc-bin) -
+as the bytecode that is produced by this compiler has to use the zkEVM specific opcodes.
+
+While having a separate compiler introduces a bunch of challenges (for example, we need custom
+[hardhat plugins](https://github.com/matter-labs/hardhat-zksync)), it brings a bunch of benefits too: for example, it
+allows us to move some of the VM logic (like new contract deployment) into System contracts - which allows faster &
+cheaper modifications and increased flexibility.
+
+### zkSync system contracts
+
+A small note on system contracts: as mentioned above, we moved some of the VM logic into system contracts, which allows
+us to keep the VM simpler (and with it, keep the proving system simpler).
+
+You can see the full list (and code) of the system contracts here: .
+
+While some of them are not really visible to the contract developer (like the fact that we're running a special
+`Bootloader` to package a bunch of transactions together - more info in a future article) - some others are very
+visible - like our `ContractDeployer`.
+
+### ContractDeployer
+
+Deploying a new contract differs on Ethereum and zkSync.
+
+While on Ethereum you send the deployment transaction to the 0x00 address, on zkSync you have to call the special
+`ContractDeployer` system contract.
+
+If you look at your hardhat example, you'll notice that your `deploy.ts` is actually using a `Deployer` class from the
+`hardhat-zksync-deploy` plugin.
+
+Internally, it uses zkSync's web3.js, which calls the contract deployer
+[here](https://github.com/matter-labs/zksync-2-dev/blob/a2853871778cebe8f09faebe6f2f5c07d29b81f1/sdk/zksync-web3.js/src/contract.ts#L62)
+
+```typescript
+override getDeployTransaction(..) {
+  ...
+  txRequest.to = CONTRACT_DEPLOYER_ADDRESS;
+  ...
+}
+```
+
+The `ContractDeployer` also adds a special prefix to all new contract addresses. This means that contract addresses
+WILL be different on `zkSync` and Ethereum (and this also leaves us the possibility of adding Ethereum addresses in the
+future if needed).
+
+You can look for `CREATE2_PREFIX` and `CREATE_PREFIX` in the code.
+
+### Gas costs
+
+Another area where zkSync differs from Ethereum is gas cost. The best example of this is storage slots.
+ +If you have two transactions that are updating the same storage slot - and they are in the same 'batch' - only the first +one would be charged (as when we write the final storage to ethereum, we just write the final diff of what slots have +changed - so updating the same slot multiple times doesn't increase the amount of data that we have to write to L1). + +### Account abstraction and some method calls + +As `zkSync` has a built-in AccountAbstration (more on this in a separate article) - you shouldn't depend on some of the +solidity functions (like `ecrecover` - that checks the keys, or `tx.origin`) - in all the cases, the compiler will try +to warn you. + +## Summary + +In this article, we looked at how contract development & deployment differs on Ethereum and zkSync (looking at +differences in VMs, compilers and system contracts). diff --git a/docs/advanced/deposit.md b/docs/advanced/deposit.md new file mode 100644 index 000000000000..58bcfee5106c --- /dev/null +++ b/docs/advanced/deposit.md @@ -0,0 +1,172 @@ +# ZK-Sync deeper dive - bridging & deposits + +In the first article, we've managed to setup our system on local machine and verify that it works. Now let's actually +start using it. + +## Seeing the status of the accounts + +Let's use a small command line tool (web3 - ) to interact with our blockchains. + +``` +git clone https://github.com/mm-zk/web3 +make build +``` + +Then let's create the keypair for our temporary account: + +``` +./web3 account create +``` + +It will produce a public and private key (for example): + +``` +Private key: 0x5090c024edb3bdf4ce2ebc2da96bedee925d9d77d729687e5e2d56382cf0a5a6 +Public address: 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd +``` + +Now, let's see how many tokens we have: + +``` +// This checks the tokens on 'L1' (geth) +./web3 --rpc-url http://localhost:8545 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd + +// This checks the tokens on 'L2' (zkSync) +./web3 --rpc-url http://localhost:3050 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd +``` + +Unsurprisingly we have 0 on both - let's fix it by first transferring some tokens on L1: + +``` +docker container exec -it zksync-2-dev_geth_1 geth attach http://localhost:8545 +//and inside: +eth.sendTransaction({from: personal.listAccounts[0], to: "0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd", value: "7400000000000000000"}) +``` + +And now when we check the balance, we should see: + +``` +./web3 --rpc-url http://localhost:8545 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd +``` + +that we have 7.4 ETH. + +and now let's bridge it over to L2. + +## Bridging over to L2 + +We'll use the zksync-cli from: and then run: + +```shell +npm run build +npm exec zksync-cli deposit +``` + +you should choose the 'localnet' as network, and provide the public key as address. + +If everything goes well, you should be able to see the tokens transferred: + +```shell +./web3 --rpc-url http://localhost:3050 balance 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd +``` + +### Diving deeper - what exactly happened + +Let's take a deeper look at what the 'deposit' call actually did. 
+ +If we look at what 'deposit' command has printed, we'll see something like this: + +``` +Transaction submitted 💸💸💸 +L1 transaction: 0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179 +``` + +Let's run the `geth attach` (exact command is above) and see the details: + +``` +eth.getTransaction("0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179") + +{ + accessList: [], + blockHash: "0xd319b685a1a0b88545ec6df473a3efb903358ac655263868bb14b92797ea7504", + blockNumber: 79660, + chainId: "0x9", + from: "0x618263ce921f7dd5f4f40c29f6c524aaf97b9bbd", + gas: 125060, + gasPrice: 1500000007, + hash: "0xe27dc466c36ad2046766e191017e7acf29e84356465feef76e821708ff18e179", + input: "0xeb672419000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000029a2241af62c000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000009cb4200000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000100000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + maxFeePerGas: 1500000010, + maxPriorityFeePerGas: 1500000000, + nonce: 40, + r: "0xc9b0548ade9c5d7334f1ebdfba9239cf1acca7873381b8f0bc0e8f49ae1e456f", + s: "0xb9dd338283a3409c281b69c3d6f1d66ea6ee5486ee6884c71d82f596d6a934", + to: "0x54e8159f006750466084913d5bd288d4afb1ee9a", + transactionIndex: 0, + type: "0x2", + v: "0x1", + value: 3000320929000000000 +} +``` + +The witdraw command has called the contract on address `0x54e8` (which is exactly the DIAMOND_PROXY_ADDRESS), and it has +called the method `0xeb672419` - which is the `requestL2Transaction` from +[Mailbox.sol](https://github.com/matter-labs/zksync-2-contracts/blob/main/ethereum/contracts/zksync/facets/Mailbox.sol#L205) + +#### Quick note on our L1 contracts + +We're using the DiamondProxy setup, that allows us to have a fixed immutable entry point (DiamondProxy) - that forwards +the requests to different contracts (facets) that can be independently updated and/or frozen. + +![Diamond proxy layout](https://user-images.githubusercontent.com/128217157/229521292-1532a59b-665c-4cc4-8342-d25ad45a8fcd.png) + +You can find more detailed description in +[Contract docs](https://github.com/matter-labs/zksync-2-contracts/blob/main/docs/Overview.md) + +#### requestL2Transaction Function details + +You can use some of the online tools (like ) and pass the input data to it - and +get the nice result: + +```json +"function": "requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)", +"params": [ + "0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd", + "3000000000000000000", + "0x", + "641858", + "800", + [], + "0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd" + ] + +``` + +This means that we requested that the 3 ETH (2nd argument) is transferred to 0x6182 (1st argument). The Calldata being +0x0 - means that we're talking about ETH (this would be a different value for other ERC tokens). Then we also specify a +gas limit (641k) and set the gas per pubdata byte limit to 800. + +#### What happens under the hood + +The call to requestL2Transaction, is adding the transaction to the priorityQueue and then emits the NewPriorityRequest. 
+ +The zk server (that you started with `zk server` command) is listening on events that are emitted from this contract +(via eth_watcher module - +[`loop_iteration` function](https://github.com/matter-labs/zksync-2-dev/blob/main/core/bin/zksync_core/src/eth_watch/mod.rs#L128]) +) and adds them to the postgres database (into `transactions` table). + +You can actually check it - by running the psql and looking at the contents of the table - then you'll notice that +transaction was succesfully inserted, and it was also marked as 'priority' (as it came from L1) - as regular +transactions that are received by the server directly are not marked as priority. + +You can verify that this is your transaction, by looking at the l1_block_number column (it should match the block_number +from the eth.getTransaction call above). + +Notice that the hash of the transaction in the postgres will be different from the one returned by eth.getTransaction. +This is because the postgres keeps the hash of the 'L2' transaction (which was 'inside' the L1 transaction that +eth.getTransaction returned). + +## Summary + +In this article, we've learned how ETH gets bridged from L1 to L2. In the next episode, we'll look at the other +direction - how we transmit messages (and ETH) from L2 to L1 - stay tuned. diff --git a/docs/advanced/gas_and_fees.md b/docs/advanced/gas_and_fees.md new file mode 100644 index 000000000000..21dd1d117877 --- /dev/null +++ b/docs/advanced/gas_and_fees.md @@ -0,0 +1,134 @@ +# Fees (a.k.a gas) + +What is the L2 gas price? It's **0.25 Gwei** (and as we improve our provers/VM we hope it will go down). However, it can +vary at times. Please see further information below. + +## What do you pay for + +The gas fee covers the following expenses: + +- Calculation and storage (related to most operations) +- Publishing data to L1 (a significant cost for many transactions, with the exact amount depending on L1) +- Sending 'bytecode' to L1 (if not already there) - typically a one-time cost when deploying a new contract +- Closing the batch and handling proofs - This aspect also relies on L1 costs (since proof publication must be covered). + +## L1 vs L2 pricing + +Here is a simplified table displaying various scenarios that illustrate the relationship between L1 and L2 fees: + +| L1 gas price | L2 'fair price' | L2 'gas price' | L2 gas per pubdata | Note | +| ------------ | --------------- | -------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.25 Gwei | 0.25 Gwei | 0.25 Gwei | 17 | Gas prices are equal, so the charge is 17 gas, just like on L1. | +| 10 Gwei | 0.25 Gwei | 0.25 Gwei | 680 | L1 is 40 times more expensive, so we need to charge more L2 gas per pubdata byte to cover L1 publishing costs. | +| 250 Gwei | 0.25 Gwei | 0.25 Gwei | 17000 | L1 is now very expensive (1000 times more than L2), so each pubdata costs a lot of gas. | +| 10000 Gwei | 0.25 Gwei | 8.5 Gwei | 20000 | L1 is so expensive that we have to raise the L2 gas price, so the gas needed for publishing doesn't exceed the 20k limit, ensuring L2 remains usable. | + +**Why is there a 20k gas per pubdata limit?** - We want to make sure every transaction can publish at least 4kb of data +to L1. The maximum gas for a transaction is 80 million (80M/4k = 20k). 
+ +### L2 Fair price + +The L2 fair gas price is currently determined by the StateKeeper configuration and is set at 0.25 Gwei (see +`fair_l2_gas_price` in the config). This price is meant to cover the compute costs (CPU + GPU) for the sequencer and +prover. It can be changed as needed, with a safety limit of 10k Gwei in the bootloader. Once the system is +decentralized, more deterministic rules will be established for this price. + +### L1 Gas price + +The L1 gas price is fetched by querying L1 every 20 seconds. This is managed by the [`GasAdjuster`][gas_adjuster], which +calculates the median price from recent blocks and enables more precise price control via the config (for example, +adjusting the price with `internal_l1_pricing_multiplier` or setting a specific value using +`internal_enforced_l1_gas_price`). + +### Overhead gas + +As mentioned earlier, fees must also cover the overhead of generating proofs and submitting them to L1. While the +detailed calculation is complex, the short version is that a full proof of an L1 batch costs around **1 million L2 gas, +plus 1M L1 gas (roughly equivalent of 60k published bytes)**. In every transaction, you pay a portion of this fee +proportional to the part of the batch you are using. + +## Transactions + +| Transaction Field | Conditions | Note | +| ----------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| gas_limit | `<= max_allowed_l2_tx_gas_limit` | The limit (4G gas) is set in the `StateKeeper` config; it's the limit for the entire L1 batch. | +| gas_limit | `<= MAX_GAS_PER_TRANSACTION` | This limit (80M) is set in bootloader. | +| gas_limit | `> l2_tx_intrinsic_gas` | This limit (around 14k gas) is hardcoded to ensure that the transaction has enough gas to start. | +| max_fee_per_gas | `<= fair_l2_gas_price` | Fair L2 gas price (0.25 Gwei) is set in the `StateKeeper` config | +| | `<=validation_computational_gas_limit` | There is an additional, stricter limit (300k gas) on the amount of gas that a transaction can use during validation. | + +### Why do we have two limits: 80M and 4G + +The operator can set a custom transaction limit in the bootloader. However, this limit must be within a specific range, +meaning it cannot be less than 80M or more than 4G. + +### Why validation is special + +In Ethereum, there is a fixed cost for verifying a transaction's correctness by checking its signature. However, in +zkSync, due to Account Abstraction, we may need to execute some contract code to determine whether it's ready to accept +the transaction. If the contract rejects the transaction, it must be dropped, and there's no one to charge for that +process. + +Therefore, a stricter limit on validation is necessary. This prevents potential DDoS attacks on the servers, where +people could send invalid transactions to contracts that require expensive and time-consuming verifications. By imposing +a stricter limit, the system maintains stability and security. + +## Actual gas calculation + +From the Virtual Machine (VM) point of view, there is only a bootloader. When executing transactions, we insert the +transaction into the bootloader memory and let it run until it reaches the end of the instructions related to that +transaction (for more details, refer to the 'Life of a Call' article). 
+
+To calculate the gas used by a transaction, we record the amount of gas used by the VM before the transaction execution
+and subtract it from the remaining gas after the execution. This difference gives us the actual gas used by the
+transaction.
+
+```rust
+let gas_remaining_before = vm.gas_remaining();
+execute_tx();
+let gas_used = gas_remaining_before - vm.gas_remaining();
+```
+
+## Gas estimation
+
+Before sending a transaction to the system, most users will attempt to estimate the cost of the request using the
+`eth_estimateGas` call.
+
+To estimate the gas limit for a transaction, we perform a binary search (between 0 and the `MAX_L2_TX_GAS_LIMIT` of 80M)
+to find the smallest amount of gas under which the transaction still succeeds.
+
+For added safety, we include some 'padding' by using two additional config options: `gas_price_scale_factor` (currently
+1.5) and `estimate_gas_scale_factor` (currently 1.3). These options are used to increase the final estimation.
+
+The first option simulates the volatility of L1 gas (as mentioned earlier, high L1 gas can affect the actual gas cost of
+data publishing), and the second one serves as a 'safety margin'.
+
+You can find this code in the [get_txs_fee_in_wei][get_txs_fee_in_wei] function.
+
+## Q&A
+
+### Is zkSync really cheaper
+
+In short, yes. As seen in the table at the beginning, the regular L2 gas price is set to 0.25 Gwei, while the standard
+Ethereum price is around 60-100 Gwei. However, the cost of publishing to L1 depends on L1 prices, meaning that the
+actual transaction costs will increase if the L1 gas price rises.
+
+### Why do I hear about large refunds
+
+There are a few reasons why refunds might be 'larger' on zkSync (i.e., why we might be overestimating the fees):
+
+- We must assume (pessimistically) that you'll have to pay for all the slot/storage writes. In practice, if multiple
+  transactions touch the same slot, we only charge one of them.
+- We have to account for larger fluctuations in the L1 gas price (using the `gas_price_scale_factor` mentioned earlier) -
+  this might cause the estimation to be significantly higher, especially when the L1 gas price is already high, as it
+  then impacts the amount of gas used by pubdata.
+
+[main_node_fetcher]:
+  https://github.com/matter-labs/zksync-2-dev/blob/d590b3f0965a23eb0011779aab829d86d4fdc1d1/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs#L33
+  'main node fetcher'
+[gas_adjuster]:
+  https://github.com/matter-labs/zksync-2-dev/blob/d590b3f0965a23eb0011779aab829d86d4fdc1d1/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs#L25
+  'gas_adjuster'
+[get_txs_fee_in_wei]:
+  https://github.com/matter-labs/zksync-2-dev/blob/d590b3f0965a23eb0011779aab829d86d4fdc1d1/core/bin/zksync_core/src/api_server/tx_sender/mod.rs#L450
+  'get_txs_fee_in_wei'
diff --git a/docs/advanced/how_call_works.md b/docs/advanced/how_call_works.md
new file mode 100644
index 000000000000..601922346e83
--- /dev/null
+++ b/docs/advanced/how_call_works.md
@@ -0,0 +1,125 @@
+# Life of a 'call'
+
+This article will show you how the `call` method works in our backend. The `call` method is a 'read-only' operation,
+which means it doesn't change anything on the blockchain. This will give you a chance to understand the system,
+including the bootloader and VM.
+
+For this example, let's assume that the contract is already deployed, and we will use the `call` method to interact with
+it.
+
+Since the 'call' method is only for reading data, all the calculations will happen in the `api_server`.
+ +### Calling the 'call' method + +If you need to make calls quickly, you can use the 'cast' binary from the +[foundry](https://github.com/foundry-rs/foundry) suite: + +```shell= +cast call 0x23DF7589897C2C9cBa1C3282be2ee6a938138f10 "myfunction()()" --rpc-url http://localhost:3050 +``` + +The address of your contract is represented by 0x23D... + +Alternatively, you can make an RPC call directly, but this can be complicated as you will have to create the correct +payload, which includes computing the ABI for the method, among other things. + +An example of an RPC call would be: + +```shell= +curl --location 'http://localhost:3050' \ +--header 'Content-Type: application/json' \ +--data '{ + "jsonrpc": "2.0", + "id": 2, + "method": "eth_call", + "params": [ + { + "from": "0x0000000000000000000000000000000000000000", + "data": "0x0dfe1681", + "to": "0x2292539b1232A0022d1Fc86587600d86e26396D2" + } + + ] +}' +``` + +As you can see, using the RPC call directly is much more complex. That's why I recommend using the 'cast' tool instead. + +### What's happening in the server + +Under the hood, the 'cast' tool calls the `eth_call` RPC method, which is part of the official Ethereum API set. You can +find the definition of these methods in the [namespaces/eth.rs][namespaces_rpc_api] file in our code. + +Afterward, it goes to the implementation, which is also in the [namespaces/eth.rs][namespaces_rpc_impl] file but in a +different parent directory. + +The server then executes the function in a VM sandbox. Since this is a `call` function, the VM only runs this function +before shutting down. This is handled by the `execute_tx_eth_call` method, which fetches metadata like block number and +timestamp from the database, and the `execute_tx_in_sandbox` method, which takes care of the execution itself. Both of +these functions are in the [api_server/execution_sandbox.rs][execution_sandbox] file. + +Finally, the transaction is pushed into bootloader memory, and the VM executes it until it finishes. + +### VM + +Before we look at the bootloader, let's briefly examine the VM itself. + +The zkEVM is a state machine with a heap, stack, 16 registers, and state. It executes zkEVM assembly, which has many +opcodes similar to EVM, but operates on registers rather than a stack. We have two implementations of the VM: one is in +'pure rust' without circuits (in the zk_evm repository), and the other has circuits (in the sync_vm repository). In this +example, the api server uses the 'zk_evm' implementation without circuits. + +Most of the code that the server uses to interact with the VM is in [core/lib/vm/src/vm.rs][vm_code]. + +In this line, we're calling self.state.cycle(), which executes a single VM instruction. You can see that we do a lot of +things around this, such as executing multiple tracers after each instruction. This allows us to debug and provide +additional feedback about the state of the VM. + +### Bootloader & transaction execution + +The Bootloader is a large 'quasi' system contract, written in Yul and located in +[system_contracts/bootloader/bootloader.yul][bootloader_code] . + +It's a 'quasi' contract because it isn't actually deployed under any address. Instead, it's loaded directly into the VM +by the binary in the constructor [init_vm_inner][init_vm_inner]. + +So why do we still need a bootloader if we have the call data, contract binary, and VM? There are two main reasons: + +- It allows us to 'glue' transactions together into one large transaction, making proofs a lot cheaper. 
+- It allows us to handle some system logic (checking gas, managing some L1-L2 data, etc.) in a provable way. From the + circuit/proving perspective, this behaves like contract code. +- You'll notice that the way we run the bootloader in the VM is by first 'kicking it off' and cycling step-by-step until + it's ready to accept the first transaction. Then we 'inject' the transaction by putting it in the right place in VM + memory and start iterating the VM again. The bootloader sees the new transaction and simply executes its opcodes. + +This allows us to 'insert' transactions one by one and easily revert the VM state if something goes wrong. Otherwise, +we'd have to start with a fresh VM and re-run all the transactions again. + +### Final steps + +Since our request was just a 'call', after running the VM to the end, we can collect the result and return it to the +caller. Since this isn't a real transaction, we don't have to do any proofs, witnesses, or publishing to L1. + +## Summary + +In this article, we covered the 'life of a call' from the RPC to the inner workings of the system, and finally to the +'out-of-circuit' VM with the bootloader. + +[namespaces_rpc_api]: + https://github.com/matter-labs/zksync-2-dev/blob/edd48fc37bdd58f9f9d85e27d684c01ef2cac8ae/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs + 'namespaces RPC api' +[namespaces_rpc_impl]: + https://github.com/matter-labs/zksync-2-dev/blob/edd48fc37bdd58f9f9d85e27d684c01ef2cac8ae/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs#L94 + 'namespaces RPC implementation' +[execution_sandbox]: + https://github.com/matter-labs/zksync-2-dev/blob/dc3b3d6b055c558b0e1a76ef5de3184291489d9f/core/bin/zksync_core/src/api_server/execution_sandbox.rs#L99 + 'execution sandbox' +[vm_code]: + https://github.com/matter-labs/zksync-2-dev/blob/dc3b3d6b055c558b0e1a76ef5de3184291489d9f/core/lib/vm/src/vm.rs#L544 + 'vm code' +[bootloader_code]: + https://github.com/matter-labs/system-contracts/blob/93a375ef6ccfe0181a248cb712c88a1babe1f119/bootloader/bootloader.yul + 'bootloader code' +[init_vm_inner]: + https://github.com/matter-labs/zksync-2-dev/blob/dc3b3d6b055c558b0e1a76ef5de3184291489d9f/core/lib/vm/src/vm_with_bootloader.rs#L348 + 'vm constructor' diff --git a/docs/advanced/how_transaction_works.md b/docs/advanced/how_transaction_works.md new file mode 100644 index 000000000000..d24dc30fd092 --- /dev/null +++ b/docs/advanced/how_transaction_works.md @@ -0,0 +1,97 @@ +# Life of transaction + +In this article, we will explore the lifecycle of a transaction, which is an operation that is stored permanently in the +blockchain and results in a change of its overall state. + +To better understand the content discussed here, it is recommended that you first read the [life of a +call][life_of_call]. + +## L1 vs L2 transactions + +There are two main methods through which transactions can enter the system. The most common approach involves making a +call to the RPC (Remote Procedure Call), where you send what is known as an [`L2Tx`][l2_tx] transaction. + +The second method involves interacting with Ethereum directly by sending a 'wrapped' transaction to our Ethereum +contract. These transactions are referred to as [`L1Tx`][l1_tx] or Priority transactions, and the process of sending +transactions in this manner is called the 'priority queue'. + +### Transaction types + +We provide support for five different types of transactions. 
+ +Here's a simplified table of the transaction types: + +| Type id | Transaction type | Features | Use cases | % of transactions (mainnet/testnet) | +| ------- | ---------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ----------------------------------- | +| 0x0 | 'Legacy' | Only includes `gas price` | These are traditional Ethereum transactions. | 60% / 82% | +| 0x1 | EIP-2930 | Contains a list of storage keys/addresses the transaction will access | At present, this type of transaction is not enabled. | +| 0x2 | EIP-1559 | Includes `max_priority_fee_per_gas`, `max_gas_price` | These are Ethereum transactions that provide more control over the gas fee. | 35% / 12% | +| 0x71 | EIP-712 (specific to zkSync) | Similar to EIP-1559, but also adds `max_gas_per_pubdata`, custom signatures, and Paymaster support | This is used by those who are using zkSync specific Software Development Kits (SDKs). | 1% / 2% | +| 0xFF | L1 transactions also known as priority transactions `L1Tx` | Originating from L1, these have more custom fields like 'refund' addresses etc | Mainly used to transfer funds/data between L1 & L2 layer. | 4% / 3% | + +Here's the code that does the parsing: [TransactionRequest::from_bytes][transaction_request_from_bytes] + +## Transactions lifecycle + +### Priority queue (L1 Tx only) + +L1 transactions are first 'packaged' and then sent to our Ethereum contract. After this, the L1 contract records this +transaction in L1 logs. Our 'eth_watcher' constantly monitors these logs through the +[`get_priority_op_events`][get_priority_op_events] method and then adds them to the database (mempool). + +### RPC & validation (L2 Tx only) + +Transactions are received via the `eth_sendRawTransaction` method. These are then parsed and validated using the +[`submit_tx`][submit_tx] method on the API server. + +The validations ensure that the correct amount of gas has been assigned by the user and that the user's account has +sufficient gas, among other things. + +As part of this validation, we also perform a `validation_check` to ensure that if account abstraction / paymaster is +used, they are prepared to cover the fees. Additionally, we perform a 'dry_run' of the transaction for a better +developer experience, providing almost immediate feedback if the transaction fails. + +Please note, that transaction can still fail in the later phases, even if it succeeded in the API, as it is going to be +executed in the context of a different block. + +Once validated, the transaction is added to the mempool for later execution. Currently, the mempool is stored in the +`transactions` table in postgres (see the `insert_transaction_l2()` method). + +### Batch executor & State keeper + +The State Keeper's job is to take transactions from the mempool and place them into an L1 batch. This is done using the +[`process_l1_batch()`][process_l1_batch] method. + +This method takes the next transaction from the mempool (which could be either an L1Tx or L2Tx - but L1Tx are always +given the priority and they are taken first), executes it, and checks if the L1 batch is ready to be sealed (for more +details on when we finalize L1 batches, see the 'Blocks & Batches' article). + +Once the batch is sealed, it's ready to be sent for proof generation and have this proof committed into L1. 
More details +on this will be covered in a separate article. + +The transaction can have three different results in state keeper: + +- Success +- Failure (but still included in the block, and gas was charged) +- Rejection - when it fails validation, and cannot be included in the block. This last case should (in theory) never + happen - as we cannot charge the fee in such scenario, and it opens the possiblity for the DDoS attack. + +[transaction_request_from_bytes]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/lib/types/src/transaction_request.rs#L539 + 'transaction request from bytes' +[get_priority_op_events]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/bin/zksync_core/src/eth_watch/client.rs#L112 + 'get priority op events' +[l1_tx]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/lib/types/src/l1/mod.rs#L196 + 'l1 tx' +[l2_tx]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/lib/types/src/l2/mod.rs#L110 + 'l2 tx' +[submit_tx]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs#L146 + 'submit tx' +[process_l1_batch]: + https://github.com/matter-labs/zksync-2-dev/blob/e0820f994982f179c0466cd724a0a327327a501a/core/bin/zksync_core/src/state_keeper/keeper.rs#L257 + 'process l1 batch' +[life_of_call]: how_call_works.md 'life of call' diff --git a/docs/advanced/withdrawal.md b/docs/advanced/withdrawal.md new file mode 100644 index 000000000000..925a0b60a442 --- /dev/null +++ b/docs/advanced/withdrawal.md @@ -0,0 +1,139 @@ +# zkSync deeper dive bridging stuff back (a.k.a withdrawals) + +Assuming that we have the tools from part 1 installed, we can bridge the tokens back by simply calling the zksync-cli: + +``` +npm exec zksync-cli withdraw +``` + +And providing the account name (public key) and private key. + +Afterward, by using `web3` tools, we can quickly check that funds were transferred back to L1. **And you discover that +they didn't** - what happened? + +Actually we'll have to run one additional step: + +``` +npm exec zksync-cli confirm_withdrawal +``` + +and pass the transaction that we received from the first call, into the `confirm_withdrawal` call. + +**Note:** This is not needed on testnet - as we (MatterLabs) - are running an automatic tool that confirms withdrawals. + +### Looking deeper + +But let's take a look what happened under the hood. + +Let's start by looking at the output of our `zksync-cli`: + +``` +Withdrawing 7ETH to 0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd on localnet +Transaction submitted 💸💸💸 +L2: tx/0xe2c8a7beaf8879cb197555592c6eb4b6e4c39a772c3b54d1b93da14e419f4683 +Your funds will be available in L1 in a couple of minutes. +``` + +**important** - your transaction id will be different - make sure that you use it in the methods below. + +The tool created the withdraw transaction and it sent it directly to our server (so this is a L2 transaction). The zk +server has received it, and added it into its database. 
You can check it by querying the `transactions` table: + +``` +# select * from transactions where hash = '\x` +select * from transactions where hash = '\xe2c8a7beaf8879cb197555592c6eb4b6e4c39a772c3b54d1b93da14e419f4683'; +``` + +This will print a lot of columns, but let's start by looking at the `data` column: + +``` + {"value": "0x6124fee993bc0000", "calldata": "0x51cff8d9000000000000000000000000618263ce921f7dd5f4f40c29f6c524aaf97b9bbd", "factoryDeps": null, "contractAddress": "0x000000000000000000000000000000000000800a"} +``` + +We can use the ABI decoder tool to see what this call data means: + +```json +{ + "function": "withdraw(address)", + "params": ["0x618263CE921F7dd5F4f40C29f6c524Aaf97b9bbd"] +} +``` + +(and the 0x6124fee993bc0000 in the value is 7000000000000000000 == 7 ETH that we wanted to send). + +So the last question is -- what is the 'magic' contract address: `0x800a` ? + +```solidity +/// @dev The address of the eth token system contract +address constant L2_ETH_TOKEN_SYSTEM_CONTRACT_ADDR = address(0x800a); + +``` + +### System contracts (on L2) + +This is a good opportunity to talk about system contracts that are automatically deployed on L2. You can find the full +list here +[in github](https://github.com/matter-labs/system-contracts/blob/436d57da2fb35c40e38bcb6637c3a090ddf60701/scripts/constants.ts#L29) + +This is the place were we specify that `bootloader` is at address 0x8001, `NonceHolder` at 0x8003 etc. + +This brings us to [L2EthToken.sol](https://github.com/matter-labs/system-contracts/blob/main/contracts/L2EthToken.sol) +that has the implementation of the L2 Eth. + +When we look inside, we can see: + +```solidity +// Send the L2 log, a user could use it as proof of the withdrawal +bytes memory message = _getL1WithdrawMessage(_l1Receiver, amount); +L1_MESSENGER_CONTRACT.sendToL1(message); +``` + +And `L1MessagerContract` (that is deployed at 0x8008). + +### Committing to L1 + +And how do these messages get into the L1? The `eth_sender` class from our server is taking care of this. You can see +the details of the transactions that it posts to L1 in our database in `eth_txs` column. + +If you look at the `tx_type` column (in psql), you can see that we have 3 different transaction types: + +```sql +zksync_local=# select contract_address, tx_type from eth_txs; + contract_address | tx_type +--------------------------------------------+--------------------------- + 0x54e8159f006750466084913d5bd288d4afb1ee9a | CommitBlocks + 0x54e8159f006750466084913d5bd288d4afb1ee9a | PublishProofBlocksOnchain + 0x54e8159f006750466084913d5bd288d4afb1ee9a | ExecuteBlocks + 0x54e8159f006750466084913d5bd288d4afb1ee9a | CommitBlocks + 0x54e8159f006750466084913d5bd288d4afb1ee9a | PublishProofBlocksOnchain + 0x54e8159f006750466084913d5bd288d4afb1ee9a | ExecuteBlocks +``` + +BTW - all the transactions are sent to the 0x54e address - which is the `DiamonProxy` deployed on L1 (this address will +be different on your local node - see previous tutorial for more info) . + +And inside, all three methods above belong to +[Executor.sol](https://github.com/matter-labs/zksync-2-contracts/blob/main/ethereum/contracts/zksync/facets/Executor.sol) +facet and you can look at +[README](https://github.com/matter-labs/zksync-2-contracts/blob/main/docs/Overview.md#executorfacet) to see the details +of what each method does. + +The short description is: + +- 'CommitBlocks' - is verifying the block metadata and stores the hash into the L1 contract storage. 
+- 'PublishProof' - gets the proof, checks that the proof is correct and that it is a proof for the block hash that was + stored in commit blocks. (IMPORTANT: in testnet/localnet we allow empty proofs - so that you don't have to run the + full prover locally) +- 'ExecuteBlocks' - is the final call, that stores the root hashes in L1 storage. This allows other calls (like + finalizeWithdrawal) to work. + +So to sum it up - after these 3 calls, the L1 contract has a root hash of a merkle tree, that contains the 'message' +about the withdrawal. + +### Final step - finalizing withdrawal + +Now we're ready to actually claim our ETH on L1. We do this by calling a `finalizeEthWithdrawal` function on the +DiamondProxy contract (Mailbox.sol to be exact). + +To prove that we actually can withdraw the money, we have to say in which L2 block the withdrawal happened, and provide +the merkle proof from our withdrawal log, to the root that is stored in the L1 contract. diff --git a/docs/external-node/01_intro.md b/docs/external-node/01_intro.md new file mode 100644 index 000000000000..9e2596afcee5 --- /dev/null +++ b/docs/external-node/01_intro.md @@ -0,0 +1,127 @@ +# External Node Documentation + +This documentation explains the basics of the zkSync Era External Node. + +## What is the external node + +The external node (herein EN) is a read-replica of the main (centralized) node that can be run by external parties. It +functions by fetching data from the zkSync API and re-applying transactions locally, starting from the genesis block. +The EN shares most of its codebase with the main node. Consequently, when it re-applies transactions, it does so exactly +as the main node did in the past. + +In Ethereum terms, the current state of the EN represents an archive node, providing access to the entire history of the +blockchain. + +## High-level overview + +At a high level, the EN can be seen as an application that has the following modules: + +- API server that provides the publicly available Web3 interface. +- Synchronization layer that interacts with the main node and retrieves transactions and blocks to re-execute. +- Sequencer component that actually executes and persists transactions received from the synchronization layer. +- Several checker modules that ensure the consistency of the EN state. + +With the EN, you are able to: + +- Locally recreate and verify the zkSync Era mainnet/testnet state. +- Interact with the recreated state in a trustless way (in a sense that the validity is locally verified, and you should + not rely on a third-party API zkSync Era provides). +- Use the Web3 API without having to query the main node. +- Send L2 transactions (that will be proxied to the main node). + +With the EN, you _can not_: + +- Create L2 blocks or L1 batches on your own. +- Generate proofs. +- Submit data to L1. + +A more detailed overview of the EN's components is provided in the [components](./06_components.md) section. + +## API overview + +API exposed by the EN strives to be Web3-compliant. If some method is exposed but behaves differently compared to +Ethereum, it should be considered a bug. Please [report][contact_us] such cases. + +[contact_us]: https://docs.zksync.io/contact/ + +### `eth` namespace + +Data getters in this namespace operate in the L2 space: require/return L2 block numbers, check balances in L2, etc. 
+ +Available methods: + +| Method | Notes | +| ----------------------------------------- | ------------------------------------------------------------------------- | +| `eth_blockNumber` | | +| `eth_chainId` | | +| `eth_call` | | +| `eth_estimateGas` | | +| `eth_gasPrice` | | +| `eth_newFilter` | Maximum amount of installed filters is configurable | +| `eth_newBlockFilter` | Same as above | +| `eth_newPendingTransactionsFilter` | Same as above | +| `eth_uninstallFilter` | | +| `eth_getLogs` | Maximum amount of returned entities can be configured | +| `eth_getFilterLogs` | Same as above | +| `eth_getFilterChanges` | Same as above | +| `eth_getBalance` | | +| `eth_getBlockByNumber` | | +| `eth_getBlockByHash` | | +| `eth_getBlockTransactionCountByNumber` | | +| `eth_getBlockTransactionCountByHash` | | +| `eth_getCode` | | +| `eth_getStorageAt` | | +| `eth_getTransactionCount` | | +| `eth_getTransactionByHash` | | +| `eth_getTransactionByBlockHashAndIndex` | | +| `eth_getTransactionByBlockNumberAndIndex` | | +| `eth_getTransactionReceipt` | | +| `eth_protocolVersion` | | +| `eth_sendRawTransaction` | | +| `eth_syncing` | EN is considered synced if it's less than 11 blocks behind the main node. | +| `eth_coinbase` | Always returns a zero address | +| `eth_accounts` | Always returns an empty list | +| `eth_getCompilers` | Always returns an empty list | +| `eth_hashrate` | Always returns zero | +| `eth_getUncleCountByBlockHash` | Always returns zero | +| `eth_getUncleCountByBlockNumber` | Always returns zero | +| `eth_mining` | Always returns false | + +### PubSub + +Only available on the WebSocket servers. + +Available methods: + +| Method | Notes | +| ------------------ | ----------------------------------------------- | +| `eth_subscribe` | Maximum amount of subscriptions is configurable | +| `eth_subscription` | | + +### `net` namespace + +Available methods: + +| Method | Notes | +| ---------------- | -------------------- | +| `net_version` | | +| `net_peer_count` | Always returns 0 | +| `net_listening` | Always returns false | + +### `web3` namespace + +Available methods: + +| Method | Notes | +| -------------------- | ----- | +| `web3_clientVersion` | | + +### `zks` namespace + +This namespace contains rollup-specific extensions to the Web3 API. Note that _only methods_ specified in the +[documentation][zks_docs] are considered public. There may be other methods exposed in this namespace, but undocumented +methods come without any kind of stability guarantees and can be changed or removed without notice. + +Always refer to the documentation linked above to see the list of stabilized methods in this namespace. + +[zks_docs]: https://era.zksync.io/docs/api/api.html#zksync-specific-json-rpc-methods diff --git a/docs/external-node/02_configuration.md b/docs/external-node/02_configuration.md new file mode 100644 index 000000000000..3a8ac81b0e45 --- /dev/null +++ b/docs/external-node/02_configuration.md @@ -0,0 +1,55 @@ +# External Node Configuration + +This document outlines various configuration options for the EN. Currently, the EN requires the definition of numerous +environment variables. To streamline this process, we provide prepared configs for the zkSync Era - for both +[mainnet](prepared_configs/mainnet-config.env) and [testnet](prepared_configs/testnet-config.env). You can use these +files as a starting point and modify only the necessary sections. + +## Database + +The EN uses two databases: PostgreSQL and RocksDB. 
+ +PostgreSQL serves as the main source of truth in the EN, so all the API requests fetch the state from there. PostgreSQL +connection is configured by the `DATABASE_URL`. Additionally, the `DATABASE_POOL_SIZE` variable defines the size of the +connection pool. + +RocksDB is used in components where IO is a bottleneck, such as the State Keeper and the Merkle tree. If possible, it is +recommended to use an NVME SSD for RocksDB. RocksDB requires two variables to be set: `DATABASE_STATE_KEEPER_DB_PATH` +and `DATABASE_NEW_MERKLE_TREE_SSD_PATH`, which must point to different directories. + +## L1 Web3 client + +EN requires a connection to an Ethereum node. The corresponding env variable is `ETH_CLIENT_WEB3_URL`. Make sure to set +the URL corresponding to the correct L1 network (L1 mainnet for L2 mainnet and L1 goerli for L2 testnet). + +Note: Currently, the EN makes 2 requests to the L1 per L1 batch, so the Web3 client usage for a synced node should not +be high. However, during the synchronization phase the new batches would be persisted on the EN quickly, so make sure +that the L1 client won't exceed any limits (e.g. in case you use Infura). + +## Exposed ports + +The dockerized version of the server exposes the following ports: + +- HTTP JSON-RPC: 3060 +- WebSocket JSON-RPC: 3061 +- Prometheus listener: 3322 +- Healtcheck server: 3081 + +While the configuration variables for them exist, you are not expected to change them unless you want to use the EN +outside of provided docker environment (not supported at the time of writing). + +## API limits + +A set of variables prefixed with `API_WEB3_JSON_RPC_` allows you to fine-tune the limits of the RPC servers, such as +limits on the number of returned entries or the limit for the accepted transaction size. Provided files contain sane +defaults that are recommended for use, but these can be edited, e.g. to make the EN more/less restrictive. + +## Logging and observability + +`MISC_LOG_FORMAT` defines the format in which logs are shown: `plain` corresponds to the human-readable format, while +the other option is `json` (recommended for deployments). + +`RUST_LOG` variable allows you to set up the logs granularity (e.g. make the EN emit fewer logs). You can read about the +format [here](https://docs.rs/env_logger/0.10.0/env_logger/#enabling-logging). + +`MISC_SENTRY_URL` and `MISC_OTLP_URL` variables can be configured to set up Sentry and OpenTelementry exporters. diff --git a/docs/external-node/03_running.md b/docs/external-node/03_running.md new file mode 100644 index 000000000000..59dc1e8bc6ae --- /dev/null +++ b/docs/external-node/03_running.md @@ -0,0 +1,42 @@ +# Running the External Node + +This section assumes that you have prepared a configuration file as described on the +[previous page](./02_configuration.md). + +## Preferred hardware configuration + +This configuration is approximate, expect updates to these specs. + +- 32-core CPU +- 32GB RAM +- 400GB SSD storage (NVMe recommended) +- 100 Mbps network connection. + +## Infrastructure + +You need to set up a PostgreSQL server capable of holding at least 1TB of data. + +You are expected to have a DB dump from a corresponding env. You can restore it using +`pg_restore -O -C --dbname=`. + +## Running + +Assuming you have the EN Docker image and an env file with the prepared configuration, that is all you need. 
+ +Sample running command: + +```sh +docker run --env-file --mount type=bind,source=,target= +``` + +Helm charts and other infrastructure configuration options, if required, would be available later. + +## First start + +When you start the node for the first time, the state in PostgreSQL corresponds to the dump you have used, but the state +in RocksDB (mainly the Merkle tree) is absent. Before the node can make any progress, it has to rebuild the state in +RocksDB and verify consistency. The exact time required for that depends on the hardware configuration, but it is +reasonable to expect the state rebuild on the mainnet to take more than 8 hours. + +Monitoring the node behavior and analyzing the state it's in is covered in the +[observability section](./04_observability.md). diff --git a/docs/external-node/04_observability.md b/docs/external-node/04_observability.md new file mode 100644 index 000000000000..f4333f0650af --- /dev/null +++ b/docs/external-node/04_observability.md @@ -0,0 +1,51 @@ +# EN Observability + +The EN provides several options for setting up observability. Configuring logs and sentry is described in the +[configuration](./02_configuration.md) section, so this section focuses on the exposed metrics. + +This section is written with the assumption that you're familiar with +[Prometheus](https://prometheus.io/docs/introduction/overview/) and [Grafana](https://grafana.com/docs/). + +## Buckets + +By default, latency histograms are distributed in the following buckets (in seconds): + +``` +[0.001, 0.005, 0.025, 0.1, 0.25, 1.0, 5.0, 30.0, 120.0] +``` + +## Metrics + +EN exposes a lot of metrics, a significant amount of which aren't interesting outside of the development flow. This +section's purpose is to highlight metrics that may be worth observing in the external setup. + +| Metric name | Type | Labels | Description | +| ---------------------------------------------- | --------- | ------------------------------------- | ------------------------------------------------------------------ | +| `external_node_synced` | Gauge | - | 1 if synced, 0 otherwise. 
Matches `eth_call` behavior | +| `external_node_sync_lag` | Gauge | - | How many blocks behind the main node the EN is | +| `external_node_fetcher_requests` | Histogram | `stage`, `actor` | Duration of requests performed by the different fetcher components | +| `external_node_fetcher_cache_requests` | Histogram | - | Duration of requests performed by the fetcher cache layer | +| `external_node_fetcher_miniblock` | Gauge | `status` | The number of the last L2 block update fetched from the main node | +| `external_node_fetcher_l1_batch` | Gauge | `status` | The number of the last batch update fetched from the main node | +| `external_node_action_queue_action_queue_size` | Gauge | - | Amount of fetched items waiting to be processed | +| `server_miniblock_number` | Gauge | `stage`=`sealed` | Last locally applied L2 block number | +| `server_block_number` | Gauge | `stage`=`sealed` | Last locally applied L1 batch number | +| `server_block_number` | Gauge | `stage`=`tree_lightweight_new_mode` | Last L1 batch number processed by the tree | +| `server_processed_txs` | Counter | `stage`=`mempool_added, state_keeper` | Can be used to show incoming and processing TPS values | +| `api_web3_call` | Histogram | `method` | Duration of Web3 API calls | +| `sql_connection_acquire` | Histogram | - | Time to get an SQL connection from the connection pool | + +## Interpretation + +After applying a dump, the EN has to rebuild the Merkle tree to verify the correctness of the state in PostgreSQL. +During this stage, `server_block_number { stage='tree_lightweight_new_mode' }` is increasing from 0 to +`server_block_number { stage='sealed' }`, while the latter does not increase (EN needs the tree to be up-to-date to +progress). + +After that, the EN has to sync with the main node. `server_block_number { stage='sealed' }` is increasing, and +`external_node_sync_lag` is decreasing. + +Once the node is synchronized, it is indicated by the `external_node_synced`. + +Metrics can be used to detect anomalies in configuration, which is described in more detail in the +[next section](./05_troubleshooting.md). diff --git a/docs/external-node/05_troubleshooting.md b/docs/external-node/05_troubleshooting.md new file mode 100644 index 000000000000..1b60c3e880df --- /dev/null +++ b/docs/external-node/05_troubleshooting.md @@ -0,0 +1,54 @@ +# EN Troubleshooting + +The EN tries to follow the fail-fast principle: if an anomaly is discovered, instead of attempting state recovery, in +most cases it will restart. Most of the time it will manifest as crashes, and if it happens once, it shouldn't be +treated as a problem. + +However, if the node enters the crash loop or otherwise behaves unexpectedly, it may indicate either a bug in the +implementation or a problem with configuration. This section tries to cover common problems. + +## Panics + +Panics is the Rust programming language notion of irrecoverable errors, and normally if panic happens, the application +will immediately crash. + +- Panic matching `called Result::unwrap() on an Err value: Database(PgDatabaseError`: problem communicating with the + PostgreSQL, most likely some of the connections have died. +- Panic matching `failed to init rocksdb: Error { message: "IO error: No space left on device`: more space on SSD is + required. +- Anything that mentions "Poison Error": a "secondary" panic that may occur if one of the components panicked first. If + you see this panic, look for a panic that happened shortly before it to find the real cause. 
+
+Other kinds of panics aren't normally expected. While in most cases the state will be recovered after a restart, please
+[report][contact_us] such cases to Matter Labs regardless.
+
+[contact_us]: https://docs.zksync.io/contact/
+
+## Logs
+
+_Note: logs with the `error` level are reported to Sentry if it's configured. If you notice unneeded alerts there that
+you don't consider actionable, you may disable logs for a component by tweaking the configuration._
+
+| Level | Log substring | Interpretation |
+| ----- | ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------- |
+| ERROR | "One of the tokio actors unexpectedly finished" | One of the components crashed, and the node is restarting. |
+| WARN | "Stop signal received, is shutting down" | Satellite log of the message above. |
+| ERROR | "A lot of requests to the remote API failed in a row" | The remote API used to update token lists is probably down. Logs should disappear once the API is available. |
+| WARN | "Server returned an error status code: 429" | The main API rate limits are too strict. [Contact][contact_us] Matter Labs to discuss the situation. |
+| WARN | "Following transport error occurred" | There was a problem with fetching data from the main node. |
+| WARN | "Unable to get the gas price" | There was a problem with fetching data from the main node. |
+| WARN | "Consistency checker error" | There are problems querying L1; check the Web3 URL you specified in the config. |
+| WARN | "Reorg detected" | A reorg was detected on the main node; the EN will roll back and restart. |
+
+As with panics, it's normally only a problem if a WARN+ level log appears many times in a row.
+
+## Metrics anomalies
+
+The following common anomalies can be discovered by observing metrics _after the tree is rebuilt to match the DB
+snapshot_:
+
+- `external_node_sync_lag` doesn't decrease and `external_node_action_queue_action_queue_size` is near 0. Cause: the
+  fetcher can't fetch new blocks quickly enough. Most likely, the network connection is too slow.
+- `external_node_sync_lag` doesn't decrease and `external_node_action_queue_action_queue_size` is at some high level.
+  Cause: the State Keeper doesn't process fetched data quickly enough. Most likely, a more powerful CPU is needed.
+- `sql_connection_acquire` skyrockets. Cause: there are probably not enough connections in the pool to match the demand.
diff --git a/docs/external-node/06_components.md b/docs/external-node/06_components.md
new file mode 100644
index 000000000000..98f4a9b15fb1
--- /dev/null
+++ b/docs/external-node/06_components.md
@@ -0,0 +1,77 @@
+# EN components
+
+This section contains an overview of the EN's main components.
+
+## API
+
+The EN can serve both the HTTP and the WS Web3 API, as well as PubSub. Whenever possible, it provides data based on the
+local state, with a few exceptions:
+
+- Submitting transactions: Since the EN is a read replica, submitted transactions are proxied to the main node, and the
+  response is returned from the main node.
+- Querying transactions: The EN is not aware of the main node's mempool, and it does not sync rejected transactions.
+  Therefore, if a local lookup for a transaction or its receipt fails, the EN will attempt the same query on the main
+  node.
+
+Apart from these cases, the API does not depend on the main node. Even if the main node is temporarily unavailable, the
+EN can continue to serve the state it has locally.
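+
+As a minimal illustration of this behavior (assuming the EN serves its HTTP Web3 API on port 3060, the default in the
+prepared configs shipped with this guide), a read request such as `eth_blockNumber` is answered from the EN's local
+state, while a submission such as `eth_sendRawTransaction` is forwarded to the main node:
+
+```sh
+# Read request: answered from the EN's local state.
+curl -s -X POST -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' \
+  http://localhost:3060
+
+# Transaction submission: proxied to the main node, whose response is relayed back.
+# Replace "0x..." with an actual signed transaction.
+curl -s -X POST -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","id":2,"method":"eth_sendRawTransaction","params":["0x..."]}' \
+  http://localhost:3060
+```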
+
+## Fetcher
+
+The Fetcher component is responsible for maintaining synchronization between the EN and the main node. Its primary task
+is to fetch new blocks in order to update the local chain state. However, its responsibilities extend beyond that. For
+instance, the Fetcher is also responsible for keeping track of L1 batch statuses. This involves monitoring whether
+locally applied batches have been committed, proven, or executed on L1.
+
+It is worth noting that in addition to fetching the _state_, the EN also retrieves the L1 gas price from the main node
+for the purpose of estimating fees for L2 transactions (since fee estimation is also performed based on the local
+state). This information is necessary to ensure that gas estimations are performed in the exact same manner as on the
+main node, thereby reducing the chances of a transaction not being included in a block.
+
+## State Keeper / VM
+
+The State Keeper component serves as the "sequencer" part of the node. It shares most of its functionality with the main
+node, with one key distinction. The main node retrieves transactions from the mempool and has the authority to decide
+when a specific L2 block or L1 batch should be sealed. On the other hand, the EN retrieves transactions from the queue
+populated by the Fetcher and seals the corresponding blocks/batches based on the data obtained from the Fetcher queue.
+
+The actual execution of batches takes place within the VM, which is identical in both the Main and External nodes.
+
+## Reorg Detector
+
+In zkSync Era, it is theoretically possible for L1 batches to be reverted before the corresponding "execute" operation
+is applied on L1, that is, before the batch is [final][finality]. Such situations are highly uncommon and typically occur
+due to significant issues, e.g. a bug in the sequencer implementation preventing L1 batch commitment. Prior to batch
+finality, the zkSync operator can perform a rollback, reverting one or more batches and restoring the blockchain state
+to a previous point. Finalized batches cannot be reverted at all.
+
+However, even though such situations are rare, the EN must handle them correctly.
+
+To address this, the EN incorporates a Reorg Detector component. This module keeps track of all L1 batches that have not
+yet been finalized. It compares the locally obtained state root hashes with those provided by the main node's API. If
+the root hashes for the latest available L1 batch do not match, the Reorg Detector searches for the specific L1 batch
+responsible for the divergence. Subsequently, it rolls back the local state and restarts the node. Upon restart, the EN
+resumes normal operation.
+
+[finality]: https://docs.zksync.io/userdocs/tech/#transaction-finality
+
+## Consistency Checker
+
+The main node API serves as the primary source of information for the EN. However, relying solely on the API may not
+provide sufficient security since the API data could potentially be incorrect for various reasons. The primary source
+of truth for the rollup system is the L1 smart contract. Therefore, to enhance the security of the EN, each L1 batch
+undergoes cross-checking against the L1 smart contract by a component called the Consistency Checker.
+
+When the Consistency Checker detects that a particular batch has been sent to L1, it recalculates a portion of the input
+known as the "block commitment" for the L1 transaction.
The block commitment contains crucial data such as the state +root and batch number, and is the same commitment that is used for generating a proof for the batch. The Consistency +Checker then compares the locally obtained commitment with the actual commitment sent to L1. If the data does not match, +it indicates a potential bug in either the main node or external node implementation or that the main node API has +provided incorrect data. In either case, the state of the EN cannot be trusted, and the EN enters a crash loop until the +issue is resolved. + +## Health check server + +The EN also exposes an additional server that returns HTTP 200 response when the EN is operating normally, and HTTP 503 +response when some of the health checks don't pass (e.g. when the EN is not fully initialized yet). This server can be +used, for example, to implement the readiness probe in an orchestration solution you use. diff --git a/docs/external-node/prepared_configs/mainnet-config.env b/docs/external-node/prepared_configs/mainnet-config.env new file mode 100644 index 000000000000..c72b7d5cf069 --- /dev/null +++ b/docs/external-node/prepared_configs/mainnet-config.env @@ -0,0 +1,314 @@ +# ------------------------------------------------------------------------ +# -------------- YOU MUST CHANGE THE FOLLOWING VARIABLES ----------------- +# ------------------------------------------------------------------------ + +# URL of the Postgres DB. +DATABASE_URL=postgres://postgres@localhost/zksync_local_ext_node +# PostgreSQL connection pool size +DATABASE_POOL_SIZE=50 + +# Folder where the state_keeper cache will be stored (RocksDB). +# If containerized, this path should be mounted to a volume. +DATABASE_STATE_KEEPER_DB_PATH=./db/ext-node/state_keeper +# Folder where the Merkle Tree will be stored (RocksDB). +# If containerized, this path should be mounted to a volume. +DATABASE_NEW_MERKLE_TREE_SSD_PATH=./db/ext-node/lightweight + +# URL of the Ethereum client (e.g. infura / alchemy). +ETH_CLIENT_WEB3_URL=http://127.0.0.1:8545 + +# ------------------------------------------------------------------------ +# -------------- YOU MAY CONFIGURE THE FOLLOWING VARIABLES --------------- +# ------------------------------------------------------------------------ + +# Port on which to serve the HTTP JSONRPC API. +API_WEB3_JSON_RPC_HTTP_PORT=3060 +# Port on which to serve the WebSocket JSONRPC API. +API_WEB3_JSON_RPC_WS_PORT=3061 +# Port on which to serve metrics to be collected by Prometheus. +API_PROMETHEUS_LISTENER_PORT=3322 +# Port on which to serve the healthcheck endpoint (to check if the service is live). +API_HEALTHCHECK_PORT=3081 + +# Max possible limit of entities to be requested at once. +API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 +# Max possible limit of filters to be active at once. +API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 +# Max possible limit of subscriptions to be active at once. +API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 +# Interval for polling the DB for pubsub (in ms). +API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 +# Number of threads per API server. +API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 +# Tx nonce: how far ahead from the committed nonce can it be. +# This shouldn't be larger than the value on the main node (50). +API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=50 +# The multiplier to use when suggesting gas price. Should be higher than one, +# otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block. 
+API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 +# Timeout for requests (in seconds). +API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 +# The factor by which to scale the gasLimit +API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.2 +# The max possible number of gas that `eth_estimateGas` is allowed to overestimate. +API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 +# Max possible size of an ABI encoded tx (in bytes). +# This shouldn't be larger than the value on the main node. +API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 + +# Settings related to sentry and opentelemetry. +MISC_LOG_FORMAT=plain +MISC_SENTRY_URL=unset +MISC_SENTRY_PANIC_INTERVAL=1800 +MISC_SENTRY_ERROR_INTERVAL=10800 +MISC_OTLP_URL=unset + +# Settings related to Rust logging and backtraces. +# You can read about the format [here](https://docs.rs/env_logger/0.10.0/env_logger/#enabling-logging) to fine-tune logging. +RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_storage=info,zksync_state=debug,zksync_types=info,vm=info,zksync_external_node=info, +RUST_BACKTRACE=full +RUST_LIB_BACKTRACE=1 + + +# ------------------------------------------------------------------------ +# -------------- THE FOLLOWING VARIABLES DEPEND ON THE ENV --------------- +# ------------------------------------------------------------------------ + +# URL of the main zkSync node. +API_WEB3_JSON_RPC_MAIN_NODE_URL=https://zksync2-mainnet.zksync.io:443 + +# Indicator of the API to be used for getting information. +FETCHER_TOKEN_LIST_SOURCE=OneInch +# URL of the API to use for fetching a JSON of well-known tokens. +FETCHER_TOKEN_LIST_URL=https://api.1inch.exchange +# Interval for fetching API data in seconds. Basically, how often we need to poll third-party APIs. +FETCHER_TOKEN_LIST_FETCHING_INTERVAL=3600 + +CHAIN_ETH_NETWORK=mainnet +CHAIN_ETH_ZKSYNC_NETWORK=mainnet2 +CHAIN_ETH_ZKSYNC_NETWORK_ID=324 +ETH_CLIENT_CHAIN_ID=1 + +CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007794e73f682ad6d27e86b6f71bbee875fc26f5708d1713e7cfd476098d3 +CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100067d861e2f5717a12c3e869cfb657793b86bbb0caa05cc1421f16c5217bc + +CONTRACTS_DIAMOND_CUT_FACET_ADDR=0x2a2d6010202B93E727b61a60dfC1d5CF2707c1CE +CONTRACTS_DIAMOND_INIT_ADDR=0xb91d905A698c28b73C61aF60C63919b754FCF4DE +CONTRACTS_DIAMOND_PROXY_ADDR=0x32400084c286cf3e17e7b677ea9583e60a000324 +CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR=0xf48b2a42712BFBBb95f4000AEe3873410DC0546F +CONTRACTS_EXECUTOR_FACET_ADDR=0x389a081BCf20e5803288183b929F08458F1d863D +CONTRACTS_GENESIS_BLOCK_COMMITMENT=0x034fb8586032c0d6f07b4a8db319590095e390bd0dbf84976f8d009ad16cd18d +CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX=21 +CONTRACTS_GENESIS_ROOT=0xbc59c242d551e3939b9b2939b8b686efa77ba3833183045d548aa5f53357ba95 +CONTRACTS_GENESIS_TX_HASH=0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e +CONTRACTS_GETTERS_FACET_ADDR=0xF1fB730b7f8E8391B27B91f8f791e10E4a53CEcc +CONTRACTS_GOVERNANCE_FACET_ADDR=0x6df4A6D71622860dcc64C1FD9645d9a5BE96f088 +CONTRACTS_L1_ALLOW_LIST_ADDR=0x8ffd57A9B2dcc10327768b601468FA192adC5C86 +CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR=0x38ABF296EE79621A225AA85086853b0dA3225D2F +CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR=0x57891966931Eb4Bb6FB81430E6cE0A03AAbDe063 +CONTRACTS_L2_ERC20_BRIDGE_ADDR=0x11f943b2c77b743AB90f4A0Ae7d5A4e7FCA3E102 +CONTRACTS_MAILBOX_FACET_ADDR=0xb2097DBe4410B538a45574B1FCD767E2303c7867 +CONTRACTS_VALIDATOR_TIMELOCK_ADDR=0x3dB52cE065f728011Ac6732222270b3F2360d919 +CONTRACTS_VERIFIER_ADDR=0x473b1887D45D61EfD87731a1D8eC3590b93c565d + +# 
------------------------------------------------------------------------ +# -------------- THE FOLLOWING VARIABLES ARE NOT USED -------------------- +# -------------- BUT HAVE TO BE SET. JUST LEAVE THEM AS IS --------------- +# ------------------------------------------------------------------------ + +CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=250 +CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS=2500 +CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS=1000 +CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS=6000000 +CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE=250000000 +CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 +CHAIN_STATE_KEEPER_SAVE_CALL_TRACES=false + +API_PROMETHEUS_PUSHGATEWAY_URL= +API_PROMETHEUS_PUSH_INTERVAL_MS=0 +DATABASE_MAX_BLOCK_BATCH=0 +CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL=0 +CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 +CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT=0 +CONTRACTS_DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=0 +CONTRACTS_TEST_DUMMY_VERIFIER=true +CONTRACTS_TEST_EASY_PRIORITY_MODE=false +CHAIN_MEMPOOL_SYNC_INTERVAL_MS=0 +CHAIN_MEMPOOL_SYNC_BATCH_SIZE=0 +CHAIN_MEMPOOL_CAPACITY=0 +CHAIN_MEMPOOL_STUCK_TX_TIMEOUT=0 +CHAIN_MEMPOOL_REMOVE_STUCK_TXS=true +CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS=0 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER=0 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC=0 +CONTRACTS_VK_COMMITMENT_BASIC_CIRCUITS=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACTS_VK_COMMITMENT_LEAF=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACTS_VK_COMMITMENT_NODE=0x0000000000000000000000000000000000000000000000000000000000000000 +API_WEB3_JSON_RPC_HTTP_URL= +API_WEB3_JSON_RPC_WS_URL= +CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR=0x0000000000000000000000000000000000000000 +MISC_FEE_ACCOUNT_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACT_VERIFIER_COMPILATION_TIMEOUT=0 +CONTRACT_VERIFIER_POLLING_INTERVAL=0 +CONTRACT_VERIFIER_PROMETHEUS_PORT=0 +DATABASE_MERKLE_TREE_BACKUP_PATH= +DATABASE_PATH= +DATABASE_BACKUP_COUNT=0 +DATABASE_BACKUP_INTERVAL_MS=0 +TEST_DATABASE_URL= +ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT=0 +ETH_WATCH_ETH_NODE_POLL_INTERVAL=0 +FETCHER_TOKEN_PRICE_SOURCE=Mock +FETCHER_TOKEN_PRICE_URL= +FETCHER_TOKEN_PRICE_FETCHING_INTERVAL=0 +FETCHER_TOKEN_TRADING_VOLUME_SOURCE=Mock +FETCHER_TOKEN_TRADING_VOLUME_URL= +FETCHER_TOKEN_TRADING_VOLUME_FETCHING_INTERVAL=0 +API_EXPLORER_PORT=0 +API_EXPLORER_URL= +API_EXPLORER_NETWORK_STATS_POLLING_INTERVAL=0 +API_EXPLORER_REQ_ENTITIES_LIMIT=0 +API_EXPLORER_OFFSET_LIMIT=0 +API_EXPLORER_THREADS_PER_SERVER=0 +API_WEB3_JSON_RPC_ACCOUNT_PKS= +ETH_SENDER_SENDER_WAIT_CONFIRMATIONS=0 +ETH_SENDER_SENDER_EXPECTED_WAIT_TIME_BLOCK=0 +ETH_SENDER_SENDER_TX_POLL_PERIOD=0 +ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD=0 +ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT=0 +ETH_SENDER_SENDER_PROOF_SENDING_MODE=SkipEveryProof +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT=0 +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE=0 
+ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG=0 +ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE=0 +ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES=0 +ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS=0 +ETH_SENDER_SENDER_MAX_SINGLE_TX_GAS=0 +ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI=0 +ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000000 +ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR=0x0000000000000000000000000000000000000000 +ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS=0 +ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES=0 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A=0 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B=0 +ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER=0 +ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD=0 +WITNESS_GENERATION_TIMEOUT_IN_SECS=0 +WITNESS_INITIAL_SETUP_KEY_PATH= +WITNESS_KEY_DOWNLOAD_URL= +WITNESS_MAX_ATTEMPTS=0 +WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=0 +CIRCUIT_SYNTHESIZER_GENERATION_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_MAX_ATTEMPTS=0 +CIRCUIT_SYNTHESIZER_GPU_PROVER_QUEUE_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=0 +CIRCUIT_SYNTHESIZER_PROMETHEUS_LISTENER_PORT=0 +CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSHGATEWAY_URL= +CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSH_INTERVAL_MS=0 +CIRCUIT_SYNTHESIZER_PROVER_GROUP_ID=0 +PROVER_GROUP_GROUP_0_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_1_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_2_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_3_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_4_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_5_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_6_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_7_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_8_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_9_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_100_CIRCUIT_IDS=0 +PROVER_GROUP_REGION_READ_URL= +PROVER_GROUP_REGION_OVERRIDE= +PROVER_GROUP_ZONE_READ_URL= +PROVER_GROUP_ZONE_OVERRIDE= +PROVER_GROUP_SYNTHESIZER_PER_GPU=0 +HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_BLOB_CLEANING_INTERVAL_MS=0 +HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS=0 +HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS=0 +HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS=0 +OBJECT_STORE_BUCKET_BASE_URL= +OBJECT_STORE_MODE=FileBacked +OBJECT_STORE_FILE_BACKED_BASE_PATH= +OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH= +OBJECT_STORE_MAX_RETRIES=0 +NFS_SETUP_KEY_MOUNT_PATH= +PROVER_NON_GPU_PROMETHEUS_PORT=0 +PROVER_NON_GPU_INITIAL_SETUP_KEY_PATH= +PROVER_NON_GPU_KEY_DOWNLOAD_URL= +PROVER_NON_GPU_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_NON_GPU_NUMBER_OF_THREADS=0 +PROVER_NON_GPU_MAX_ATTEMPTS=0 +PROVER_NON_GPU_POLLING_DURATION_IN_MILLIS=0 +PROVER_NON_GPU_SETUP_KEYS_PATH= +PROVER_NON_GPU_NUMBER_OF_SETUP_SLOTS=0 +PROVER_NON_GPU_ASSEMBLY_RECEIVER_PORT=0 +PROVER_NON_GPU_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_NON_GPU_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_NON_GPU_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_TWO_GPU_FORTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_TWO_GPU_FORTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_TWO_GPU_FORTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_TWO_GPU_FORTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 
+PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_TWO_GPU_FORTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_ONE_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_ONE_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_TWO_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_TWO_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +ZKSYNC_ACTION=dont_ask diff --git a/docs/external-node/prepared_configs/testnet-config.env b/docs/external-node/prepared_configs/testnet-config.env new file mode 100644 index 000000000000..fab80fe341ae --- /dev/null +++ b/docs/external-node/prepared_configs/testnet-config.env @@ -0,0 +1,313 @@ +# ------------------------------------------------------------------------ +# -------------- YOU MUST CHANGE THE FOLLOWING VARIABLES ----------------- +# ------------------------------------------------------------------------ + +# URL of the Postgres DB. +DATABASE_URL=postgres://postgres@localhost/zksync_local_ext_node +# PostgreSQL connection pool size +DATABASE_POOL_SIZE=50 + +# Folder where the state_keeper cache will be stored (RocksDB). +DATABASE_STATE_KEEPER_DB_PATH=./db/ext-node/state_keeper +# Folder where the Merkle Tree will be stored (RocksDB). +DATABASE_NEW_MERKLE_TREE_SSD_PATH=./db/ext-node/lightweight + +# URL of the Ethereum client (e.g. infura / alchemy). 
+ETH_CLIENT_WEB3_URL=http://127.0.0.1:8545 + +# ------------------------------------------------------------------------ +# -------------- YOU MAY CONFIGURE THE FOLLOWING VARIABLES --------------- +# ------------------------------------------------------------------------ + +# Port on which to serve the HTTP JSONRPC API. +API_WEB3_JSON_RPC_HTTP_PORT=3060 +# Port on which to serve the WebSocket JSONRPC API. +API_WEB3_JSON_RPC_WS_PORT=3061 +# Port on which to serve metrics to be collected by Prometheus. +API_PROMETHEUS_LISTENER_PORT=3322 +# Port on which to serve the healthcheck endpoint (to check if the service is live). +API_HEALTHCHECK_PORT=3081 + +# Max possible limit of entities to be requested at once. +API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT=10000 +# Max possible limit of filters to be active at once. +API_WEB3_JSON_RPC_FILTERS_LIMIT=10000 +# Max possible limit of subscriptions to be active at once. +API_WEB3_JSON_RPC_SUBSCRIPTIONS_LIMIT=10000 +# Interval for polling the DB for pubsub (in ms). +API_WEB3_JSON_RPC_PUBSUB_POLLING_INTERVAL=200 +# Number of threads per API server. +API_WEB3_JSON_RPC_THREADS_PER_SERVER=128 +# Tx nonce: how far ahead from the committed nonce can it be. +# This shouldn't be larger than the value on the main node (50). +API_WEB3_JSON_RPC_MAX_NONCE_AHEAD=50 +# The multiplier to use when suggesting gas price. Should be higher than one, +# otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block. +API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 +# Timeout for requests (in seconds). +API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 +# The factor by which to scale the gasLimit +API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.2 +# The max possible number of gas that `eth_estimateGas` is allowed to overestimate. +API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 +# Max possible size of an ABI encoded tx (in bytes). +# This shouldn't be larger than the value on the main node. +API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 + +# Settings related to sentry and opentelemetry. +MISC_LOG_FORMAT=plain +MISC_SENTRY_URL=unset +MISC_SENTRY_PANIC_INTERVAL=1800 +MISC_SENTRY_ERROR_INTERVAL=10800 +MISC_OTLP_URL=unset + +# Settings related to Rust logging and backtraces. +# You can read about the format [here](https://docs.rs/env_logger/0.10.0/env_logger/#enabling-logging) to fine-tune logging. +RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_storage=info,zksync_state=debug,zksync_types=info,vm=info,zksync_external_node=info, +RUST_BACKTRACE=full +RUST_LIB_BACKTRACE=1 + + +# ------------------------------------------------------------------------ +# -------------- THE FOLLOWING VARIABLES DEPEND ON THE ENV --------------- +# ------------------------------------------------------------------------ + +# URL of the main zkSync node. +API_WEB3_JSON_RPC_MAIN_NODE_URL=https://zksync2-testnet.zksync.dev:443 + +# Indicator of the API to be used for getting information. +FETCHER_TOKEN_LIST_SOURCE=Mock +# URL of the API to use for fetching a JSON of well-known tokens. +FETCHER_TOKEN_LIST_URL= +# Interval for fetching API data in seconds. Basically, how often we need to poll third-party APIs. 
+FETCHER_TOKEN_LIST_FETCHING_INTERVAL=3600 + +CHAIN_ETH_NETWORK=goerli +CHAIN_ETH_ZKSYNC_NETWORK=testnet2 +CHAIN_ETH_ZKSYNC_NETWORK_ID=280 +ETH_CLIENT_CHAIN_ID=5 + +CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007794e73f682ad6d27e86b6f71bbee875fc26f5708d1713e7cfd476098d3 +CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100067d861e2f5717a12c3e869cfb657793b86bbb0caa05cc1421f16c5217bc + +CONTRACTS_DIAMOND_CUT_FACET_ADDR=0x0E5e07A7E881EC9596Fa85cA34A11e122C1b5451 +CONTRACTS_DIAMOND_INIT_ADDR=0x81aE464127286C26f21495d053AA19Eec708055F +CONTRACTS_DIAMOND_PROXY_ADDR=0x1908e2BF4a88F91E4eF0DC72f02b8Ea36BEa2319 +CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR=0xA65b4bf273b528116b1d84aC9b183B20c5e7C1b2 +CONTRACTS_EXECUTOR_FACET_ADDR=0x0CFc82A3A8576c6eE5d686dF161Edce1f5b0E9C8 +CONTRACTS_GENESIS_BLOCK_COMMITMENT=0x60d946bb425a7a5f6104fe7ef4b854fac4c32b6ba5b3601c82014fa28f6511d6 +CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX=21 +CONTRACTS_GENESIS_ROOT=0xc4b302452a77d72c6ad618ab1f18ec3a7699520dc053e767eca6427d24bd0848 +CONTRACTS_GENESIS_TX_HASH=0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e +CONTRACTS_GETTERS_FACET_ADDR=0x06E702991c12F3c935416be8dAF124C67B97Ae64 +CONTRACTS_GOVERNANCE_FACET_ADDR=0xB76c27cCF3b20D39a6d98fF1ec2BA919eC008cC9 +CONTRACTS_L1_ALLOW_LIST_ADDR=0xe8DF7183D637a8a86B5F63c046A191C486CFF0B8 +CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR=0xA3167fC0d8ff106BceA833F2dDBd9C253095fa6e +CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR=0x927DdFcc55164a59E0F33918D13a2D559bC10ce7 +CONTRACTS_L2_ERC20_BRIDGE_ADDR=0x00ff932A6d70E2B8f1Eb4919e1e09C1923E7e57b +CONTRACTS_L2_TESTNET_PAYMASTER_ADDR=0x8f0ea1312da29f17eabeb2f484fd3c112cccdd63 +CONTRACTS_MAILBOX_FACET_ADDR=0xc03Dd333392c312717f71970F14f42Ba511ceAA1 +CONTRACTS_VALIDATOR_TIMELOCK_ADDR=0xB949b4E3945628650862a29Abef3291F2eD52471 +CONTRACTS_VERIFIER_ADDR=0xc61FFb3C75594b58e01E5D323B0F9FaFAE37e413 + +# ------------------------------------------------------------------------ +# -------------- THE FOLLOWING VARIABLES ARE NOT USED -------------------- +# -------------- BUT HAVE TO BE SET. 
JUST LEAVE THEM AS IS --------------- +# ------------------------------------------------------------------------ + +CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=250 +CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS=2500 +CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS=1000 +CHAIN_STATE_KEEPER_MAX_SINGLE_TX_GAS=6000000 +CHAIN_STATE_KEEPER_MAX_ALLOWED_L2_TX_GAS_LIMIT=4000000000 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_CLOSE_BLOCK_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GEOMETRY_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE=0.95 +CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE=250000000 +CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT=300000 +CHAIN_STATE_KEEPER_SAVE_CALL_TRACES=false + +API_PROMETHEUS_PUSHGATEWAY_URL= +API_PROMETHEUS_PUSH_INTERVAL_MS=0 +DATABASE_MAX_BLOCK_BATCH=0 +CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL=0 +CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 +CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT=0 +CONTRACTS_DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=0 +CONTRACTS_TEST_DUMMY_VERIFIER=true +CONTRACTS_TEST_EASY_PRIORITY_MODE=false +CHAIN_MEMPOOL_SYNC_INTERVAL_MS=0 +CHAIN_MEMPOOL_SYNC_BATCH_SIZE=0 +CHAIN_MEMPOOL_CAPACITY=0 +CHAIN_MEMPOOL_STUCK_TX_TIMEOUT=0 +CHAIN_MEMPOOL_REMOVE_STUCK_TXS=true +CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS=0 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER=0 +CHAIN_CIRCUIT_BREAKER_HTTP_REQ_RETRY_INTERVAL_SEC=0 +CONTRACTS_VK_COMMITMENT_BASIC_CIRCUITS=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACTS_VK_COMMITMENT_LEAF=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACTS_VK_COMMITMENT_NODE=0x0000000000000000000000000000000000000000000000000000000000000000 +API_WEB3_JSON_RPC_HTTP_URL= +API_WEB3_JSON_RPC_WS_URL= +CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR=0x0000000000000000000000000000000000000000 +MISC_FEE_ACCOUNT_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000000 +CONTRACT_VERIFIER_COMPILATION_TIMEOUT=0 +CONTRACT_VERIFIER_POLLING_INTERVAL=0 +CONTRACT_VERIFIER_PROMETHEUS_PORT=0 +DATABASE_MERKLE_TREE_BACKUP_PATH= +DATABASE_PATH= +DATABASE_BACKUP_COUNT=0 +DATABASE_BACKUP_INTERVAL_MS=0 +TEST_DATABASE_URL= +ETH_WATCH_CONFIRMATIONS_FOR_ETH_EVENT=0 +ETH_WATCH_ETH_NODE_POLL_INTERVAL=0 +FETCHER_TOKEN_PRICE_SOURCE=Mock +FETCHER_TOKEN_PRICE_URL= +FETCHER_TOKEN_PRICE_FETCHING_INTERVAL=0 +FETCHER_TOKEN_TRADING_VOLUME_SOURCE=Mock +FETCHER_TOKEN_TRADING_VOLUME_URL= +FETCHER_TOKEN_TRADING_VOLUME_FETCHING_INTERVAL=0 +API_EXPLORER_PORT=0 +API_EXPLORER_URL= +API_EXPLORER_NETWORK_STATS_POLLING_INTERVAL=0 +API_EXPLORER_REQ_ENTITIES_LIMIT=0 +API_EXPLORER_OFFSET_LIMIT=0 +API_EXPLORER_THREADS_PER_SERVER=0 +API_WEB3_JSON_RPC_ACCOUNT_PKS= +ETH_SENDER_SENDER_WAIT_CONFIRMATIONS=0 +ETH_SENDER_SENDER_EXPECTED_WAIT_TIME_BLOCK=0 +ETH_SENDER_SENDER_TX_POLL_PERIOD=0 +ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD=0 +ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT=0 +ETH_SENDER_SENDER_PROOF_SENDING_MODE=SkipEveryProof +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT=0 +ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_PROVE_DEADLINE=0 +ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE=0 +ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG=0 +ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE=0 +ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES=0 +ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS=0 
+ETH_SENDER_SENDER_MAX_SINGLE_TX_GAS=0 +ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI=0 +ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000000 +ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR=0x0000000000000000000000000000000000000000 +ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS=0 +ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES=0 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A=0 +ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B=0 +ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER=0 +ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD=0 +WITNESS_GENERATION_TIMEOUT_IN_SECS=0 +WITNESS_INITIAL_SETUP_KEY_PATH= +WITNESS_KEY_DOWNLOAD_URL= +WITNESS_MAX_ATTEMPTS=0 +WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS=0 +CIRCUIT_SYNTHESIZER_GENERATION_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_MAX_ATTEMPTS=0 +CIRCUIT_SYNTHESIZER_GPU_PROVER_QUEUE_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_WAIT_TIMEOUT_IN_SECS=0 +CIRCUIT_SYNTHESIZER_PROVER_INSTANCE_POLL_TIME_IN_MILLI_SECS=0 +CIRCUIT_SYNTHESIZER_PROMETHEUS_LISTENER_PORT=0 +CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSHGATEWAY_URL= +CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSH_INTERVAL_MS=0 +CIRCUIT_SYNTHESIZER_PROVER_GROUP_ID=0 +PROVER_GROUP_GROUP_0_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_1_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_2_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_3_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_4_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_5_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_6_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_7_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_8_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_9_CIRCUIT_IDS=0 +PROVER_GROUP_GROUP_100_CIRCUIT_IDS=0 +PROVER_GROUP_REGION_READ_URL= +PROVER_GROUP_REGION_OVERRIDE= +PROVER_GROUP_ZONE_READ_URL= +PROVER_GROUP_ZONE_OVERRIDE= +PROVER_GROUP_SYNTHESIZER_PER_GPU=0 +HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_BLOB_CLEANING_INTERVAL_MS=0 +HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS=0 +HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS=0 +HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS=0 +HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS=0 +OBJECT_STORE_BUCKET_BASE_URL= +OBJECT_STORE_MODE=FileBacked +OBJECT_STORE_FILE_BACKED_BASE_PATH= +OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH= +OBJECT_STORE_MAX_RETRIES=0 +NFS_SETUP_KEY_MOUNT_PATH= +PROVER_NON_GPU_PROMETHEUS_PORT=0 +PROVER_NON_GPU_INITIAL_SETUP_KEY_PATH= +PROVER_NON_GPU_KEY_DOWNLOAD_URL= +PROVER_NON_GPU_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_NON_GPU_NUMBER_OF_THREADS=0 +PROVER_NON_GPU_MAX_ATTEMPTS=0 +PROVER_NON_GPU_POLLING_DURATION_IN_MILLIS=0 +PROVER_NON_GPU_SETUP_KEYS_PATH= +PROVER_NON_GPU_NUMBER_OF_SETUP_SLOTS=0 +PROVER_NON_GPU_ASSEMBLY_RECEIVER_PORT=0 +PROVER_NON_GPU_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_NON_GPU_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_NON_GPU_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_TWO_GPU_FORTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_TWO_GPU_FORTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_TWO_GPU_FORTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_TWO_GPU_FORTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_TWO_GPU_FORTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_TWO_GPU_FORTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_TWO_GPU_FORTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 
+PROVER_ONE_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_ONE_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_ONE_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_ONE_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_ONE_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_TWO_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_TWO_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_TWO_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_TWO_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_PROMETHEUS_PORT=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_INITIAL_SETUP_KEY_PATH= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_KEY_DOWNLOAD_URL= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_GENERATION_TIMEOUT_IN_SECS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_THREADS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_MAX_ATTEMPTS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_POLLING_DURATION_IN_MILLIS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_SETUP_KEYS_PATH= +PROVER_FOUR_GPU_EIGHTY_GB_MEM_NUMBER_OF_SETUP_SLOTS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_PORT=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_RECEIVER_POLL_TIME_IN_MILLIS=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_ASSEMBLY_QUEUE_CAPACITY=0 +PROVER_FOUR_GPU_EIGHTY_GB_MEM_SPECIALIZED_PROVER_GROUP_ID=0 +ZKSYNC_ACTION=dont_ask diff --git a/docs/launch.md b/docs/launch.md index 7f68f927e4c8..90ee266eeb55 100644 --- a/docs/launch.md +++ b/docs/launch.md @@ -20,7 +20,7 @@ zk init During the first initialization you have to download around 8 GB of setup files, this should be done once. If you have a problem on this step of the initialization, see help for the `zk run plonk-setup` command. -If you face any other problems with the `zk init` command, go to the [Troubleshooting](##Troubleshooting) section at the +If you face any other problems with the `zk init` command, go to the [Troubleshooting](#Troubleshooting) section at the end of this file. There are solutions for some common error cases. To completely reset the dev environment: @@ -106,7 +106,7 @@ zk f cargo +nightly run --features gpu --release --bin zksync_prover ## Running the verification key generator ```shell -# ensure that the setup_2^26.key in the current directory, the file can be download from https://storage.googleapis.com/universal-setup/setup_2\^26.key +# ensure that the setup_2^26.key in the current directory, the file can be download from https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key # To generate all verification keys cargo run --release --bin zksync_verification_key_generator @@ -166,7 +166,7 @@ zk contract_verifier **Problem**. 
`zk init` fails with the following error: ``` -Initializing download: https://storage.googleapis.com/universal-setup/setup_2%5E20.key +Initializing download: https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2%5E20.key SSL error: certificate verify failed ``` @@ -181,3 +181,150 @@ fs_1.default.rmSync is not a function ``` **Solution**. Make sure that the version of `node.js` installed on your computer is `14.14.0` or higher. + +### Invalid bytecode: () + +**Problem**. `zk init` fails with an error similar to: + +``` +Running `target/release/zksync_server --genesis` +2023-04-05T14:23:40.291277Z INFO zksync_core::genesis: running regenesis +thread 'main' panicked at 'Invalid bytecode: ()', core/lib/utils/src/bytecode.rs:159:10 +stack backtrace: + 0: 0x104551410 - std::backtrace_rs::backtrace::libunwind::trace::hf9c5171f212b04e2 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/libunwind.rs:93:5 + 1: 0x104551410 - std::backtrace_rs::backtrace::trace_unsynchronized::h179003f6ec753118 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/mod.rs:66:5 + 2: 0x104551410 - std::sys_common::backtrace::_print_fmt::h92d38f701cf42b17 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:65:5 + 3: 0x104551410 - ::fmt::hb33e6e8152f78c95 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:44:22 + 4: 0x10456cdb0 - core::fmt::write::hd33da007f7a27e39 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/fmt/mod.rs:1208:17 + 5: 0x10454b41c - std::io::Write::write_fmt::h7edc10723862001e + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/io/mod.rs:1682:15 + 6: 0x104551224 - std::sys_common::backtrace::_print::h5e00f05f436af01f + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:47:5 + 7: 0x104551224 - std::sys_common::backtrace::print::h895ee35b3f17b334 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:34:9 + 8: 0x104552d84 - std::panicking::default_hook::{{closure}}::h3b7ee083edc2ea3e + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:267:22 + 9: 0x104552adc - std::panicking::default_hook::h4e7c2c28eba716f5 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:286:9 + 10: 0x1045533a8 - std::panicking::rust_panic_with_hook::h1672176227032c45 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:688:13 + 11: 0x1045531c8 - std::panicking::begin_panic_handler::{{closure}}::h0b2d072f9624d32e + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:579:13 + 12: 0x104551878 - std::sys_common::backtrace::__rust_end_short_backtrace::he9abda779115b93c + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:137:18 + 13: 0x104552f24 - rust_begin_unwind + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:575:5 + 14: 0x1045f89c0 - core::panicking::panic_fmt::h23ae44661fec0889 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:64:14 + 15: 0x1045f8ce0 - core::result::unwrap_failed::h414a6cbb12b1e143 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/result.rs:1791:5 + 16: 0x103f79a30 - zksync_utils::bytecode::hash_bytecode::h397dd7c5b6202bf4 + 17: 0x103e47e78 - 
zksync_contracts::BaseSystemContracts::load_from_disk::h0e2da8f63292ac46 + 18: 0x102d885a0 - zksync_core::genesis::ensure_genesis_state::{{closure}}::h5143873f2c337e11 + 19: 0x102d7dee0 - zksync_core::genesis_init::{{closure}}::h4e94f3d4ad984788 + 20: 0x102d9c048 - zksync_server::main::{{closure}}::h3fe943a3627d31e1 + 21: 0x102d966f8 - tokio::runtime::park::CachedParkThread::block_on::h2f2fdf7edaf08470 + 22: 0x102df0dd4 - tokio::runtime::runtime::Runtime::block_on::h1fd1d83272a23194 + 23: 0x102e21470 - zksync_server::main::h500621fd4d160768 + 24: 0x102d328f0 - std::sys_common::backtrace::__rust_begin_short_backtrace::h52973e519e2e8a0d + 25: 0x102e08ea8 - std::rt::lang_start::{{closure}}::hbd395afe0ab3b799 + 26: 0x10454508c - core::ops::function::impls:: for &F>::call_once::ha1c2447b9b665e13 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/ops/function.rs:606:13 + 27: 0x10454508c - std::panicking::try::do_call::ha57d6d1e9532dc1f + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 + 28: 0x10454508c - std::panicking::try::hca0526f287961ecd + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 + 29: 0x10454508c - std::panic::catch_unwind::hdcaa7fa896e0496a + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 + 30: 0x10454508c - std::rt::lang_start_internal::{{closure}}::h142ec071d3766871 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:48 + 31: 0x10454508c - std::panicking::try::do_call::h95f5e55d6f048978 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 + 32: 0x10454508c - std::panicking::try::h0fa00e2f7b4a5c64 + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 + 33: 0x10454508c - std::panic::catch_unwind::h1765f149814d4d3e + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 + 34: 0x10454508c - std::rt::lang_start_internal::h00a235e820a7f01c + at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:20 + 35: 0x102e21578 - _main +Error: Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty) +``` + +**Description**. This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because +your `zksync-2-dev/etc/system-contracts/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. +We don't expect this error to happen as we've updated to latest version which fixes the problem. + +**Solution**. Update your dependency and reinit: + +``` +yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder +zk clean --all && zk init +``` + +On the run, it moved from: + +``` + "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", +``` + +to: + +``` + "@matterlabs/hardhat-zksync-solc": "^0.3.15", +``` + +### Error: Bytecode length in 32-byte words must be odd + +**Problem**. `zk init` fails with an error similar to: + +``` +Successfully generated Typechain artifacts! 
+Error: Error: Bytecode length in 32-byte words must be odd + at hashL2Bytecode (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:29:15) + at computeL2Create2Address (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:53:26) + at /Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:50:63 + at step (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:33:23) + at Object.next (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:14:53) + at fulfilled (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:5:58) +error Command failed with exit code 1. +info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. +error Command failed. +Exit code: 1 +Command: /Users/emilluta/.nvm/versions/node/v16.19.1/bin/node +Arguments: /opt/homebrew/Cellar/yarn/1.22.19/libexec/lib/cli.js compile-and-deploy-libs +Directory: /Users/emilluta/code/zksync-2-dev/contracts/zksync +Output: + +info Visit https://yarnpkg.com/en/docs/cli/workspace for documentation about this command. +error Command failed with exit code 1. +info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. +Error: Child process exited with code 1 +``` + +**Description**. This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because +your `zksync-2-dev/contracts/zksync/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. We +don't expect this error to happen as we've updated to latest version which fixes the problem. + +**Solution**. Update your dependency and reinit: + +``` +yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder +zk clean --all && zk init +``` + +On the run, it moved from: + +``` + "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", +``` + +to: + +``` + "@matterlabs/hardhat-zksync-solc": "^0.3.15", +``` diff --git a/docs/repositories.md b/docs/repositories.md new file mode 100644 index 000000000000..ffde2ec357c4 --- /dev/null +++ b/docs/repositories.md @@ -0,0 +1,75 @@ +# Repositories + +## zkSync Era + +### Core components + +| Internal repository | Public repository | Description | +| ----------------------------------------------------------- | --------------------------------------------------------------------- | --------------------------------------------------------- | +| [zksync-2-dev](https://github.com/matter-labs/zksync-2-dev) | [zksync-era](https://github.com/matter-labs/zksync-era) | zk server logic, including the APIs and database accesses | +| - | [zksync-wallet-vue](https://github.com/matter-labs/zksync-wallet-vue) | Wallet frontend | + +### Contracts + +| Internal repository | Public repository | Description | +| ------------------------------------------------------------------- | --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| [contracts](https://github.com/matter-labs/contracts) | [era-contracts](https://github.com/matter-labs/era-contracts) | L1 & L2 contracts, that are used to manage bridges and communication between L1 & L2. 
| [system-contracts](https://github.com/matter-labs/system-contracts) | [era-system-contracts](https://github.com/matter-labs/era-system-contracts) | Privileged contracts that are running on L2 (like Bootloader or ContractDeployer) |
+| | [v2-testnet-contracts](https://github.com/matter-labs/zksync-2-dev) | |
+
+### Compiler
+
+| Internal repository | Public repository | Description |
+| ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------- |
+| [compiler-tester](https://github.com/matter-labs/compiler-tester) | [era-compiler-tester](https://github.com/matter-labs/era-compiler-tester) | Integration testing framework for running executable tests on zkEVM |
+| [compiler-tests](https://github.com/matter-labs/compiler-tests) | [era-compiler-tests](https://github.com/matter-labs/era-compiler-tests) | Collection of executable tests for zkEVM |
+| [compiler-llvm](https://github.com/matter-labs/compiler-llvm) | [era-compiler-llvm](https://github.com/matter-labs/compiler-llvm) | zkEVM fork of the LLVM framework |
+| [compiler-solidity](https://github.com/matter-labs/compiler-solidity) | [era-compiler-solidity](https://github.com/matter-labs/era-compiler-solidity) | Solidity Yul/EVMLA compiler front end |
+| [compiler-vyper](https://github.com/matter-labs/compiler-vyper) | [era-compiler-vyper](https://github.com/matter-labs/era-compiler-vyper) | Vyper LLL compiler front end |
+| [compiler-llvm-context](https://github.com/matter-labs/compiler-llvm-context) | [era-compiler-llvm-context](https://github.com/matter-labs/era-compiler-llvm-context) | LLVM IR generator logic shared by multiple front ends |
+| [compiler-common](https://github.com/matter-labs/compiler-common) | [era-compiler-common](https://github.com/matter-labs/era-compiler-common) | Common compiler constants |
+| | [era-compiler-llvm-builder](https://github.com/matter-labs/era-compiler-llvm-builder) | Tool for building our fork of the LLVM framework |
+
+### zkEVM
+
+| Internal repository | Public repository | Description |
+| ----------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- |
+| [zkevm_opcode_defs](https://github.com/matter-labs/zkevm_opcode_defs) | [era-zkevm_opcode_defs](https://github.com/matter-labs/era-zkevm_opcode_defs) | Opcode definitions for zkEVM - main dependency for many other repos |
+| [zk_evm](https://github.com/matter-labs/zk_evm) | [era-zk_evm](https://github.com/matter-labs/era-zk_evm) | EVM implementation in pure Rust, without circuits |
+| [sync_vm](https://github.com/matter-labs/sync_evm) | [era-sync_vm](https://github.com/matter-labs/era-sync_vm) | EVM implementation using circuits |
+| [zkEVM-assembly](https://github.com/matter-labs/zkEVM-assembly) | [era-zkEVM-assembly](https://github.com/matter-labs/era-zkEVM-assembly) | Code for parsing zkEVM assembly |
+| [zkevm_test_harness](https://github.com/matter-labs/zkevm_test_harness) | [era-zkevm_test_harness](https://github.com/matter-labs/era-zkevm_test_harness) | Tests that compare the two implementations of the zkEVM - the non-circuit one (zk_evm) and the circuit one (sync_vm) |
+| [circuit_testing](https://github.com/matter-labs/circuit_testing) | [era-circuit_testing](https://github.com/matter-labs/era-circuit_testing) | ?? |
+| [heavy-ops-service](https://github.com/matter-labs/heavy-ops-service) | [era-heavy-ops-service](https://github.com/matter-labs/era-heavy-ops-service) | Main circuit prover that requires a GPU to run. |
+| [bellman-cuda](https://github.com/matter-labs/bellman-cuda) | [era-bellman-cuda](https://github.com/matter-labs/era-bellman-cuda) | CUDA implementations for cryptographic functions used by the prover |
+| [zkevm_tester](https://github.com/matter-labs/zkevm_tester) | [era-zkevm_tester](https://github.com/matter-labs/era-zkevm_tester) | Assembly runner for zkEVM testing |
+
+### Tools & contract developers
+
+| Public repository | Description |
+| --------------------------------------------------------------- | ----------------------------------------------------------------------------- |
+| [local-setup](https://github.com/matter-labs/local-setup) | Docker-based zk server (together with L1) that can be used for local testing |
+| [zksolc-bin](https://github.com/matter-labs/zksolc-bin) | Repository with zksolc compiler binaries |
+| [zkvyper-bin](https://github.com/matter-labs/zkvyper-bin) | Repository with zkvyper compiler binaries |
+| [zksync-cli](https://github.com/matter-labs/zksync-cli) | Command-line tool to interact with zkSync |
+| [hardhat-zksync](https://github.com/matter-labs/hardhat-zksync) | Plugins for Hardhat |
+
+### Examples & documentation
+
+| Public repository | Description |
+| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
+| [zksync-web-era-docs](https://github.com/matter-labs/zksync-web-era-docs) | Public documentation, API descriptions, etc. Source code for the [public docs](https://era.zksync.io/docs/) |
+| [era-tutorial-examples](https://github.com/matter-labs/era-tutorial-examples) | List of tutorials |
+| [custom-paymaster-tutorial](https://github.com/matter-labs/custom-paymaster-tutorial) | ?? |
+| [daily-spendlimit-tutorial](https://github.com/matter-labs/daily-spendlimit-tutorial) | ?? |
+| [custom-aa-tutorial](https://github.com/matter-labs/custom-aa-tutorial) | Tutorial for Account Abstraction |
+| [era-hardhat-with-plugins](https://github.com/matter-labs/era-hardhat-with-plugins) | ?? |
+| [zksync-hardhat-template](https://github.com/matter-labs/zksync-hardhat-template) | ?? |
+
+## zkSync Lite (v1)
+
+| Internal repository | Public repository | Description |
+| ------------------------------------------------------- | --------------------------------------------------------------------------- | ---------------------------------- |
+| [zksync-dev](https://github.com/matter-labs/zksync-dev) | [zksync](https://github.com/matter-labs/zksync) | zkSync Lite/v1 implementation |
+| | [zksync-docs](https://github.com/matter-labs/zksync-docs) | Public documentation for zkSync v1 |
+| | [zksync-dapp-checkout](https://github.com/matter-labs/zksync-dapp-checkout) | ?? |
diff --git a/docs/setup-dev.md b/docs/setup-dev.md
index d299eee96ebb..89646756d01e 100644
--- a/docs/setup-dev.md
+++ b/docs/setup-dev.md
@@ -16,7 +16,7 @@ If you are a NixOS user or would like to have a reproducible environment, skip t
If you are using an ssh key to authenticate with Github you need to make git always use ssh instead of http.
```bash -git config --global url."ssh://git@github.com/".insteadOf https://github.com/ +git config url."ssh://git@github.com/".insteadOf https://github.com/ ``` [More information about how we use git](https://www.notion.so/matterlabs/Working-with-dependencies-in-private-repositories-697620178338452798a0ea5ac0d8e56a) diff --git a/etc/ERC20/package.json b/etc/ERC20/package.json index 5c3adca0a349..ef16d6baebf5 100644 --- a/etc/ERC20/package.json +++ b/etc/ERC20/package.json @@ -3,9 +3,8 @@ "version": "0.1.0", "main": "index.js", "license": "MIT", - "dependencies": {}, "devDependencies": { - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", + "@matterlabs/hardhat-zksync-solc": "^0.3.15", "hardhat": "=2.12.4" } } diff --git a/etc/contracts-test-data/contracts/counter/counter.sol b/etc/contracts-test-data/contracts/counter/counter.sol index acf613538a22..748ab91aa70f 100644 --- a/etc/contracts-test-data/contracts/counter/counter.sol +++ b/etc/contracts-test-data/contracts/counter/counter.sol @@ -9,11 +9,16 @@ contract Counter { value += x; } - function incrementWithRevert(uint256 x, bool shouldRevert) public { + function incrementWithRevertPayable(uint256 x, bool shouldRevert) payable public returns (uint256) { + return incrementWithRevert(x, shouldRevert); + } + + function incrementWithRevert(uint256 x, bool shouldRevert) public returns (uint256) { value += x; if(shouldRevert) { revert("This method always reverts"); } + return value; } function get() public view returns (uint256) { diff --git a/etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol b/etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol index 13884b71727b..409f3d16b372 100644 --- a/etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol +++ b/etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol @@ -75,6 +75,7 @@ library RLPEncoder { /// @notice Uses little endian ordering (The least significant byte has index `0`). /// NOTE: returns `0` for `0` function _highestByteSet(uint256 _number) private pure returns (uint256 hbs) { + // TODO: for optimization, the comparison can be replaced with bitwise operations // should be resolver after evaluating the cost of opcodes. 
if (_number >= 2**128) { _number >>= 128; diff --git a/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol b/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol new file mode 100644 index 000000000000..793bf191cbd8 --- /dev/null +++ b/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.0; + +contract LongReturnData{ + function longReturnData() external returns (bool, bytes memory) { + // do some recursion, let's have more layers + (bool success, bytes memory _tmp) = this.longReturnData{gas: 79500000}(); + require(success == false); // they should fail by design + assembly { + return(0, 0xffffffffffffffff) + } + } +} diff --git a/etc/contracts-test-data/package.json b/etc/contracts-test-data/package.json index a68fd1074344..8b528a66093b 100644 --- a/etc/contracts-test-data/package.json +++ b/etc/contracts-test-data/package.json @@ -7,6 +7,6 @@ "hardhat": "2.12.4" }, "devDependencies": { - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3" + "@matterlabs/hardhat-zksync-solc": "^0.3.15" } } diff --git a/etc/env/base/README.md b/etc/env/base/README.md index a41c6fb9e284..e251eee6ec45 100644 --- a/etc/env/base/README.md +++ b/etc/env/base/README.md @@ -1,7 +1,7 @@ # Base configuration for zkSync stack This folder contains the template for generating the configuration for zkSync applications. Configs in this folder are -assigned default values suitable for the development. +assigned default values suitable for development. Since all the applications expect configuration to be set via the environment variables, these configs are compiled into one `*.env` file, which will be loaded prior to the application launch. diff --git a/etc/env/base/api.toml b/etc/env/base/api.toml index 3d1ac7b4b727..4d9e9190a567 100644 --- a/etc/env/base/api.toml +++ b/etc/env/base/api.toml @@ -51,7 +51,7 @@ url="http://127.0.0.1:3070" # Interval between polling db for network stats (in ms). network_stats_polling_interval=1000 req_entities_limit=100 -offset_limit=10000 +offset_limit=250 threads_per_server=128 # Configuration for the prometheus exporter server. @@ -59,3 +59,7 @@ threads_per_server=128 listener_port=3312 pushgateway_url="http://127.0.0.1:9091" push_interval_ms=100 + +# Configuration for the healtcheck server. +[api.healthcheck] +port=3071 diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 4eb387b47788..d74051b6a83d 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -44,9 +44,6 @@ reject_tx_at_eth_params_percentage=0.95 # it takes more percentage of the max block gas capacity than this value. reject_tx_at_gas_percentage=0.95 - -# Whether all transactions should be reexecuted. This is needed to test the rollback functionality. -reexecute_each_tx=true bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b594c" default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" @@ -55,6 +52,7 @@ fair_l2_gas_price=250000000 # Max number of computational gas that validation step is allowed to take. 
validation_computational_gas_limit=300000 +save_call_traces=true [chain.operations_manager] # Sleep time when there is no new input data @@ -69,3 +67,5 @@ remove_stuck_txs=true [chain.circuit_breaker] sync_interval_ms=30000 +http_req_max_retry_number=5 +http_req_retry_interval_sec=2 diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 63c98bd97c2d..5278ac5af55e 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -19,7 +19,7 @@ L1_ALLOW_LIST_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" CREATE2_FACTORY_ADDR="0xce0042B868300000d44A59004Da54A005ffdcf9f" VALIDATOR_TIMELOCK_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 -VK_COMMITMENT_BASIC_CIRCUITS="0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0" +VK_COMMITMENT_BASIC_CIRCUITS="0x0a3657f884af32d3a573c5fdb3440c9ac45271ede8c982faeaae7434d032ab3e" VK_COMMITMENT_LEAF="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" VK_COMMITMENT_NODE="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" @@ -29,6 +29,9 @@ DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=10000000 GENESIS_BLOCK_COMMITMENT="0x6c7f89335e3ade24a7768ed73c425afd9fac92a094e0681f76cb6feabf8b6223" # Current rollup leaf index after genesis GENESIS_ROLLUP_LEAF_INDEX="21" +L1_WETH_BRIDGE_IMPL_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" +L1_WETH_BRIDGE_PROXY_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" +L1_WETH_TOKEN_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" [contracts.test] dummy_verifier=true diff --git a/etc/env/base/database.toml b/etc/env/base/database.toml index 3e039a7e59f3..380b64939592 100644 --- a/etc/env/base/database.toml +++ b/etc/env/base/database.toml @@ -1,11 +1,11 @@ [database] # Path to the database data directory. -path="./db" +path="./db/main/full" # Path to the database data directory that contains state cache. -state_keeper_db_path="./db/state_keeper" -merkle_tree_backup_path="./db/backups" -merkle_tree_fast_ssd_path="./db/lightweight" +state_keeper_db_path="./db/main/state_keeper" +merkle_tree_backup_path="./db/main/backups" +merkle_tree_fast_ssd_path="./db/main/lightweight" backup_count=5 backup_interval_ms=60000 max_block_batch=100 diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index 7dd973c23f0e..60193788fd6f 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -41,6 +41,9 @@ max_aggregated_tx_gas=4000000 # Max gas that can used to include single block in aggregated operation max_single_tx_gas=6000000 +# Max acceptable fee for sending tx to L1 +max_acceptable_priority_fee_in_gwei=100000000000 + [eth_sender.gas_adjuster] # Priority fee to be used by GasAdjuster (in wei). 
default_priority_fee_per_gas=1_000_000_000 diff --git a/etc/env/base/house_keeper.toml b/etc/env/base/house_keeper.toml new file mode 100644 index 000000000000..8c0614600d64 --- /dev/null +++ b/etc/env/base/house_keeper.toml @@ -0,0 +1,8 @@ +[house_keeper] +l1_batch_metrics_reporting_interval_ms=10000 +blob_cleaning_interval_ms=60000 +gpu_prover_queue_reporting_interval_ms=10000 +prover_job_retrying_interval_ms=300000 +prover_stats_reporting_interval_ms=5000 +witness_job_moving_interval_ms=30000 +witness_generator_stats_reporting_interval_ms=10000 diff --git a/etc/env/base/object_store.toml b/etc/env/base/object_store.toml index b06ec092fce5..e6491b278e53 100644 --- a/etc/env/base/object_store.toml +++ b/etc/env/base/object_store.toml @@ -2,3 +2,5 @@ bucket_base_url="base_url" mode="FileBacked" file_backed_base_path="artifacts" +gcs_credential_file_path="/path/to/gcs_credentials.json" +max_retries=5 diff --git a/etc/env/base/prover.toml b/etc/env/base/prover.toml index 7071f499ce36..3504b41343f2 100644 --- a/etc/env/base/prover.toml +++ b/etc/env/base/prover.toml @@ -1,7 +1,7 @@ [prover.non_gpu] prometheus_port=3313 initial_setup_key_path="./../../../keys/setup/setup_2^22.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^22.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^22.key" generation_timeout_in_secs=2700 number_of_threads=22 max_attempts=1 @@ -16,7 +16,7 @@ specialized_prover_group_id=0 [prover.two_gpu_forty_gb_mem] prometheus_port=3313 initial_setup_key_path="./../../../keys/setup/setup_2^26.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^26.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key" generation_timeout_in_secs=2700 number_of_threads=5 max_attempts=1 @@ -31,7 +31,7 @@ specialized_prover_group_id=1 [prover.one_gpu_eighty_gb_mem] prometheus_port=3313 initial_setup_key_path="./../../../keys/setup/setup_2^26.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^26.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key" generation_timeout_in_secs=2700 number_of_threads=5 max_attempts=1 @@ -46,7 +46,7 @@ specialized_prover_group_id=2 [prover.two_gpu_eighty_gb_mem] prometheus_port=3313 initial_setup_key_path="./../../../keys/setup/setup_2^26.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^26.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key" generation_timeout_in_secs=2700 number_of_threads=9 max_attempts=1 @@ -61,7 +61,7 @@ specialized_prover_group_id=3 [prover.four_gpu_eighty_gb_mem] prometheus_port=3313 initial_setup_key_path="./../../../keys/setup/setup_2^26.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^26.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^26.key" generation_timeout_in_secs=2700 number_of_threads=18 max_attempts=1 diff --git a/etc/env/base/prover_group.toml b/etc/env/base/prover_group.toml index 6bc0ca001354..7372a407f3b8 100644 --- a/etc/env/base/prover_group.toml +++ b/etc/env/base/prover_group.toml @@ -11,3 +11,7 @@ group_8_circuit_ids="16,17" group_9_circuit_ids="3" group_100_circuit_ids="" region_read_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location" +region_override="us-central-1" 
+zone_read_url="http://metadata.google.internal/computeMetadata/v1/instance/zone" +zone_override="us-central-1-b" +synthesizer_per_gpu="10" diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index b23144a1e335..5d49878f3496 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -27,6 +27,8 @@ zksync_object_store=info,\ setup_key_generator_and_server=info,\ zksync_circuit_synthesizer=info,\ en_playground=info,\ +zksync_external_node=info,\ +cross_nodes_checker=debug,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/env/base/witness_generator.toml b/etc/env/base/witness_generator.toml index 9b77ccf8fcdc..9deaade9d2c9 100644 --- a/etc/env/base/witness_generator.toml +++ b/etc/env/base/witness_generator.toml @@ -1,6 +1,6 @@ [witness] generation_timeout_in_secs=900 initial_setup_key_path="./keys/setup/setup_2^22.key" -key_download_url="https://storage.googleapis.com/universal-setup/setup_2^22.key" +key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^22.key" max_attempts=1 dump_arguments_for_blocks="2,3" diff --git a/etc/prover-test-data/proof.bin b/etc/prover-test-data/proof.bin new file mode 100644 index 000000000000..456a013498da Binary files /dev/null and b/etc/prover-test-data/proof.bin differ diff --git a/etc/tokens/sepolia.json b/etc/tokens/sepolia.json new file mode 100644 index 000000000000..2649a6b31a8f --- /dev/null +++ b/etc/tokens/sepolia.json @@ -0,0 +1,8 @@ +[ + { + "name": "DAI", + "symbol": "DAI", + "decimals": 18, + "address": "0x35EfF6eA96571ff475136117FdD92A9ba25b1f37" + } +] diff --git a/flake.lock b/flake.lock index 3995d1abc87a..3b683c15c855 100644 --- a/flake.lock +++ b/flake.lock @@ -7,16 +7,16 @@ }, "stable": { "locked": { - "lastModified": 1659446231, - "narHash": "sha256-hekabNdTdgR/iLsgce5TGWmfIDZ86qjPhxDg/8TlzhE=", + "lastModified": 1683478192, + "narHash": "sha256-7f7RR71w0jRABDgBwjq3vE1yY3nrVJyXk8hDzu5kl1E=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "eabc38219184cc3e04a974fe31857d8e0eac098d", + "rev": "c568239bcc990050b7aedadb7387832440ad8fb1", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-21.11", + "ref": "nixos-22.11", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index f1e7ae5b9dc3..e4fe935907e7 100644 --- a/flake.nix +++ b/flake.nix @@ -1,7 +1,7 @@ { description = "zkSync development shell"; inputs = { - stable.url = "github:NixOS/nixpkgs/nixos-21.11"; + stable.url = "github:NixOS/nixpkgs/nixos-22.11"; }; outputs = {self, stable}: { packages.x86_64-linux.default = diff --git a/infrastructure/local-setup-preparation/package.json b/infrastructure/local-setup-preparation/package.json index 61fc44e1e916..e8f0b062b1a8 100644 --- a/infrastructure/local-setup-preparation/package.json +++ b/infrastructure/local-setup-preparation/package.json @@ -4,8 +4,8 @@ "main": "build/index.js", "license": "MIT", "dependencies": { - "ts-node": "^10.7.0", "ethers": "~5.5.0", + "ts-node": "^10.7.0", "zksync-web3": "link:../../sdk/zksync-web3.js" }, "devDependencies": { diff --git a/infrastructure/zk/package.json b/infrastructure/zk/package.json index e88091396864..38aaca784005 100644 --- a/infrastructure/zk/package.json +++ b/infrastructure/zk/package.json @@ -21,7 +21,7 @@ "zksync-web3": "link:../../sdk/zksync-web3.js" }, "devDependencies": { - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", + "@matterlabs/hardhat-zksync-solc": "^0.3.15", "@types/deep-extend": "^0.4.31", "@types/node": "^14.6.1", "@types/node-fetch": "^2.5.7", diff --git 
a/infrastructure/zk/src/clean.ts b/infrastructure/zk/src/clean.ts index f5dc76446eb6..29c9180194bd 100644 --- a/infrastructure/zk/src/clean.ts +++ b/infrastructure/zk/src/clean.ts @@ -1,14 +1,11 @@ import { Command } from 'commander'; import * as fs from 'fs'; +import * as path from 'path'; import { confirmAction } from './utils'; export function clean(path: string) { if (fs.existsSync(path)) { - if (fs.lstatSync(path).isDirectory()) { - fs.rmdirSync(path, { recursive: true }); - } else { - fs.rmSync(path); - } + fs.rmSync(path, { recursive: true }); console.log(`Successfully removed ${path}`); } } @@ -16,7 +13,6 @@ export function clean(path: string) { export const command = new Command('clean') .option('--config [environment]') .option('--database') - .option('--backups') .option('--contracts') .option('--artifacts') .option('--all') @@ -30,7 +26,6 @@ export const command = new Command('clean') if (cmd.all || cmd.config) { const env = process.env.ZKSYNC_ENV; clean(`etc/env/${env}.env`); - clean('etc/env/.current'); clean('etc/env/.init.env'); } @@ -39,11 +34,8 @@ export const command = new Command('clean') } if (cmd.all || cmd.database) { - clean('db'); - } - - if (cmd.all || cmd.backups) { - clean('backups'); + const dbPath = process.env.DATABASE_PATH!; + clean(path.dirname(dbPath)); } if (cmd.all || cmd.contracts) { diff --git a/infrastructure/zk/src/compiler.ts b/infrastructure/zk/src/compiler.ts index ee69e60ac211..c8dca3851ade 100644 --- a/infrastructure/zk/src/compiler.ts +++ b/infrastructure/zk/src/compiler.ts @@ -3,7 +3,10 @@ import * as utils from './utils'; export async function compileTestContracts() { await utils.spawn('yarn --cwd etc/contracts-test-data hardhat compile'); - await utils.spawn('yarn --cwd core/tests/ts-integration/contracts hardhat compile'); + process.chdir('core/tests/ts-integration'); + await utils.spawn('yarn hardhat compile'); + await utils.spawn('yarn hardhat run ./scripts/compile-yul.ts'); + process.chdir('../../..'); } export async function compileSystemContracts() { diff --git a/infrastructure/zk/src/config.ts b/infrastructure/zk/src/config.ts index eb9bd72dc6fe..bee01b07e584 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -21,7 +21,8 @@ const CONFIG_FILES = [ 'fetcher.toml', 'witness_generator.toml', 'circuit_synthesizer.toml', - 'prover_group.toml' + 'prover_group.toml', + 'house_keeper.toml' ]; function loadConfigFile(path: string) { diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index 1e1f23becd1f..bbd0ca5861a9 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -62,10 +62,7 @@ export async function deployL2(args: any[] = []) { // Skip compilation for local setup, since we already copied artifacts into the container. await utils.spawn(`${baseCommandL2} build`); - await utils.spawn(`${baseCommandL2} compile-and-deploy-libs ${args.join(' ')}`); - // IMPORTANT: initialize-bridges must go strictly *right after* the compile-and-deploy-libs step. - // Otherwise, the ExternalDecoder library will be erased. 
await utils.spawn(`${baseCommandL1} initialize-bridges ${args.join(' ')} | tee deployL2.log`); await utils.spawn(`${baseCommandL2} deploy-testnet-paymaster ${args.join(' ')} | tee -a deployL2.log`); @@ -107,6 +104,9 @@ export async function deployL1(args: any[]) { 'CONTRACTS_GENESIS_TX_HASH', 'CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR', 'CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR', + 'CONTRACTS_L1_WETH_BRIDGE_IMPL_ADDR', + 'CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR', + 'CONTRACTS_L1_WETH_TOKEN_ADDR', 'CONTRACTS_L1_ALLOW_LIST_ADDR' ]; const updatedContracts = updateContractsEnv(deployLog, envVars); diff --git a/infrastructure/zk/src/database/database.ts b/infrastructure/zk/src/database.ts similarity index 89% rename from infrastructure/zk/src/database/database.ts rename to infrastructure/zk/src/database.ts index c9e34734d160..25eefbd0c2f7 100644 --- a/infrastructure/zk/src/database/database.ts +++ b/infrastructure/zk/src/database.ts @@ -1,6 +1,6 @@ import { Command } from 'commander'; -import * as utils from '../utils'; -import * as env from '../env'; +import * as utils from './utils'; +import * as env from './env'; export async function reset() { await utils.confirmAction(); @@ -39,9 +39,8 @@ export async function generateMigration(name: String) { export async function setup() { process.chdir('core/lib/dal'); - const localDbUrl = 'postgres://postgres@localhost/zksync_local'; - const localTestDbUrl = 'postgres://postgres@localhost/zksync_local_test'; - if (process.env.DATABASE_URL == localDbUrl || process.env.DATABASE_URL == localTestDbUrl) { + const localDbUrl = 'postgres://postgres@localhost'; + if (process.env.DATABASE_URL!.startsWith(localDbUrl)) { console.log(`Using localhost database:`); console.log(`DATABASE_URL = ${process.env.DATABASE_URL}`); } else { diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 3b916ec16b72..cda9fc983e71 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -4,12 +4,14 @@ import * as contract from './contract'; const IMAGES = [ 'server-v2', + 'external-node', 'contract-verifier', 'prover-v2', 'geth', 'local-node', 'zk-environment', - 'circuit-synthesizer' + 'circuit-synthesizer', + 'witness-generator' ]; const UNIX_TIMESTAMP = Date.now(); @@ -52,7 +54,15 @@ async function dockerCommand(command: 'push' | 'build', image: string, customTag } function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: string) { - const tagList = ['server-v2', 'prover', 'contract-verifier', 'prover-v2', 'circuit-synthesizer'].includes(image) + const tagList = [ + 'server-v2', + 'external-node', + 'prover', + 'contract-verifier', + 'prover-v2', + 'circuit-synthesizer', + 'witness-generator' + ].includes(image) ? 
['latest2.0', `2.0-${imageTagSha}`, `2.0-${imageTagShaTS}`] : [`latest2.0`]; @@ -60,7 +70,7 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin } async function _build(image: string, tagList: string[]) { - if (image == 'server-v2' || image == 'prover') { + if (image == 'server-v2' || image == 'external-node' || image == 'prover') { await contract.build(); } @@ -87,6 +97,13 @@ async function _push(image: string, tagList: string[]) { `docker tag matterlabs/${image}:${tag} us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}` ); await utils.spawn(`docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}`); + + if (image == 'circuit-synthesizer') { + await utils.spawn( + `docker tag us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag} asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}` + ); + await utils.spawn(`docker push asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}`); + } } } diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index 2b03e361977d..805add4e45c1 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -2,7 +2,7 @@ import { program, Command } from 'commander'; import { spawnSync } from 'child_process'; -import { command as server } from './server'; +import { serverCommand as server, enCommand as en } from './server'; import { command as contractVerifier } from './contract_verifier'; import { command as up } from './up'; import { command as down } from './down'; @@ -17,11 +17,12 @@ import { command as compiler } from './compiler'; import { command as completion } from './completion'; import { command as config } from './config'; import { command as clean } from './clean'; -import { command as db } from './database/database'; +import { command as db } from './database'; import * as env from './env'; const COMMANDS = [ server, + en, contractVerifier, up, down, diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index 8e4c88161ad1..928b2e426fc4 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -7,7 +7,7 @@ import * as server from './server'; import * as contract from './contract'; import * as run from './run/run'; import * as compiler from './compiler'; -import * as db from './database/database'; +import * as db from './database'; import { clean } from './clean'; import * as env from './env'; import * as docker from './docker'; @@ -97,6 +97,7 @@ function createVolumes() { } async function submoduleUpdate() { + await utils.exec('git submodule init'); await utils.exec('git submodule update'); } diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index bd1735686674..1317c67ee7b9 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -22,6 +22,14 @@ export async function server(rebuildTree: boolean, openzeppelinTests: boolean, c await utils.spawn(`cargo run --bin zksync_server --release ${options}`); } +export async function externalNode() { + if (process.env.ZKSYNC_ENV != 'ext-node') { + console.warn(`WARINING: using ${process.env.ZKSYNC_ENV} environment for external node`); + console.warn('If this is a mistake, set $ZKSYNC_ENV to "ext-node" or other environment'); + } + await utils.spawn('cargo run --release --bin zksync_external_node'); +} + async function create_genesis(cmd: string) { await utils.confirmAction(); await utils.spawn(`${cmd} | tee genesis.log`); @@ -92,7 +100,7 @@ export 
async function genesisFromBinary() { await create_genesis('zksync_server --genesis'); } -export const command = new Command('server') +export const serverCommand = new Command('server') .description('start zksync server') .option('--genesis', 'generate genesis data via server') .option('--rebuild-tree', 'rebuilds merkle tree from database logs', 'rebuild_tree') @@ -105,3 +113,7 @@ export const command = new Command('server') await server(cmd.rebuildTree, cmd.openzeppelinTests, cmd.components); } }); + +export const enCommand = new Command('external-node').description('start zksync external node').action(async () => { + await externalNode(); +}); diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts index 096e3198cb2d..2f53ef4aace3 100644 --- a/infrastructure/zk/src/test/integration.ts +++ b/infrastructure/zk/src/test/integration.ts @@ -15,7 +15,7 @@ export async function all() { export async function api(bail: boolean = false) { const flag = bail ? ' --bail' : ''; - await utils.spawn('yarn ts-tests api-test' + flag); + await utils.spawn('yarn ts-integration api-test' + flag); } export async function server() { diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index def91a724def..821a59714448 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -2,7 +2,7 @@ import { Command } from 'commander'; import * as utils from '../utils'; import * as integration from './integration'; -import * as db from '../database/database'; +import * as db from '../database'; export { integration }; export async function l1Contracts() { diff --git a/sdk/zksync-rs/src/ethereum/mod.rs b/sdk/zksync-rs/src/ethereum/mod.rs index 4a1e5faa13e5..55f277e09dfa 100644 --- a/sdk/zksync-rs/src/ethereum/mod.rs +++ b/sdk/zksync-rs/src/ethereum/mod.rs @@ -11,12 +11,13 @@ use zksync_types::{ transports::Http, types::{TransactionReceipt, H160, H256, U256}, }, - L1ChainId, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U64, + L1ChainId, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; use zksync_web3_decl::namespaces::{EthNamespaceClient, ZksNamespaceClient}; -use zksync_eth_client::clients::http_client::Error; -use zksync_eth_client::{ETHDirectClient, EthInterface}; +use zksync_eth_client::{ + clients::http::SigningClient, types::Error, BoundEthInterface, EthInterface, +}; use zksync_eth_signer::EthereumSigner; use zksync_types::network::Network; use zksync_types::{l1::L1Tx, Address, L1TxCommonData}; @@ -58,7 +59,7 @@ pub fn l1_bridge_contract() -> ethabi::Contract { /// via `EthereumProvider::web3` method. 
#[derive(Debug)] pub struct EthereumProvider { - eth_client: ETHDirectClient, + eth_client: SigningClient, default_bridges: BridgeAddresses, erc20_abi: ethabi::Contract, l1_bridge_abi: ethabi::Contract, @@ -83,13 +84,11 @@ impl EthereumProvider { .map_err(|err| ClientError::NetworkError(err.to_string()))?; let l1_chain_id = provider.l1_chain_id().await?; - if l1_chain_id > U64::from(u16::MAX) { - return Err(ClientError::MalformedResponse( - "Chain id overflow".to_owned(), - )); - } - let l1_chain_id = - u8::try_from(l1_chain_id).expect("Expected chain id to be in range 0..256"); + let l1_chain_id = u64::try_from(l1_chain_id).map_err(|_| { + ClientError::MalformedResponse( + "Chain id overflow - Expected chain id to be in range 0..2^64".to_owned(), + ) + })?; let contract_address = provider.get_main_contract().await?; let default_bridges = provider @@ -97,7 +96,7 @@ impl EthereumProvider { .await .map_err(|err| ClientError::NetworkError(err.to_string()))?; - let eth_client = ETHDirectClient::new( + let eth_client = SigningClient::new( transport, zksync_contract(), eth_addr, @@ -120,7 +119,7 @@ impl EthereumProvider { } /// Exposes Ethereum node `web3` API. - pub fn client(&self) -> ÐDirectClient { + pub fn client(&self) -> &SigningClient { &self.eth_client } @@ -209,7 +208,7 @@ impl EthereumProvider { let bridge = bridge.unwrap_or(self.default_bridges.l1_erc20_default_bridge); let current_allowance = self .client() - .allowance_on_contract(token_address, bridge, self.erc20_abi.clone()) + .allowance_on_account(token_address, bridge, self.erc20_abi.clone()) .await .map_err(|err| ClientError::NetworkError(err.to_string()))?; diff --git a/sdk/zksync-web3.js/CHANGELOG.md b/sdk/zksync-web3.js/CHANGELOG.md index e9d1fbeb8d96..8c769bb56720 100644 --- a/sdk/zksync-web3.js/CHANGELOG.md +++ b/sdk/zksync-web3.js/CHANGELOG.md @@ -1,5 +1,36 @@ # Changelog +## [0.15.1](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.15.0...zksync-web3-v0.15.1) (2023-04-24) + + +### Bug Fixes + +* add coeficient to gas limit + method for full fee estimation ([#1622](https://github.com/matter-labs/zksync-2-dev/issues/1622)) ([229cda9](https://github.com/matter-labs/zksync-2-dev/commit/229cda977daa11a98a97515a2f75d709e2e8ed9a)) + +## [0.15.0](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.14.4...zksync-web3-v0.15.0) (2023-04-20) + + +### ⚠ BREAKING CHANGES + +* Implement WETH bridge, support custom bridge in sdk, bootloader gas calculation fix ([#1633](https://github.com/matter-labs/zksync-2-dev/issues/1633)) + +### Features + +* Implement WETH bridge, support custom bridge in sdk, bootloader gas calculation fix ([#1633](https://github.com/matter-labs/zksync-2-dev/issues/1633)) ([eb67ec5](https://github.com/matter-labs/zksync-2-dev/commit/eb67ec555bc027137d80122873cd12a93f9234c6)) + + +### Bug Fixes + +* **sdk:** Fix getSignInput when gas parameters are 0 ([#1695](https://github.com/matter-labs/zksync-2-dev/issues/1695)) ([cf61772](https://github.com/matter-labs/zksync-2-dev/commit/cf61772ba612bd3532ad3d3b808d18e25c12973f)) + +## [0.14.4](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.14.1...zksync-web3-v0.14.4) (2023-04-13) + + +### Features + +* add getL1BatchDetails method to js SDK ([#1666](https://github.com/matter-labs/zksync-2-dev/issues/1666)) ([babb8a9](https://github.com/matter-labs/zksync-2-dev/commit/babb8a94466a8f8c81a19391d61aa9ea66f9cfa8)) +* **sdk:** extend BlockDetails type to include l1BatchNumber 
([#1677](https://github.com/matter-labs/zksync-2-dev/issues/1677)) ([67acf90](https://github.com/matter-labs/zksync-2-dev/commit/67acf90301e401004d41361b43f2d3336a48676e)) + ## [0.14.0](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.13.3...zksync-web3-v0.14.0) (2023-03-21) diff --git a/sdk/zksync-web3.js/abi/IL1Bridge.json b/sdk/zksync-web3.js/abi/IL1Bridge.json index f0cb575994f5..8908becfbaaf 100644 --- a/sdk/zksync-web3.js/abi/IL1Bridge.json +++ b/sdk/zksync-web3.js/abi/IL1Bridge.json @@ -28,6 +28,12 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "l2DepositTxHash", + "type": "bytes32" + }, { "indexed": true, "internalType": "address", @@ -41,7 +47,7 @@ "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "address", "name": "l1Token", "type": "address" @@ -150,6 +156,11 @@ "internalType": "uint256", "name": "_l2TxGasPerPubdataByte", "type": "uint256" + }, + { + "internalType": "address", + "name": "_refundRecipient", + "type": "address" } ], "name": "deposit", @@ -220,6 +231,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "l2Bridge", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { diff --git a/sdk/zksync-web3.js/abi/IL2Bridge.json b/sdk/zksync-web3.js/abi/IL2Bridge.json index e62a099dcd59..d6ca3ce598fd 100644 --- a/sdk/zksync-web3.js/abi/IL2Bridge.json +++ b/sdk/zksync-web3.js/abi/IL2Bridge.json @@ -92,7 +92,7 @@ ], "name": "finalizeDeposit", "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "payable", "type": "function" }, { diff --git a/sdk/zksync-web3.js/abi/IZkSync.json b/sdk/zksync-web3.js/abi/IZkSync.json index 0196291d0185..b553291d6c29 100644 --- a/sdk/zksync-web3.js/abi/IZkSync.json +++ b/sdk/zksync-web3.js/abi/IZkSync.json @@ -176,6 +176,25 @@ "name": "IsPorterAvailableStatusUpdate", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "oldAllowList", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newAllowList", + "type": "address" + } + ], + "name": "NewAllowList", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -959,6 +978,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "getAllowList", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "getCurrentProposalId", @@ -1050,6 +1082,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "getPriorityTxMaxGasLimit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "getProposedUpgradeHash", @@ -1197,19 +1242,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "getpriorityTxMaxGasLimit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "isApprovedBySecurityCouncil", @@ -1825,6 +1857,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "contract IAllowList", + "name": "_newAllowList", + "type": "address" + } + ], + "name": "setAllowList", + "outputs": 
[], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { diff --git a/sdk/zksync-web3.js/package.json b/sdk/zksync-web3.js/package.json index 619c45bf676e..932e3938695d 100644 --- a/sdk/zksync-web3.js/package.json +++ b/sdk/zksync-web3.js/package.json @@ -1,6 +1,6 @@ { "name": "zksync-web3", - "version": "0.14.1", + "version": "0.15.1", "main": "build/src/index.js", "types": "build/src/index.d.ts", "files": [ @@ -10,7 +10,6 @@ "typechain/" ], "license": "MIT", - "dependencies": {}, "devDependencies": { "@types/chai": "^4.2.19", "@types/mocha": "^8.2.2", diff --git a/sdk/zksync-web3.js/src/adapters.ts b/sdk/zksync-web3.js/src/adapters.ts index c8b3db64f93b..ab7d68a53162 100644 --- a/sdk/zksync-web3.js/src/adapters.ts +++ b/sdk/zksync-web3.js/src/adapters.ts @@ -1,7 +1,15 @@ import { BigNumber, BigNumberish, BytesLike, ethers } from 'ethers'; import { IERC20MetadataFactory, IL1BridgeFactory, IL2BridgeFactory, IZkSyncFactory } from '../typechain'; import { Provider } from './provider'; -import { Address, BalancesMap, BlockTag, Eip712Meta, PriorityOpResponse, TransactionResponse } from './types'; +import { + Address, + BalancesMap, + BlockTag, + Eip712Meta, + FullDepositFee, + PriorityOpResponse, + TransactionResponse +} from './types'; import { BOOTLOADER_FORMAL_ADDRESS, checkBaseCost, @@ -12,7 +20,12 @@ import { L1_MESSENGER_ADDRESS, layer1TxDefaults, undoL1ToL2Alias, - estimateDefaultBridgeDepositL2Gas + estimateDefaultBridgeDepositL2Gas, + scaleGasLimit, + L1_RECOMMENDED_MIN_ETH_DEPOSIT_GAS_LIMIT, + L1_RECOMMENDED_MIN_ERC20_DEPOSIT_GAS_LIMIT, + estimateCustomBridgeDepositL2Gas, + getERC20DefaultBridgeData } from './utils'; type Constructor = new (...args: any[]) => T; @@ -56,6 +69,16 @@ export function AdapterL1>(Base: TBase) { } } + async getAllowanceL1( + token: Address, + bridgeAddress?: Address, + blockTag?: ethers.providers.BlockTag + ): Promise { + const erc20contract = IERC20MetadataFactory.connect(token, this._providerL1()); + bridgeAddress ??= (await this.getL1BridgeContracts()).erc20.address; + return await erc20contract.allowance(await this.getAddress(), bridgeAddress, { blockTag }); + } + async l2TokenAddress(token: Address) { if (token == ETH_ADDRESS) { return ETH_ADDRESS; @@ -83,6 +106,8 @@ export function AdapterL1>(Base: TBase) { delete overrides.bridgeAddress; } + overrides ??= {}; + return await erc20contract.approve(bridgeAddress, amount, overrides); } @@ -114,21 +139,44 @@ export function AdapterL1>(Base: TBase) { approveERC20?: boolean; l2GasLimit?: BigNumberish; gasPerPubdataByte?: BigNumberish; + refundRecipient?: Address; overrides?: ethers.PayableOverrides; approveOverrides?: ethers.Overrides; + customBridgeData?: BytesLike; }): Promise { const depositTx = await this.getDepositTx(transaction); + if (transaction.token == ETH_ADDRESS) { + const baseGasLimit = await this.estimateGasRequestExecute(depositTx); + const gasLimit = scaleGasLimit(baseGasLimit); + + depositTx.overrides ??= {}; + depositTx.overrides.gasLimit ??= gasLimit; + return this.requestExecute(depositTx); } else { const bridgeContracts = await this.getL1BridgeContracts(); if (transaction.approveERC20) { - const approveTx = await this.approveERC20(transaction.token, transaction.amount, { - bridgeAddress: transaction.bridgeAddress ?? bridgeContracts.erc20.address, - ...transaction.approveOverrides - }); - await approveTx.wait(); + const bridgeAddress = transaction.bridgeAddress + ? 
transaction.bridgeAddress + : bridgeContracts.erc20.address; + + // We only request the allowance if the current one is not enough. + const allowance = await this.getAllowanceL1(transaction.token, bridgeAddress); + if (allowance.lt(transaction.amount)) { + const approveTx = await this.approveERC20(transaction.token, transaction.amount, { + bridgeAddress, + ...transaction.approveOverrides + }); + await approveTx.wait(); + } } + + const baseGasLimit = await this._providerL1().estimateGas(depositTx); + const gasLimit = scaleGasLimit(baseGasLimit); + + depositTx.gasLimit ??= gasLimit; + return await this._providerL2().getPriorityOpResponse( await this._signerL1().sendTransaction(depositTx) ); @@ -141,16 +189,22 @@ export function AdapterL1>(Base: TBase) { to?: Address; operatorTip?: BigNumberish; bridgeAddress?: Address; + customBridgeData?: BytesLike; l2GasLimit?: BigNumberish; gasPerPubdataByte?: BigNumberish; + refundRecipient?: Address; overrides?: ethers.PayableOverrides; }): Promise { const depositTx = await this.getDepositTx(transaction); + + let baseGasLimit: BigNumber; if (transaction.token == ETH_ADDRESS) { - return await this.estimateGasRequestExecute(depositTx); + baseGasLimit = await this.estimateGasRequestExecute(depositTx); } else { - return await this._providerL1().estimateGas(depositTx); + baseGasLimit = await this._providerL1().estimateGas(depositTx); } + + return scaleGasLimit(baseGasLimit); } async getDepositTx(transaction: { @@ -161,11 +215,13 @@ export function AdapterL1>(Base: TBase) { bridgeAddress?: Address; l2GasLimit?: BigNumberish; gasPerPubdataByte?: BigNumberish; + customBridgeData?: BytesLike; + refundRecipient?: Address; overrides?: ethers.PayableOverrides; }): Promise { const bridgeContracts = await this.getL1BridgeContracts(); - if (transaction.bridgeAddress) { - bridgeContracts.erc20.attach(transaction.bridgeAddress); + if (transaction.bridgeAddress != null) { + bridgeContracts.erc20 = bridgeContracts.erc20.attach(transaction.bridgeAddress); } const { ...tx } = transaction; @@ -173,23 +229,43 @@ export function AdapterL1>(Base: TBase) { tx.operatorTip ??= BigNumber.from(0); tx.overrides ??= {}; tx.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; - tx.l2GasLimit ??= await estimateDefaultBridgeDepositL2Gas( - this._providerL1(), - this._providerL2(), - tx.token, - tx.amount, - tx.to, - await this.getAddress(), - tx.gasPerPubdataByte - ); + if (tx.bridgeAddress != null) { + const customBridgeData = + tx.customBridgeData ?? 
(await getERC20DefaultBridgeData(tx.token, this._providerL1())); + let bridge = IL1BridgeFactory.connect(tx.bridgeAddress, this._signerL1()); + let l2Address = await bridge.l2Bridge(); + tx.l2GasLimit ??= await estimateCustomBridgeDepositL2Gas( + this._providerL2(), + tx.bridgeAddress, + l2Address, + tx.token, + tx.amount, + tx.to, + customBridgeData, + await this.getAddress(), + tx.gasPerPubdataByte + ); + } else { + tx.l2GasLimit ??= await estimateDefaultBridgeDepositL2Gas( + this._providerL1(), + this._providerL2(), + tx.token, + tx.amount, + tx.to, + await this.getAddress(), + tx.gasPerPubdataByte + ); + } const { to, token, amount, operatorTip, overrides } = tx; - overrides.gasPrice ??= await this._providerL1().getGasPrice(); + + await insertGasPrice(this._providerL1(), overrides); + const gasPriceForEstimation = overrides.maxFeePerGas || overrides.gasPrice; const zksyncContract = await this.getMainContract(); const baseCost = await zksyncContract.l2TransactionBaseCost( - await overrides.gasPrice, + await gasPriceForEstimation, tx.l2GasLimit, tx.gasPerPubdataByte ); @@ -207,12 +283,14 @@ export function AdapterL1>(Base: TBase) { ...tx }; } else { - const args: [Address, Address, BigNumberish, BigNumberish, BigNumberish] = [ + let refundRecipient = tx.refundRecipient ?? ethers.constants.AddressZero; + const args: [Address, Address, BigNumberish, BigNumberish, BigNumberish, Address] = [ to, token, amount, tx.l2GasLimit, - tx.gasPerPubdataByte + tx.gasPerPubdataByte, + refundRecipient ]; overrides.value ??= baseCost.add(operatorTip); @@ -222,6 +300,125 @@ export function AdapterL1>(Base: TBase) { } } + // Retrieves the full needed ETH fee for the deposit. + // Returns the L1 fee and the L2 fee. + async getFullRequiredDepositFee(transaction: { + token: Address; + to?: Address; + bridgeAddress?: Address; + customBridgeData?: BytesLike; + gasPerPubdataByte?: BigNumberish; + overrides?: ethers.PayableOverrides; + }): Promise { + // It is assumed that the L2 fee for the transaction does not depend on its value. + const dummyAmount = '1'; + + const { ...tx } = transaction; + const zksyncContract = await this.getMainContract(); + + tx.overrides ??= {}; + await insertGasPrice(this._providerL1(), tx.overrides); + const gasPriceForMessages = (await tx.overrides.maxFeePerGas) || (await tx.overrides.gasPrice); + + tx.to ??= await this.getAddress(); + tx.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; + + let l2GasLimit; + if (tx.bridgeAddress != null) { + const customBridgeData = + tx.customBridgeData ?? 
(await getERC20DefaultBridgeData(tx.token, this._providerL1())); + let bridge = IL1BridgeFactory.connect(tx.bridgeAddress, this._signerL1()); + let l2Address = await bridge.l2Bridge(); + l2GasLimit ??= await estimateCustomBridgeDepositL2Gas( + this._providerL2(), + tx.bridgeAddress, + l2Address, + tx.token, + dummyAmount, + tx.to, + customBridgeData, + await this.getAddress(), + tx.gasPerPubdataByte + ); + } else { + l2GasLimit ??= await estimateDefaultBridgeDepositL2Gas( + this._providerL1(), + this._providerL2(), + tx.token, + dummyAmount, + tx.to, + await this.getAddress(), + tx.gasPerPubdataByte + ); + } + + const baseCost = await zksyncContract.l2TransactionBaseCost( + gasPriceForMessages, + l2GasLimit, + tx.gasPerPubdataByte + ); + + const selfBalanceETH = await this.getBalanceL1(); + + // We could 0 in, because the final fee will anyway be bigger than + if (baseCost.gte(selfBalanceETH.add(dummyAmount))) { + const recommendedETHBalance = BigNumber.from( + tx.token == ETH_ADDRESS + ? L1_RECOMMENDED_MIN_ETH_DEPOSIT_GAS_LIMIT + : L1_RECOMMENDED_MIN_ERC20_DEPOSIT_GAS_LIMIT + ) + .mul(gasPriceForMessages) + .add(baseCost); + const formattedRecommendedBalance = ethers.utils.formatEther(recommendedETHBalance); + throw new Error( + `Not enough balance for deposit. Under the provided gas price, the recommended balance to perform a deposit is ${formattedRecommendedBalance} ETH` + ); + } + + // For ETH token the value that the user passes to the estimation is the one which has the + // value for the L2 comission substracted. + let amountForEstimate: BigNumber; + if (isETH(tx.token)) { + amountForEstimate = BigNumber.from(dummyAmount); + } else { + amountForEstimate = BigNumber.from(dummyAmount); + + if ((await this.getAllowanceL1(tx.token)) < amountForEstimate) { + throw new Error('Not enough allowance to cover the deposit'); + } + } + + // Deleting the explicit gas limits in the fee estimation + // in order to prevent the situation where the transaction + // fails because the user does not have enough balance + const estimationOverrides = { ...tx.overrides }; + delete estimationOverrides.gasPrice; + delete estimationOverrides.maxFeePerGas; + delete estimationOverrides.maxPriorityFeePerGas; + + const l1GasLimit = await this.estimateGasDeposit({ + ...tx, + amount: amountForEstimate, + overrides: estimationOverrides, + l2GasLimit + }); + + const fullCost: FullDepositFee = { + baseCost, + l1GasLimit, + l2GasLimit + }; + + if (tx.overrides.gasPrice) { + fullCost.gasPrice = BigNumber.from(await tx.overrides.gasPrice); + } else { + fullCost.maxFeePerGas = BigNumber.from(await tx.overrides.maxFeePerGas); + fullCost.maxPriorityFeePerGas = BigNumber.from(await tx.overrides.maxPriorityFeePerGas); + } + + return fullCost; + } + async _getWithdrawalLog(withdrawalHash: BytesLike, index: number = 0) { const hash = ethers.utils.hexlify(withdrawalHash); const receipt = await this._providerL2().getTransactionReceipt(hash); @@ -380,6 +577,11 @@ export function AdapterL1>(Base: TBase) { overrides?: ethers.PayableOverrides; }): Promise { const requestExecuteTx = await this.getRequestExecuteTx(transaction); + + delete requestExecuteTx.gasPrice; + delete requestExecuteTx.maxFeePerGas; + delete requestExecuteTx.maxPriorityFeePerGas; + return this._providerL1().estimateGas(requestExecuteTx); } @@ -416,10 +618,12 @@ export function AdapterL1>(Base: TBase) { gasPerPubdataByte, refundRecipient } = tx; - overrides.gasPrice ??= await this._providerL1().getGasPrice(); + + await insertGasPrice(this._providerL1(), overrides); 
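+        // `insertGasPrice` (defined at the end of this file) only fills in `maxFeePerGas`/`maxPriorityFeePerGas`
+        // when neither `gasPrice` nor `maxFeePerGas` was provided in the overrides, so at least one of the two
+        // is guaranteed to be set before the base cost is computed below.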
+ const gasPriceForEstimation = overrides.maxFeePerGas || overrides.gasPrice; const baseCost = await this.getBaseCost({ - gasPrice: await overrides.gasPrice, + gasPrice: await gasPriceForEstimation, gasPerPubdataByte, gasLimit: l2GasLimit }); @@ -503,3 +707,21 @@ export function AdapterL2>(Base: TBase) { } }; } + +/// @dev This method checks if the overrides contain a gasPrice (or maxFeePerGas), if not it will insert +/// the maxFeePerGas +async function insertGasPrice(l1Provider: ethers.providers.Provider, overrides: ethers.PayableOverrides) { + if (!overrides.gasPrice && !overrides.maxFeePerGas) { + const l1FeeData = await l1Provider.getFeeData(); + + // Sometimes baseFeePerGas is not available, so we use gasPrice instead. + const baseFee = l1FeeData.lastBaseFeePerGas || l1FeeData.gasPrice; + + // ethers.js by default uses multiplcation by 2, but since the price for the L2 part + // will depend on the L1 part, doubling base fee is typically too much. + const maxFeePerGas = baseFee.mul(3).div(2).add(l1FeeData.maxPriorityFeePerGas); + + overrides.maxFeePerGas = maxFeePerGas; + overrides.maxPriorityFeePerGas = l1FeeData.maxPriorityFeePerGas; + } +} diff --git a/sdk/zksync-web3.js/src/index.ts b/sdk/zksync-web3.js/src/index.ts index 600a4d21070d..add020d90efd 100644 --- a/sdk/zksync-web3.js/src/index.ts +++ b/sdk/zksync-web3.js/src/index.ts @@ -1,6 +1,6 @@ export * as utils from './utils'; export * as types from './types'; -export { EIP712Signer, Signer, L1Signer } from './signer'; +export { EIP712Signer, Signer, L1Signer, L1VoidSigner, L2VoidSigner } from './signer'; export { Wallet } from './wallet'; export { Web3Provider, Provider } from './provider'; export { ContractFactory, Contract } from './contract'; diff --git a/sdk/zksync-web3.js/src/provider.ts b/sdk/zksync-web3.js/src/provider.ts index fd6ce694f18c..ff9dc6566bcc 100644 --- a/sdk/zksync-web3.js/src/provider.ts +++ b/sdk/zksync-web3.js/src/provider.ts @@ -19,6 +19,7 @@ import { BlockWithTransactions, Log, TransactionDetails, + BatchDetails, BlockDetails, ContractAccountInfo } from './types'; @@ -361,6 +362,10 @@ export class Provider extends ethers.providers.JsonRpcProvider { return BigNumber.from(number).toNumber(); } + async getL1BatchDetails(number: number): Promise { + return await this.send('zks_getL1BatchDetails', [number]); + } + async getBlockDetails(number: number): Promise { return await this.send('zks_getBlockDetails', [number]); } diff --git a/sdk/zksync-web3.js/src/signer.ts b/sdk/zksync-web3.js/src/signer.ts index 442cabc6a1a9..db04ef77bb68 100644 --- a/sdk/zksync-web3.js/src/signer.ts +++ b/sdk/zksync-web3.js/src/signer.ts @@ -35,9 +35,9 @@ export class EIP712Signer { } static getSignInput(transaction: TransactionRequest) { - const maxFeePerGas = transaction.maxFeePerGas || transaction.gasPrice; - const maxPriorityFeePerGas = transaction.maxPriorityFeePerGas || maxFeePerGas; - const gasPerPubdataByteLimit = transaction.customData?.gasPerPubdata || DEFAULT_GAS_PER_PUBDATA_LIMIT; + const maxFeePerGas = transaction.maxFeePerGas ?? transaction.gasPrice ?? 0; + const maxPriorityFeePerGas = transaction.maxPriorityFeePerGas ?? maxFeePerGas; + const gasPerPubdataByteLimit = transaction.customData?.gasPerPubdata ?? 
DEFAULT_GAS_PER_PUBDATA_LIMIT; const signInput = { txType: transaction.type, from: transaction.from, @@ -168,3 +168,89 @@ export class L1Signer extends AdapterL1(ethers.providers.JsonRpcSigner) { return this; } } + +export class L2VoidSigner extends AdapterL2(ethers.VoidSigner) { + public override provider: Provider; + public eip712: EIP712Signer; + + override _signerL2() { + return this; + } + + override _providerL2() { + return this.provider; + } + + static from(signer: ethers.VoidSigner & { provider: Provider }): L2VoidSigner { + const newSigner: L2VoidSigner = Object.setPrototypeOf(signer, L2VoidSigner.prototype); + // @ts-ignore + newSigner.eip712 = new EIP712Signer(newSigner, newSigner.getChainId()); + return newSigner; + } + + // an alias with a better name + async getNonce(blockTag?: BlockTag) { + return await this.getTransactionCount(blockTag); + } + + override async sendTransaction(transaction: TransactionRequest): Promise { + if (transaction.customData == null && transaction.type == null) { + // use legacy txs by default + transaction.type = 0; + } + if (transaction.customData == null && transaction.type != EIP712_TX_TYPE) { + return (await super.sendTransaction(transaction)) as TransactionResponse; + } else { + const address = await this.getAddress(); + transaction.from ??= address; + if (transaction.from.toLowerCase() != address.toLowerCase()) { + throw new Error('Transaction `from` address mismatch'); + } + transaction.type = EIP712_TX_TYPE; + transaction.value ??= 0; + transaction.data ??= '0x'; + transaction.nonce ??= await this.getNonce(); + transaction.customData = this._fillCustomData(transaction.customData); + transaction.gasPrice ??= await this.provider.getGasPrice(); + transaction.gasLimit ??= await this.provider.estimateGas(transaction); + transaction.chainId ??= (await this.provider.getNetwork()).chainId; + transaction.customData.customSignature = await this.eip712.sign(transaction); + + const txBytes = serialize(transaction); + return await this.provider.sendTransaction(txBytes); + } + } +} + +// This class is to be used on the frontend with metamask injection. +// It only contains L1 operations. For L2 operations, see Signer. +// Sample usage: +// const provider = new ethers.Web3Provider(window.ethereum); +// const zksyncProvider = new zkweb3.Provider(''); +// const signer = zkweb3.L1Signer.from(provider.getSigner(), zksyncProvider); +// const tx = await signer.deposit({ ... 
}); +export class L1VoidSigner extends AdapterL1(ethers.VoidSigner) { + public providerL2: Provider; + override _providerL2() { + return this.providerL2; + } + + override _providerL1() { + return this.provider; + } + + override _signerL1() { + return this; + } + + static from(signer: ethers.VoidSigner, zksyncProvider: Provider): L1VoidSigner { + const newSigner: L1VoidSigner = Object.setPrototypeOf(signer, L1VoidSigner.prototype); + newSigner.providerL2 = zksyncProvider; + return newSigner; + } + + connectToL2(provider: Provider): this { + this.providerL2 = provider; + return this; + } +} diff --git a/sdk/zksync-web3.js/src/types.ts b/sdk/zksync-web3.js/src/types.ts index 8ae6ad9784e1..29dc2713d3f9 100644 --- a/sdk/zksync-web3.js/src/types.ts +++ b/sdk/zksync-web3.js/src/types.ts @@ -174,9 +174,27 @@ export interface ContractAccountInfo { nonceOrdering: AccountNonceOrdering; } +export interface BatchDetails { + number: number; + timestamp: number; + l1TxCount: number; + l2TxCount: number; + rootHash?: string; + status: string; + commitTxHash?: string; + committedAt?: Date; + proveTxHash?: string; + provenAt?: Date; + executeTxHash?: string; + executedAt?: Date; + l1GasPrice: number; + l2FairGasPrice: number; +} + export interface BlockDetails { number: number; timestamp: number; + l1BatchNumber: number; l1TxCount: number; l2TxCount: number; rootHash?: string; @@ -199,3 +217,12 @@ export interface TransactionDetails { ethProveTxHash?: string; ethExecuteTxHash?: string; } + +export interface FullDepositFee { + maxFeePerGas?: BigNumber; + maxPriorityFeePerGas?: BigNumber; + gasPrice?: BigNumber; + baseCost: BigNumber; + l1GasLimit: BigNumber; + l2GasLimit: BigNumber; +} diff --git a/sdk/zksync-web3.js/src/utils.ts b/sdk/zksync-web3.js/src/utils.ts index 599aecedb1e6..ca5ebce005b3 100644 --- a/sdk/zksync-web3.js/src/utils.ts +++ b/sdk/zksync-web3.js/src/utils.ts @@ -41,6 +41,15 @@ export const PRIORITY_OPERATION_L2_TX_TYPE = 0xff; export const MAX_BYTECODE_LEN_BYTES = ((1 << 16) - 1) * 32; +// Currently, for some reason the SDK may return slightly smaller L1 gas limit than required for initiating L1->L2 +// transaction. We use a coefficient to ensure that the transaction will be accepted. +export const L1_FEE_ESTIMATION_COEF_NUMERATOR = BigNumber.from(12); +export const L1_FEE_ESTIMATION_COEF_DENOMINATOR = BigNumber.from(10); + +// This gas limit will be used for displaying the error messages when the users do not have enough fee. +export const L1_RECOMMENDED_MIN_ERC20_DEPOSIT_GAS_LIMIT = 400000; +export const L1_RECOMMENDED_MIN_ETH_DEPOSIT_GAS_LIMIT = 200000; + // The large L2 gas per pubdata to sign. This gas is enough to ensure that // any reasonable limit will be accepted. Note, that the operator is NOT required to // use the honest value of gas per pubdata and it can use any value up to the one signed by the user. 
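The effect of the 12/10 coefficient introduced above can be illustrated with a minimal, self-contained sketch (illustrative only, not part of this diff; `estimatedGasLimit` is an arbitrary example value):

```typescript
import { BigNumber } from 'ethers';

// Mirrors the constants added above: pad L1 gas estimates by 20% (12 / 10),
// rounding down, to compensate for estimates that occasionally undershoot.
const L1_FEE_ESTIMATION_COEF_NUMERATOR = BigNumber.from(12);
const L1_FEE_ESTIMATION_COEF_DENOMINATOR = BigNumber.from(10);

function scaleGasLimit(gasLimit: BigNumber): BigNumber {
    return gasLimit.mul(L1_FEE_ESTIMATION_COEF_NUMERATOR).div(L1_FEE_ESTIMATION_COEF_DENOMINATOR);
}

// Arbitrary example value standing in for an eth_estimateGas result.
const estimatedGasLimit = BigNumber.from(250000);
console.log(scaleGasLimit(estimatedGasLimit).toString()); // "300000"
```

This is the same scaling that `deposit` and `estimateGasDeposit` in `adapters.ts` now apply to the raw estimate before sending the L1 transaction.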
@@ -385,7 +394,10 @@ export function undoL1ToL2Alias(address: string): string { } /// Getters data used to correctly initialize the L1 token counterpart on L2 -async function getERC20GettersData(l1TokenAddress: string, provider: ethers.providers.Provider): Promise { +export async function getERC20DefaultBridgeData( + l1TokenAddress: string, + provider: ethers.providers.Provider +): Promise { const token = IERC20MetadataFactory.connect(l1TokenAddress, provider); const name = await token.name(); @@ -408,15 +420,14 @@ export async function getERC20BridgeCalldata( l1Sender: string, l2Receiver: string, amount: BigNumberish, - provider: ethers.providers.Provider + bridgeData: BytesLike ): Promise { - const gettersData = await getERC20GettersData(l1TokenAddress, provider); return L2_BRIDGE_ABI.encodeFunctionData('finalizeDeposit', [ l1Sender, l2Receiver, l1TokenAddress, amount, - gettersData + bridgeData ]); } @@ -523,14 +534,41 @@ export async function estimateDefaultBridgeDepositL2Gas( } else { const l1ERC20BridgeAddresses = (await providerL2.getDefaultBridgeAddresses()).erc20L1; const erc20BridgeAddress = (await providerL2.getDefaultBridgeAddresses()).erc20L2; + const bridgeData = await getERC20DefaultBridgeData(token, providerL1); + return await estimateCustomBridgeDepositL2Gas( + providerL2, + l1ERC20BridgeAddresses, + erc20BridgeAddress, + token, + amount, + to, + bridgeData, + from, + gasPerPubdataByte + ); + } +} - const calldata = await getERC20BridgeCalldata(token, from, to, amount, providerL1); +export function scaleGasLimit(gasLimit: BigNumber): BigNumber { + return gasLimit.mul(L1_FEE_ESTIMATION_COEF_NUMERATOR).div(L1_FEE_ESTIMATION_COEF_DENOMINATOR); +} - return await providerL2.estimateL1ToL2Execute({ - caller: applyL1ToL2Alias(l1ERC20BridgeAddresses), - contractAddress: erc20BridgeAddress, - gasPerPubdataByte: gasPerPubdataByte, - calldata: calldata - }); - } +export async function estimateCustomBridgeDepositL2Gas( + providerL2: Provider, + l1BridgeAddress: Address, + l2BridgeAddress: Address, + token: Address, + amount: BigNumberish, + to: Address, + bridgeData: BytesLike, + from?: Address, + gasPerPubdataByte?: BigNumberish +): Promise { + const calldata = await getERC20BridgeCalldata(token, from, to, amount, bridgeData); + return await providerL2.estimateL1ToL2Execute({ + caller: applyL1ToL2Alias(l1BridgeAddress), + contractAddress: l2BridgeAddress, + gasPerPubdataByte: gasPerPubdataByte, + calldata: calldata + }); } diff --git a/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts b/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts index e770ca81975f..2a1e6e4143d2 100644 --- a/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts +++ b/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts @@ -24,62 +24,65 @@ import { FunctionFragment, EventFragment, Result } from "@ethersproject/abi"; interface IL1BridgeInterface extends ethers.utils.Interface { functions: { "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])": FunctionFragment; - "deposit(address,address,uint256,uint256,uint256)": FunctionFragment; + "deposit(address,address,uint256,uint256,uint256,address)": FunctionFragment; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])": FunctionFragment; "isWithdrawalFinalized(uint256,uint256)": FunctionFragment; + "l2Bridge()": FunctionFragment; "l2TokenAddress(address)": FunctionFragment; }; encodeFunctionData( - functionFragment: "claimFailedDeposit", - values: [ - string, - string, - BytesLike, - BigNumberish, - BigNumberish, - BigNumberish, - BytesLike[] - ] + functionFragment: 
"claimFailedDeposit", + values: [ + string, + string, + BytesLike, + BigNumberish, + BigNumberish, + BigNumberish, + BytesLike[] + ] ): string; encodeFunctionData( - functionFragment: "deposit", - values: [string, string, BigNumberish, BigNumberish, BigNumberish] + functionFragment: "deposit", + values: [string, string, BigNumberish, BigNumberish, BigNumberish, string] ): string; encodeFunctionData( - functionFragment: "finalizeWithdrawal", - values: [BigNumberish, BigNumberish, BigNumberish, BytesLike, BytesLike[]] + functionFragment: "finalizeWithdrawal", + values: [BigNumberish, BigNumberish, BigNumberish, BytesLike, BytesLike[]] ): string; encodeFunctionData( - functionFragment: "isWithdrawalFinalized", - values: [BigNumberish, BigNumberish] + functionFragment: "isWithdrawalFinalized", + values: [BigNumberish, BigNumberish] ): string; + encodeFunctionData(functionFragment: "l2Bridge", values?: undefined): string; encodeFunctionData( - functionFragment: "l2TokenAddress", - values: [string] + functionFragment: "l2TokenAddress", + values: [string] ): string; decodeFunctionResult( - functionFragment: "claimFailedDeposit", - data: BytesLike + functionFragment: "claimFailedDeposit", + data: BytesLike ): Result; decodeFunctionResult(functionFragment: "deposit", data: BytesLike): Result; decodeFunctionResult( - functionFragment: "finalizeWithdrawal", - data: BytesLike + functionFragment: "finalizeWithdrawal", + data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "isWithdrawalFinalized", - data: BytesLike + functionFragment: "isWithdrawalFinalized", + data: BytesLike ): Result; + decodeFunctionResult(functionFragment: "l2Bridge", data: BytesLike): Result; decodeFunctionResult( - functionFragment: "l2TokenAddress", - data: BytesLike + functionFragment: "l2TokenAddress", + data: BytesLike ): Result; events: { "ClaimedFailedDeposit(address,address,uint256)": EventFragment; - "DepositInitiated(address,address,address,uint256)": EventFragment; + "DepositInitiated(bytes32,address,address,address,uint256)": EventFragment; "WithdrawalFinalized(address,address,uint256)": EventFragment; }; @@ -103,435 +106,470 @@ export class IL1Bridge extends Contract { functions: { claimFailedDeposit( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])"( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; deposit( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + 
_refundRecipient: string, + overrides?: PayableOverrides ): Promise; - "deposit(address,address,uint256,uint256,uint256)"( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + "deposit(address,address,uint256,uint256,uint256,address)"( + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; finalizeWithdrawal( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; isWithdrawalFinalized( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise<{ 0: boolean; }>; "isWithdrawalFinalized(uint256,uint256)"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise<{ 0: boolean; }>; + l2Bridge(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + "l2Bridge()"(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + l2TokenAddress( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise<{ 0: string; }>; "l2TokenAddress(address)"( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise<{ 0: string; }>; }; claimFailedDeposit( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])"( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; deposit( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + _l2Receiver: string, + _l1Token: string, 
+ _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; - "deposit(address,address,uint256,uint256,uint256)"( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + "deposit(address,address,uint256,uint256,uint256,address)"( + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; finalizeWithdrawal( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; isWithdrawalFinalized( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; "isWithdrawalFinalized(uint256,uint256)"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; + l2Bridge(overrides?: CallOverrides): Promise; + + "l2Bridge()"(overrides?: CallOverrides): Promise; + l2TokenAddress(_l1Token: string, overrides?: CallOverrides): Promise; "l2TokenAddress(address)"( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; callStatic: { claimFailedDeposit( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: CallOverrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: CallOverrides ): Promise; "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])"( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: CallOverrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: CallOverrides ): Promise; deposit( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: CallOverrides + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + 
_l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: CallOverrides ): Promise; - "deposit(address,address,uint256,uint256,uint256)"( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: CallOverrides + "deposit(address,address,uint256,uint256,uint256,address)"( + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: CallOverrides ): Promise; finalizeWithdrawal( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: CallOverrides ): Promise; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: CallOverrides ): Promise; isWithdrawalFinalized( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; "isWithdrawalFinalized(uint256,uint256)"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; + l2Bridge(overrides?: CallOverrides): Promise; + + "l2Bridge()"(overrides?: CallOverrides): Promise; + l2TokenAddress( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; "l2TokenAddress(address)"( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; }; filters: { ClaimedFailedDeposit( - to: string | null, - l1Token: string | null, - amount: null + to: string | null, + l1Token: string | null, + amount: null ): EventFilter; DepositInitiated( - from: string | null, - to: string | null, - l1Token: string | null, - amount: null + l2DepositTxHash: BytesLike | null, + from: string | null, + to: string | null, + l1Token: null, + amount: null ): EventFilter; WithdrawalFinalized( - to: string | null, - l1Token: string | null, - amount: null + to: string | null, + l1Token: string | null, + amount: null ): EventFilter; }; estimateGas: { claimFailedDeposit( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])"( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: 
BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; deposit( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; - "deposit(address,address,uint256,uint256,uint256)"( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + "deposit(address,address,uint256,uint256,uint256,address)"( + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; finalizeWithdrawal( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; isWithdrawalFinalized( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; "isWithdrawalFinalized(uint256,uint256)"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; + l2Bridge(overrides?: CallOverrides): Promise; + + "l2Bridge()"(overrides?: CallOverrides): Promise; + l2TokenAddress( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; "l2TokenAddress(address)"( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; }; populateTransaction: { claimFailedDeposit( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "claimFailedDeposit(address,address,bytes32,uint256,uint256,uint16,bytes32[])"( - _depositSender: string, - _l1Token: string, - _l2TxHash: BytesLike, - _l2BlockNumber: BigNumberish, - 
_l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _merkleProof: BytesLike[], - overrides?: Overrides + _depositSender: string, + _l1Token: string, + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; deposit( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; - "deposit(address,address,uint256,uint256,uint256)"( - _l2Receiver: string, - _l1Token: string, - _amount: BigNumberish, - _l2TxGasLimit: BigNumberish, - _l2TxGasPerPubdataByte: BigNumberish, - overrides?: PayableOverrides + "deposit(address,address,uint256,uint256,uint256,address)"( + _l2Receiver: string, + _l1Token: string, + _amount: BigNumberish, + _l2TxGasLimit: BigNumberish, + _l2TxGasPerPubdataByte: BigNumberish, + _refundRecipient: string, + overrides?: PayableOverrides ): Promise; finalizeWithdrawal( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; "finalizeWithdrawal(uint256,uint256,uint16,bytes,bytes32[])"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - _l2TxNumberInBlock: BigNumberish, - _message: BytesLike, - _merkleProof: BytesLike[], - overrides?: Overrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides ): Promise; isWithdrawalFinalized( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; "isWithdrawalFinalized(uint256,uint256)"( - _l2BlockNumber: BigNumberish, - _l2MessageIndex: BigNumberish, - overrides?: CallOverrides + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides ): Promise; + l2Bridge(overrides?: CallOverrides): Promise; + + "l2Bridge()"(overrides?: CallOverrides): Promise; + l2TokenAddress( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; "l2TokenAddress(address)"( - _l1Token: string, - overrides?: CallOverrides + _l1Token: string, + overrides?: CallOverrides ): Promise; }; -} \ No newline at end of file +} diff --git a/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts b/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts index 425118662f2d..091a994136ac 100644 --- a/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts +++ b/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts @@ -9,8 +9,8 @@ import type { IL1Bridge } from "./IL1Bridge"; export class IL1BridgeFactory { static connect( - address: string, - signerOrProvider: Signer | Provider + address: string, + signerOrProvider: Signer | Provider ): IL1Bridge { return new Contract(address, _abi, signerOrProvider) as IL1Bridge; } @@ -45,6 +45,12 @@ const _abi = [ { anonymous: false, inputs: [ + { 
+ indexed: true, + internalType: "bytes32", + name: "l2DepositTxHash", + type: "bytes32", + }, { indexed: true, internalType: "address", @@ -58,7 +64,7 @@ const _abi = [ type: "address", }, { - indexed: true, + indexed: false, internalType: "address", name: "l1Token", type: "address", @@ -168,6 +174,11 @@ const _abi = [ name: "_l2TxGasPerPubdataByte", type: "uint256", }, + { + internalType: "address", + name: "_refundRecipient", + type: "address", + }, ], name: "deposit", outputs: [ @@ -237,6 +248,19 @@ const _abi = [ stateMutability: "view", type: "function", }, + { + inputs: [], + name: "l2Bridge", + outputs: [ + { + internalType: "address", + name: "", + type: "address", + }, + ], + stateMutability: "view", + type: "function", + }, { inputs: [ { diff --git a/yarn.lock b/yarn.lock index 69efb2766e4b..f5080ce2e28c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -435,7 +435,7 @@ "@ethersproject/properties" "^5.5.0" "@ethersproject/strings" "^5.5.0" -"@ethersproject/abi@5.7.0", "@ethersproject/abi@^5.1.2", "@ethersproject/abi@^5.5.0", "@ethersproject/abi@^5.7.0": +"@ethersproject/abi@5.7.0", "@ethersproject/abi@^5.0.0-beta.146", "@ethersproject/abi@^5.0.9", "@ethersproject/abi@^5.1.2", "@ethersproject/abi@^5.5.0", "@ethersproject/abi@^5.7.0": version "5.7.0" resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.7.0.tgz#b3f3e045bbbeed1af3947335c247ad625a44e449" integrity sha512-351ktp42TiRcYB3H1OP8yajPeAQstMW/yCFokj/AthP9bLHzQFPlOrxOcwYEDkUAICmOHljvN4K39OMTMUa9RA== @@ -1377,10 +1377,17 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" -"@matterlabs/hardhat-zksync-solc@^0.3.14-beta.3": - version "0.3.14-beta.3" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.3.14-beta.3.tgz#b80275666459d8a047480d223c1098075b790170" - integrity sha512-H7MqJ4QXDgCvTYPWTJGjIJ71IGShT450SiSKKS3Vz8qbJNJusv7KKDsIDe2urwCTwLasSxRXk+Z+cEf03TnR8A== +"@matterlabs/hardhat-zksync-deploy@^0.6.1": + version "0.6.3" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-0.6.3.tgz#833b208373e7037bf43671054328d82511444e2a" + integrity sha512-FB+2xFL/80JJwlGna+aHA6dk4ONrMFqThTZATYVJUAKooA0Aw5qmpmM8B3qsNB4LLzHSO/EmVrHIcLaPv8hYwQ== + dependencies: + chalk "4.1.2" + +"@matterlabs/hardhat-zksync-solc@^0.3.15": + version "0.3.16" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.3.16.tgz#dd8ed44f1a580f282794a15fee995f418b040158" + integrity sha512-gw46yyiCfj49I/nbUcOlnF5xE80WyeW/i8i9ouHom4KWJNt1kioQIwOPkN7aJURhXpJJxKSdeWBrQHLWTZDnTA== dependencies: "@nomiclabs/hardhat-docker" "^2.0.0" chalk "4.1.2" @@ -1855,7 +1862,7 @@ dependencies: "@sinonjs/commons" "^1.7.0" -"@solidity-parser/parser@^0.14.1", "@solidity-parser/parser@^0.14.2": +"@solidity-parser/parser@^0.14.0", "@solidity-parser/parser@^0.14.1", "@solidity-parser/parser@^0.14.2": version "0.14.5" resolved "https://registry.yarnpkg.com/@solidity-parser/parser/-/parser-0.14.5.tgz#87bc3cc7b068e08195c219c91cd8ddff5ef1a804" integrity sha512-6dKnHZn7fg/iQATVEzqyUOyEidbn05q7YA2mQ9hC0MMXhhV3/JrsxmFSYZAcr7j1yUP700LLhTruvJ3MiQmjJg== @@ -1987,11 +1994,33 @@ resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.3.3.tgz#3c90752792660c4b562ad73b3fbd68bf3bc7ae07" integrity sha512-hC7OMnszpxhZPduX+m+nrx+uFoLkWOMiR4oa/AZF3MuSETYTZmFfJAHqZEM8MVlvfG7BEUcgvtwoCTxBp6hm3g== +"@types/concat-stream@^1.6.0": + version "1.6.1" + resolved 
"https://registry.yarnpkg.com/@types/concat-stream/-/concat-stream-1.6.1.tgz#24bcfc101ecf68e886aaedce60dfd74b632a1b74" + integrity sha512-eHE4cQPoj6ngxBZMvVf6Hw7Mh4jMW4U9lpGmS5GBPB9RYxlFg+CHaVN7ErNY4W9XfLIEn20b4VDYaIrbq0q4uA== + dependencies: + "@types/node" "*" + "@types/deep-extend@^0.4.31": version "0.4.32" resolved "https://registry.yarnpkg.com/@types/deep-extend/-/deep-extend-0.4.32.tgz#0af51fffde55cb168e8d68f8236908c2cdfe7419" integrity sha512-7/pcMJr5I5OnpWTTfv0o3fJ9+f36EqoQa27/oJlbfvfZAMMrPyU5/+AUC+5OOtTEKdyoW4lAeIBYHtodtEdNUA== +"@types/form-data@0.0.33": + version "0.0.33" + resolved "https://registry.yarnpkg.com/@types/form-data/-/form-data-0.0.33.tgz#c9ac85b2a5fd18435b8c85d9ecb50e6d6c893ff8" + integrity sha512-8BSvG1kGm83cyJITQMZSulnl6QV8jqAGreJsc5tPu1Jq0vTSOiY/k24Wx82JRpWwZSqrala6sd5rWi6aNXvqcw== + dependencies: + "@types/node" "*" + +"@types/glob@^7.1.1": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.2.0.tgz#bc1b5bf3aa92f25bd5dd39f35c57361bdce5b2eb" + integrity sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + "@types/graceful-fs@^4.1.3": version "4.1.5" resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.5.tgz#21ffba0d98da4350db64891f92a9e5db3cdb4e15" @@ -2048,6 +2077,11 @@ resolved "https://registry.yarnpkg.com/@types/lru-cache/-/lru-cache-5.1.1.tgz#c48c2e27b65d2a153b19bfc1a317e30872e01eef" integrity sha512-ssE3Vlrys7sdIzs5LOxCzTVMsU7i9oa/IaW92wF32JFb3CVczqOkru2xspuKczHEbG3nvmPY7IFqVmGGHdNbYw== +"@types/minimatch@*": + version "5.1.2" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-5.1.2.tgz#07508b45797cb81ec3f273011b054cd0755eddca" + integrity sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA== + "@types/mkdirp@^0.5.2": version "0.5.2" resolved "https://registry.yarnpkg.com/@types/mkdirp/-/mkdirp-0.5.2.tgz#503aacfe5cc2703d5484326b1b27efa67a339c1f" @@ -2085,6 +2119,11 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.9.tgz#02d013de7058cea16d36168ef2fc653464cfbad4" integrity sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg== +"@types/node@^10.0.3": + version "10.17.60" + resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.60.tgz#35f3d6213daed95da7f0f73e75bcc6980e90597b" + integrity sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw== + "@types/node@^12.12.6": version "12.20.55" resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.55.tgz#c329cbd434c42164f846b909bd6f85b5537f6240" @@ -2100,6 +2139,11 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-16.18.3.tgz#d7f7ba828ad9e540270f01ce00d391c54e6e0abc" integrity sha512-jh6m0QUhIRcZpNv7Z/rpN+ZWXOicUUQbSoWks7Htkbb9IjFQj4kzcX/xFCkjstCj5flMsN8FiSvt+q+Tcs4Llg== +"@types/node@^8.0.0": + version "8.10.66" + resolved "https://registry.yarnpkg.com/@types/node/-/node-8.10.66.tgz#dd035d409df322acc83dff62a602f12a5783bbb3" + integrity sha512-tktOkFUA4kXx2hhhrB8bIFb5TbwzS4uOhKEmwiD+NoiL0qtP2OQ9mFldbgD4dV1djrlBYP6eBuQZiWjuHUpqFw== + "@types/pbkdf2@^3.0.0": version "3.1.0" resolved "https://registry.yarnpkg.com/@types/pbkdf2/-/pbkdf2-3.1.0.tgz#039a0e9b67da0cdc4ee5dab865caa6b267bb66b1" @@ -2117,6 +2161,11 @@ resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.1.tgz#dfd20e2dc35f027cdd6c1908e80a5ddc7499670e" integrity 
sha512-ri0UmynRRvZiiUJdiz38MmIblKK+oH30MztdBVR95dv/Ubw6neWSb8u1XpRb72L4qsZOhz+L+z9JD40SJmfWow== +"@types/qs@^6.2.31": + version "6.9.7" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" + integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== + "@types/resolve@^0.0.8": version "0.0.8" resolved "https://registry.yarnpkg.com/@types/resolve/-/resolve-0.0.8.tgz#f26074d238e02659e323ce1a13d041eee280e194" @@ -2283,6 +2332,16 @@ JSONStream@1.3.2: jsonparse "^1.2.0" through ">=2.2.7 <3" +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + +abbrev@1.0.x: + version "1.0.9" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135" + integrity sha512-LEyx4aLEC3x6T0UguF6YILf+ntvmOaWsVfENmIW0E9H09vKlLDGelMjjSm0jkDHALj8A8quZ/HapKNigzwge+Q== + abort-controller@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" @@ -2364,6 +2423,11 @@ acorn@^8.4.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.1.tgz#0a3f9cbecc4ec3bea6f0a80b66ae8dd2da250b73" integrity sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA== +address@^1.0.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/address/-/address-1.2.2.tgz#2b5248dac5485a6390532c6a517fda2e3faac89e" + integrity sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA== + adm-zip@^0.4.16: version "0.4.16" resolved "https://registry.yarnpkg.com/adm-zip/-/adm-zip-0.4.16.tgz#cf4c508fdffab02c269cbc7f471a875f05570365" @@ -2414,6 +2478,16 @@ ajv@^8.0.1: require-from-string "^2.0.2" uri-js "^4.2.2" +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + integrity sha512-S2Hw0TtNkMJhIabBwIojKL9YHO5T0n5eNqWJ7Lrlel/zDbftQpxpapi8tZs3X1HWa+u+QeydGmzzNU0m09+Rcg== + +ansi-colors@3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.3.tgz#57d35b8686e851e2cc04c403f1c00203976a1813" + integrity sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw== + ansi-colors@4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.1.tgz#cbb9ae256bf750af1eab344f229aa27fe94ba348" @@ -2508,6 +2582,14 @@ anymatch@^3.0.3, anymatch@~3.1.2: normalize-path "^3.0.0" picomatch "^2.0.4" +anymatch@~3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + arg@^4.1.0: version "4.1.3" resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" @@ -2564,6 +2646,11 @@ array-union@^2.1.0: resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== +array-uniq@1.0.3: + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q== + array-unique@^0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" @@ -2580,6 +2667,11 @@ array.prototype.reduce@^1.0.5: es-array-method-boxes-properly "^1.0.0" is-string "^1.0.7" +asap@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA== + asn1.js@^5.2.0: version "5.4.1" resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-5.4.1.tgz#11a980b84ebb91781ce35b0fdc2ee294e3783f07" @@ -2639,6 +2731,11 @@ async-limiter@~1.0.0: resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== +async@1.x, async@^1.4.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + integrity sha512-nSVgobk4rv61R9PUSDtYt7mPVB2olxNR5RWJcAsH676/ef11bUZwvu7+RGYrYauVdDPcO519v68wRhXQtxsV9w== + async@2.6.2: version "2.6.2" resolved "https://registry.yarnpkg.com/async/-/async-2.6.2.tgz#18330ea7e6e313887f5d2f2a904bac6fe4dd5381" @@ -2646,11 +2743,6 @@ async@2.6.2: dependencies: lodash "^4.17.11" -async@^1.4.2: - version "1.5.2" - resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" - integrity sha512-nSVgobk4rv61R9PUSDtYt7mPVB2olxNR5RWJcAsH676/ef11bUZwvu7+RGYrYauVdDPcO519v68wRhXQtxsV9w== - async@^2.0.1, async@^2.1.2, async@^2.4.0, async@^2.5.0, async@^2.6.1: version "2.6.4" resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" @@ -3800,7 +3892,7 @@ camelcase@^3.0.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a" integrity sha512-4nhGqUkc4BqbBBB4Q6zLuD7lzzrHYrjKGeYaEji/3tFR5VdJu9v+LilhGIVe8wxEJPPOeWo7eg8dwY13TZ1BNg== -camelcase@^5.3.1: +camelcase@^5.0.0, camelcase@^5.3.1: version "5.3.1" resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== @@ -3820,7 +3912,7 @@ caniuse-lite@^1.0.30001400: resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001429.tgz#70cdae959096756a85713b36dd9cb82e62325639" integrity sha512-511ThLu1hF+5RRRt0zYCf2U2yRr9GPF6m5y90SBCWsvSoYoW7yAGlv/elyPaNfvGCkp6kj/KFZWU0BMA69Prsg== -caseless@~0.12.0: +caseless@^0.12.0, caseless@~0.12.0: version "0.12.0" resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== @@ -3896,6 +3988,11 @@ chardet@^0.7.0: resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== +"charenc@>= 0.0.1": + version "0.0.2" + resolved 
"https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" + integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA== + check-error@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.2.tgz#574d312edd88bb5dd8912e9286dd6c0aed4aac82" @@ -3908,6 +4005,21 @@ checkpoint-store@^1.1.0: dependencies: functional-red-black-tree "^1.0.1" +chokidar@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.3.0.tgz#12c0714668c55800f659e262d4962a97faf554a6" + integrity sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.2.0" + optionalDependencies: + fsevents "~2.1.1" + chokidar@3.5.3, chokidar@^3.4.0: version "3.5.3" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" @@ -4000,6 +4112,16 @@ cli-cursor@^2.1.0: dependencies: restore-cursor "^2.0.0" +cli-table3@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.5.1.tgz#0252372d94dfc40dbd8df06005f48f31f656f202" + integrity sha512-7Qg2Jrep1S/+Q3EceiZtQcDPWxhAvBw+ERf1162v4sikJrvojMHFqXt8QIVha8UlH9rgU0BeWPytZ9/TzYqlUw== + dependencies: + object-assign "^4.1.0" + string-width "^2.1.1" + optionalDependencies: + colors "^1.1.2" + cli-table3@^0.6.0: version "0.6.3" resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.3.tgz#61ab765aac156b52f222954ffc607a6f01dbeeb2" @@ -4023,6 +4145,15 @@ cliui@^3.2.0: strip-ansi "^3.0.1" wrap-ansi "^2.0.0" +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== + dependencies: + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" + cliui@^7.0.2: version "7.0.4" resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" @@ -4107,6 +4238,11 @@ color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== +colors@1.4.0, colors@^1.1.2: + version "1.4.0" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" + integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== + combined-stream@^1.0.6, combined-stream@^1.0.8, combined-stream@~1.0.6: version "1.0.8" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" @@ -4170,7 +4306,7 @@ concat-map@0.0.1: resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== -concat-stream@^1.5.1, concat-stream@~1.6.2: +concat-stream@^1.5.1, concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: version "1.6.2" resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" integrity 
sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== @@ -4346,6 +4482,11 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" +"crypt@>= 0.0.1": + version "0.0.2" + resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" + integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow== + crypto-browserify@3.12.0: version "3.12.0" resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" @@ -4383,6 +4524,11 @@ dashdash@^1.12.0: dependencies: assert-plus "^1.0.0" +death@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/death/-/death-1.1.0.tgz#01aa9c401edd92750514470b8266390c66c67318" + integrity sha512-vsV6S4KVHvTGxbEcij7hkWRv0It+sGGWVOM67dQde/o5Xjnr+KmLjxWJii2uEObIrt1CcM9w0Yaovx+iOlIL+w== + debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8, debug@^2.6.9: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" @@ -4418,7 +4564,7 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" -decamelize@^1.1.1: +decamelize@^1.1.1, decamelize@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA== @@ -4521,6 +4667,14 @@ deferred-leveldown@~4.0.0: abstract-leveldown "~5.0.0" inherits "^2.0.3" +define-properties@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.0.tgz#52988570670c9eacedd8064f4a990f2405849bd5" + integrity sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA== + dependencies: + has-property-descriptors "^1.0.0" + object-keys "^1.1.1" + define-properties@^1.1.3, define-properties@^1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.4.tgz#0b14d7bd7fbeb2f3572c3a7eda80ea5d57fb05b1" @@ -4591,11 +4745,24 @@ detect-newline@^3.0.0: resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== +detect-port@^1.3.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.5.1.tgz#451ca9b6eaf20451acb0799b8ab40dff7718727b" + integrity sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ== + dependencies: + address "^1.0.1" + debug "4" + diff-sequences@^29.2.0: version "29.2.0" resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.2.0.tgz#4c55b5b40706c7b5d2c5c75999a50c56d214e8f6" integrity sha512-413SY5JpYeSBZxmenGEmCVQ8mCgtFJF0w9PROdaS6z987XC2Pd2GOKqOITLtMftmyFZqgtCOb/QA7/Z3ZXfzIw== +diff@3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" + integrity sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== + diff@5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/diff/-/diff-5.0.0.tgz#7ed6ad76d859d030787ec35855f5b1daf31d852b" @@ -4615,6 +4782,13 @@ diffie-hellman@^5.0.0: miller-rabin "^4.0.0" randombytes "^2.0.0" +difflib@^0.2.4: + version "0.2.4" + resolved 
"https://registry.yarnpkg.com/difflib/-/difflib-0.2.4.tgz#b5e30361a6db023176d562892db85940a718f47e" + integrity sha512-9YVwmMb0wQHQNr5J9m6BSj6fk4pfGITGQOOs+D9Fl+INODWFOfvhIU1hNv6GgR1RBoC/9NJcwu77zShxV0kT7w== + dependencies: + heap ">= 0.2.0" + dir-glob@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" @@ -4946,21 +5120,33 @@ escape-html@~1.0.3: resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== +escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + escape-string-regexp@4.0.0, escape-string-regexp@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== -escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== - escape-string-regexp@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344" integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== +escodegen@1.8.x: + version "1.8.1" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018" + integrity sha512-yhi5S+mNTOuRvyW4gWlg5W1byMaQGWWSYHXsuFZ7GBo7tpyOwi2EdzMP/QWxh9hwkD2m+wDVHJsxhRIj+v/b/A== + dependencies: + esprima "^2.7.1" + estraverse "^1.9.1" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.2.0" + eslint-config-alloy@^3.8.2: version "3.10.0" resolved "https://registry.yarnpkg.com/eslint-config-alloy/-/eslint-config-alloy-3.10.0.tgz#b2d85ba3bd7dddcc6d7fc79088c192a646f4f246" @@ -5119,6 +5305,11 @@ espree@^7.3.0, espree@^7.3.1: acorn-jsx "^5.3.1" eslint-visitor-keys "^1.3.0" +esprima@2.7.x, esprima@^2.7.1: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + integrity sha512-OarPfz0lFCiW4/AV2Oy1Rp9qu0iusTKqykwTspGCZtPxmF81JR4MmIebvF1F9+UOKth2ZubLQ4XGGaU+hSn99A== + esprima@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" @@ -5138,6 +5329,11 @@ esrecurse@^4.1.0, esrecurse@^4.3.0: dependencies: estraverse "^5.2.0" +estraverse@^1.9.1: + version "1.9.3" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44" + integrity sha512-25w1fMXQrGdoquWnScXZGckOv+Wes+JDnuN/+7ex3SauFRS72r2lFDec0EKPt2YD1wUJ/IrfEex+9yp4hfSOJA== + estraverse@^4.1.1: version "4.3.0" resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" @@ -5179,6 
+5375,27 @@ eth-ens-namehash@2.0.8, eth-ens-namehash@^2.0.8: idna-uts46-hx "^2.3.1" js-sha3 "^0.5.7" +eth-gas-reporter@^0.2.25: + version "0.2.25" + resolved "https://registry.yarnpkg.com/eth-gas-reporter/-/eth-gas-reporter-0.2.25.tgz#546dfa946c1acee93cb1a94c2a1162292d6ff566" + integrity sha512-1fRgyE4xUB8SoqLgN3eDfpDfwEfRxh2Sz1b7wzFbyQA+9TekMmvSjjoRu9SKcSVyK+vLkLIsVbJDsTWjw195OQ== + dependencies: + "@ethersproject/abi" "^5.0.0-beta.146" + "@solidity-parser/parser" "^0.14.0" + cli-table3 "^0.5.0" + colors "1.4.0" + ethereum-cryptography "^1.0.3" + ethers "^4.0.40" + fs-readdir-recursive "^1.1.0" + lodash "^4.17.14" + markdown-table "^1.1.3" + mocha "^7.1.1" + req-cwd "^2.0.0" + request "^2.88.0" + request-promise-native "^1.0.5" + sha1 "^1.1.1" + sync-request "^6.0.0" + eth-json-rpc-infura@^3.1.0: version "3.2.1" resolved "https://registry.yarnpkg.com/eth-json-rpc-infura/-/eth-json-rpc-infura-3.2.1.tgz#26702a821067862b72d979c016fd611502c6057f" @@ -5548,6 +5765,21 @@ ethereumjs-wallet@0.6.5: utf8 "^3.0.0" uuid "^3.3.2" +ethers@^4.0.40: + version "4.0.49" + resolved "https://registry.yarnpkg.com/ethers/-/ethers-4.0.49.tgz#0eb0e9161a0c8b4761be547396bbe2fb121a8894" + integrity sha512-kPltTvWiyu+OktYy1IStSO16i2e7cS9D9OxZ81q2UUaiNPVrm/RTcbxamCXF9VUSKzJIdJV68EAIhTEVBalRWg== + dependencies: + aes-js "3.0.0" + bn.js "^4.11.9" + elliptic "6.5.4" + hash.js "1.1.3" + js-sha3 "0.5.7" + scrypt-js "2.0.4" + setimmediate "1.0.4" + uuid "2.0.1" + xmlhttprequest "1.8.0" + ethers@^5.0.1, ethers@^5.0.2, ethers@^5.5.2, ethers@^5.7.0, ethers@~5.7.0: version "5.7.2" resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.2.tgz#3a7deeabbb8c030d4126b24f84e525466145872e" @@ -5916,7 +6148,7 @@ fast-diff@^1.1.2: resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.2.0.tgz#73ee11982d86caaf7959828d519cfe927fac5f03" integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w== -fast-glob@^3.2.9: +fast-glob@^3.0.3, fast-glob@^3.2.9: version "3.2.12" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.12.tgz#7f39ec99c2e6ab030337142da9e0c18f37afae80" integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w== @@ -6017,6 +6249,13 @@ find-replace@^1.0.3: array-back "^1.0.4" test-value "^2.1.0" +find-up@3.0.0, find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + find-up@5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" @@ -6080,6 +6319,13 @@ flat-cache@^3.0.4: flatted "^3.1.0" rimraf "^3.0.2" +flat@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/flat/-/flat-4.1.1.tgz#a392059cc382881ff98642f5da4dde0a959f309b" + integrity sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA== + dependencies: + is-buffer "~2.0.3" + flat@^5.0.2: version "5.0.2" resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" @@ -6122,6 +6368,15 @@ forever-agent@~0.6.1: resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== +form-data@^2.2.0: + version 
"2.5.1" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" + integrity sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + form-data@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f" @@ -6210,6 +6465,15 @@ fs-extra@^7.0.0, fs-extra@^7.0.1: jsonfile "^4.0.0" universalify "^0.1.0" +fs-extra@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-8.1.0.tgz#49d43c45a88cd9677668cb7be1b46efdb8d2e1c0" + integrity sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^4.0.0" + universalify "^0.1.0" + fs-extra@^9.0.0: version "9.1.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" @@ -6227,6 +6491,11 @@ fs-minipass@^1.2.7: dependencies: minipass "^2.6.0" +fs-readdir-recursive@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz#e32fc030a2ccee44a6b5371308da54be0b397d27" + integrity sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA== + fs.realpath@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" @@ -6242,6 +6511,11 @@ fsevents@^2.3.2, fsevents@~2.3.2: resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== +fsevents@~2.1.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" + integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== + function-bind@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" @@ -6314,7 +6588,7 @@ get-caller-file@^1.0.1: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== -get-caller-file@^2.0.5: +get-caller-file@^2.0.1, get-caller-file@^2.0.5: version "2.0.5" resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== @@ -6338,6 +6612,11 @@ get-package-type@^0.1.0: resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== +get-port@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/get-port/-/get-port-3.2.0.tgz#dd7ce7de187c06c8bf353796ac71e099f0980ebc" + integrity sha512-x5UJKlgeUiNT8nyo/AcnwLnZuZNcSjSw0kogRB+Whd1fjjFq4B1hySFxSFWWSn4mIBzg3sRNUDFYc4g5gjPoLg== + get-stdin@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-5.0.1.tgz#122e161591e21ff4c52530305693f20e6393a398" @@ 
-6382,13 +6661,33 @@ getpass@^0.1.1: dependencies: assert-plus "^1.0.0" -glob-parent@^5.1.2, glob-parent@~5.1.2: +ghost-testrpc@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/ghost-testrpc/-/ghost-testrpc-0.0.2.tgz#c4de9557b1d1ae7b2d20bbe474a91378ca90ce92" + integrity sha512-i08dAEgJ2g8z5buJIrCTduwPIhih3DP+hOCTyyryikfV8T0bNvHnGXO67i0DD1H4GBDETTclPy9njZbfluQYrQ== + dependencies: + chalk "^2.4.2" + node-emoji "^1.10.0" + +glob-parent@^5.1.2, glob-parent@~5.1.0, glob-parent@~5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== dependencies: is-glob "^4.0.1" +glob@7.1.3: + version "7.1.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" + integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + glob@7.2.0: version "7.2.0" resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" @@ -6401,7 +6700,18 @@ glob@7.2.0: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@~7.2.3: +glob@^5.0.15: + version "5.0.15" + resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" + integrity sha512-c9IPMazfRITpmAAKi22dK1VKxGDX9ehhqfABDriL/lzO92xcUKEJPQHrVA/2YHSNFB4iFlykVmWvwo48nr3OxA== + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.0, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@~7.2.3: version "7.2.3" resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== @@ -6425,6 +6735,22 @@ glob@~7.1.2: once "^1.3.0" path-is-absolute "^1.0.0" +global-modules@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" + integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A== + dependencies: + global-prefix "^3.0.0" + +global-prefix@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97" + integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg== + dependencies: + ini "^1.3.5" + kind-of "^6.0.2" + which "^1.3.1" + global@~4.4.0: version "4.4.0" resolved "https://registry.yarnpkg.com/global/-/global-4.4.0.tgz#3e7b105179006a323ed71aafca3e9c57a5cc6406" @@ -6457,6 +6783,20 @@ globalthis@^1.0.3: dependencies: define-properties "^1.1.3" +globby@^10.0.1: + version "10.0.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-10.0.2.tgz#277593e745acaa4646c3ab411289ec47a0392543" + integrity sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg== + dependencies: + "@types/glob" "^7.1.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.0.3" + glob "^7.1.3" + ignore "^5.1.1" + merge2 "^1.2.3" + slash "^3.0.0" + globby@^11.0.3: version "11.1.0" resolved 
"https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" @@ -6525,7 +6865,7 @@ growl@1.10.5: resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== -handlebars@^4.7.6: +handlebars@^4.0.1, handlebars@^4.7.6: version "4.7.7" resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.7.7.tgz#9ce33416aad02dbd6c8fafa8240d5d98004945a1" integrity sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA== @@ -6558,6 +6898,15 @@ hardhat-contract-sizer@^2.0.2: chalk "^4.0.0" cli-table3 "^0.6.0" +hardhat-gas-reporter@^1.0.9: + version "1.0.9" + resolved "https://registry.yarnpkg.com/hardhat-gas-reporter/-/hardhat-gas-reporter-1.0.9.tgz#9a2afb354bc3b6346aab55b1c02ca556d0e16450" + integrity sha512-INN26G3EW43adGKBNzYWOlI3+rlLnasXTwW79YNnUhXPDa+yHESgt639dJEs37gCjhkbNKcRRJnomXEuMFBXJg== + dependencies: + array-uniq "1.0.3" + eth-gas-reporter "^0.2.25" + sha1 "^1.1.1" + hardhat-typechain@^0.3.3: version "0.3.5" resolved "https://registry.yarnpkg.com/hardhat-typechain/-/hardhat-typechain-0.3.5.tgz#8e50616a9da348b33bd001168c8fda9c66b7b4af" @@ -6631,6 +6980,11 @@ has-bigints@^1.0.1, has-bigints@^1.0.2: resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ== +has-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" + integrity sha512-DyYHfIYwAJmjAjSSPKANxI8bFY9YtFrgkAfinBojQ8YJTOuOuav64tMUJv584SES4xl74PmuaevIyaLESHdTAA== + has-flag@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" @@ -6653,7 +7007,7 @@ has-proto@^1.0.1: resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" integrity sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== -has-symbols@^1.0.1, has-symbols@^1.0.2, has-symbols@^1.0.3: +has-symbols@^1.0.0, has-symbols@^1.0.1, has-symbols@^1.0.2, has-symbols@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== @@ -6712,6 +7066,14 @@ hash-base@^3.0.0: readable-stream "^3.6.0" safe-buffer "^5.2.0" +hash.js@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.3.tgz#340dedbe6290187151c1ea1d777a3448935df846" + integrity sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA== + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.0" + hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: version "1.1.7" resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" @@ -6730,6 +7092,11 @@ heap@0.2.6: resolved "https://registry.yarnpkg.com/heap/-/heap-0.2.6.tgz#087e1f10b046932fc8594dd9e6d378afc9d1e5ac" integrity sha512-MzzWcnfB1e4EG2vHi3dXHoBupmuXNZzx6pY6HldVS55JKKBoq3xOyzfSaZRkJp37HIhEYC78knabHff3zc4dQQ== +"heap@>= 0.2.0": + version "0.2.7" + resolved 
"https://registry.yarnpkg.com/heap/-/heap-0.2.7.tgz#1e6adf711d3f27ce35a81fe3b7bd576c2260a8fc" + integrity sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg== + hmac-drbg@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" @@ -6757,6 +7124,16 @@ html-escaper@^2.0.0: resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== +http-basic@^8.1.1: + version "8.1.3" + resolved "https://registry.yarnpkg.com/http-basic/-/http-basic-8.1.3.tgz#a7cabee7526869b9b710136970805b1004261bbf" + integrity sha512-/EcDMwJZh3mABI2NhGfHOGOeOZITqfkEO4p/xK+l3NpyncIHUQBoMvCSF/b5GqvKtySC2srL/GGG3+EtlqlmCw== + dependencies: + caseless "^0.12.0" + concat-stream "^1.6.2" + http-response-object "^3.0.1" + parse-cache-control "^1.0.1" + http-cache-semantics@^4.0.0: version "4.1.0" resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" @@ -6778,6 +7155,13 @@ http-https@^1.0.0: resolved "https://registry.yarnpkg.com/http-https/-/http-https-1.0.0.tgz#2f908dd5f1db4068c058cd6e6d4ce392c913389b" integrity sha512-o0PWwVCSp3O0wS6FvNr6xfBCHgt0m1tvPLFOCc2iFDKTRAXhB7m8klDf7ErowFH8POa6dVdGatKU5I1YYwzUyg== +http-response-object@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/http-response-object/-/http-response-object-3.0.2.tgz#7f435bb210454e4360d074ef1f989d5ea8aa9810" + integrity sha512-bqX0XTF6fnXSQcEJ2Iuyr75yVakyjIDCqroJQ/aHfSdlM743Cwqoi2nDYMzLGWUcuTWGWy8AAvOKXTfiv6q9RA== + dependencies: + "@types/node" "^10.0.3" + http-signature@~1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" @@ -6839,6 +7223,11 @@ ignore@^4.0.6: resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== +ignore@^5.1.1: + version "5.2.4" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" + integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== + ignore@^5.1.8, ignore@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.0.tgz#6d3bac8fa7fe0d45d9f9be7bac2fc279577e345a" @@ -6916,7 +7305,7 @@ inherits@2.0.3: resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== -ini@~1.3.0: +ini@^1.3.5, ini@~1.3.0: version "1.3.8" resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== @@ -6958,6 +7347,11 @@ internal-slot@^1.0.4: has "^1.0.3" side-channel "^1.0.4" +interpret@^1.0.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" + integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA== + invariant@^2.2.2: version "2.2.4" resolved 
"https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" @@ -7045,7 +7439,7 @@ is-buffer@^1.1.5: resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== -is-buffer@^2.0.5: +is-buffer@^2.0.5, is-buffer@~2.0.3: version "2.0.5" resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== @@ -7062,6 +7456,13 @@ is-ci@^2.0.0: dependencies: ci-info "^2.0.0" +is-core-module@^2.11.0: + version "2.12.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.12.0.tgz#36ad62f6f73c8253fd6472517a12483cf03e7ec4" + integrity sha512-RECHCBCd/viahWmwj6enj19sKbHfJrddi/6cBDsNTKbNq0f7VeaUkBo60BqzvPqo/W54ChS62Z5qyun7cfOMqQ== + dependencies: + has "^1.0.3" + is-core-module@^2.9.0: version "2.11.0" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.11.0.tgz#ad4cb3e3863e814523c96f3f58d26cc570ff0144" @@ -7749,16 +8150,16 @@ js-sha3@0.5.5: resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.5.tgz#baf0c0e8c54ad5903447df96ade7a4a1bca79a4a" integrity sha512-yLLwn44IVeunwjpDVTDZmQeVbB0h+dZpY2eO68B/Zik8hu6dH+rKeLxwua79GGIvW6xr8NBAcrtiUbYrTjEFTA== +js-sha3@0.5.7, js-sha3@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" + integrity sha512-GII20kjaPX0zJ8wzkTbNDYMY7msuZcTWk8S5UOh6806Jq/wz1J8/bnr8uGU0DAUmYDjj2Mr4X1cW8v/GLYnR+g== + js-sha3@0.8.0, js-sha3@^0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840" integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q== -js-sha3@^0.5.7: - version "0.5.7" - resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" - integrity sha512-GII20kjaPX0zJ8wzkTbNDYMY7msuZcTWk8S5UOh6806Jq/wz1J8/bnr8uGU0DAUmYDjj2Mr4X1cW8v/GLYnR+g== - "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" @@ -7769,14 +8170,15 @@ js-tokens@^3.0.2: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" integrity sha512-RjTcuD4xjtthQkaWH7dFlH85L+QaVtSoOyGdZ3g6HFhS9dFNDfLyqgm2NFe2X6cQpeFmt0452FJjFG5UameExg== -js-yaml@4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== +js-yaml@3.13.1, js-yaml@~3.13.1: + version "3.13.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" + integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== dependencies: - argparse "^2.0.1" + argparse "^1.0.7" + esprima "^4.0.0" -js-yaml@^3.12.0, js-yaml@^3.13.0, js-yaml@^3.13.1: +js-yaml@3.x, js-yaml@^3.12.0, js-yaml@^3.13.0, js-yaml@^3.13.1: version "3.14.1" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" integrity 
sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== @@ -7784,13 +8186,12 @@ js-yaml@^3.12.0, js-yaml@^3.13.0, js-yaml@^3.13.1: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@~3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" - integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== +js-yaml@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== dependencies: - argparse "^1.0.7" - esprima "^4.0.0" + argparse "^2.0.1" jsbn@~0.1.0: version "0.1.1" @@ -7936,6 +8337,11 @@ jsonparse@^1.2.0: resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.3.1.tgz#3f4dae4a91fac315f71062f8521cc239f1366280" integrity sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg== +jsonschema@^1.2.4: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jsonschema/-/jsonschema-1.4.1.tgz#cc4c3f0077fb4542982973d8a083b6b34f482dab" + integrity sha512-S6cATIPVv1z0IlxdN+zUk5EPjkGCdnhN4wVSBlvoUO1tOLJootbo9CquNJmbIh4yikWHiUedhRYrNPn1arpEmQ== + jsonwebtoken@^8.5.1: version "8.5.1" resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz#00e71e0b8df54c2121a1f26137df2280673bcc0d" @@ -8283,6 +8689,14 @@ locate-path@^2.0.0: p-locate "^2.0.0" path-exists "^3.0.0" +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + locate-path@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" @@ -8367,11 +8781,18 @@ lodash@4.17.20: resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.20.tgz#b44a9b6297bcb698f1c51a3545a2b3b368d59c52" integrity sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA== -lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.4: +lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.21, lodash@^4.17.4: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== +log-symbols@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-3.0.0.tgz#f3a08516a5dea893336a7dee14d18a1cfdab77c4" + integrity sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ== + dependencies: + chalk "^2.4.2" + log-symbols@4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" @@ -8492,6 +8913,11 @@ markdown-it@11.0.0: mdurl "^1.0.1" uc.micro "^1.0.5" +markdown-table@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-1.1.3.tgz#9fcb69bcfdb8717bfd0398c6ec2d93036ef8de60" + integrity 
sha512-1RUZVgQlpJSPWYbFSpmudq5nHY1doEIv89gBtF0s4gW1GF2XorxcA/70M5vq7rLv0a6mhOUccRsqkwhwLCIQ2Q== + markdownlint-cli@^0.24.0: version "0.24.0" resolved "https://registry.yarnpkg.com/markdownlint-cli/-/markdownlint-cli-0.24.0.tgz#d1c1d43cd53b87aaec93035b3234eef7097139a8" @@ -8596,7 +9022,7 @@ merge-stream@^2.0.0: resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== -merge2@^1.3.0, merge2@^1.4.1: +merge2@^1.2.3, merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== @@ -8733,6 +9159,20 @@ minimalistic-crypto-utils@^1.0.1: resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" integrity sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg== +"minimatch@2 || 3", minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimatch@3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + minimatch@4.2.1: version "4.2.1" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-4.2.1.tgz#40d9d511a46bdc4e563c22c3080cde9c0d8299b4" @@ -8747,13 +9187,6 @@ minimatch@5.0.1: dependencies: brace-expansion "^2.0.1" -minimatch@^3.0.4, minimatch@^3.1.1: - version "3.1.2" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" - integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== - dependencies: - brace-expansion "^1.1.7" - minimatch@~3.0.4: version "3.0.8" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1" @@ -8806,7 +9239,14 @@ mkdirp@*: resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.3.tgz#b083ff37be046fd3d6552468c1f0ff44c1545d1f" integrity sha512-sjAkg21peAG9HS+Dkx7hlG9Ztx7HLeKnvB3NQRcu/mltCVmvkF0pisbiTSfDVYTT86XEfZrTUosLdZLStquZUw== -mkdirp@^0.5.1, mkdirp@^0.5.5: +mkdirp@0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" + integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== + dependencies: + minimist "^1.2.5" + +mkdirp@0.5.x, mkdirp@^0.5.1, mkdirp@^0.5.5: version "0.5.6" resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== @@ -8825,6 +9265,36 @@ mocha-steps@^1.3.0: resolved "https://registry.yarnpkg.com/mocha-steps/-/mocha-steps-1.3.0.tgz#2449231ec45ec56810f65502cb22e2571862957f" integrity 
sha512-KZvpMJTqzLZw3mOb+EEuYi4YZS41C9iTnb7skVFRxHjUd1OYbl64tCMSmpdIRM9LnwIrSOaRfPtNpF5msgv6Eg== +mocha@7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-7.1.2.tgz#8e40d198acf91a52ace122cd7599c9ab857b29e6" + integrity sha512-o96kdRKMKI3E8U0bjnfqW4QMk12MwZ4mhdBTf+B5a1q9+aq2HRnj+3ZdJu0B/ZhJeK78MgYuv6L8d/rA5AeBJA== + dependencies: + ansi-colors "3.2.3" + browser-stdout "1.3.1" + chokidar "3.3.0" + debug "3.2.6" + diff "3.5.0" + escape-string-regexp "1.0.5" + find-up "3.0.0" + glob "7.1.3" + growl "1.10.5" + he "1.2.0" + js-yaml "3.13.1" + log-symbols "3.0.0" + minimatch "3.0.4" + mkdirp "0.5.5" + ms "2.1.1" + node-environment-flags "1.0.6" + object.assign "4.1.0" + strip-json-comments "2.0.1" + supports-color "6.0.0" + which "1.3.1" + wide-align "1.1.3" + yargs "13.3.2" + yargs-parser "13.1.2" + yargs-unparser "1.6.0" + mocha@^10.0.0: version "10.1.0" resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.1.0.tgz#dbf1114b7c3f9d0ca5de3133906aea3dfc89ef7a" @@ -8852,6 +9322,36 @@ mocha@^10.0.0: yargs-parser "20.2.4" yargs-unparser "2.0.0" +mocha@^7.1.1: + version "7.2.0" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-7.2.0.tgz#01cc227b00d875ab1eed03a75106689cfed5a604" + integrity sha512-O9CIypScywTVpNaRrCAgoUnJgozpIofjKUYmJhiCIJMiuYnLI6otcb1/kpW9/n/tJODHGZ7i8aLQoDVsMtOKQQ== + dependencies: + ansi-colors "3.2.3" + browser-stdout "1.3.1" + chokidar "3.3.0" + debug "3.2.6" + diff "3.5.0" + escape-string-regexp "1.0.5" + find-up "3.0.0" + glob "7.1.3" + growl "1.10.5" + he "1.2.0" + js-yaml "3.13.1" + log-symbols "3.0.0" + minimatch "3.0.4" + mkdirp "0.5.5" + ms "2.1.1" + node-environment-flags "1.0.6" + object.assign "4.1.0" + strip-json-comments "2.0.1" + supports-color "6.0.0" + which "1.3.1" + wide-align "1.1.3" + yargs "13.3.2" + yargs-parser "13.1.2" + yargs-unparser "1.6.0" + mocha@^9.0.2: version "9.2.2" resolved "https://registry.yarnpkg.com/mocha/-/mocha-9.2.2.tgz#d70db46bdb93ca57402c809333e5a84977a88fb9" @@ -8897,6 +9397,11 @@ ms@2.0.0: resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== +ms@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" + integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== + ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" @@ -9033,6 +9538,21 @@ node-addon-api@^2.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-2.0.2.tgz#432cfa82962ce494b132e9d72a15b29f71ff5d32" integrity sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA== +node-emoji@^1.10.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" + integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== + dependencies: + lodash "^4.17.21" + +node-environment-flags@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/node-environment-flags/-/node-environment-flags-1.0.6.tgz#a30ac13621f6f7d674260a54dede048c3982c088" + integrity sha512-5Evy2epuL+6TM0lCQGpFIj6KwiEsGh1SrHUhTbNX+sLbBtjidPZFAnVK9y5yU1+h//RitLbRHTIMyxQPtxMdHw== + dependencies: + object.getownpropertydescriptors "^2.0.3" + semver "^5.7.0" + 
node-fetch@^2.6.0, node-fetch@^2.6.1: version "2.6.7" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" @@ -9075,6 +9595,13 @@ nofilter@^1.0.4: resolved "https://registry.yarnpkg.com/nofilter/-/nofilter-1.0.4.tgz#78d6f4b6a613e7ced8b015cec534625f7667006e" integrity sha512-N8lidFp+fCz+TD51+haYdbDGrcBWwuHX40F5+z0qkUjMJ5Tp+rdSuAkMJ9N9eoolDlEVTf6u5icM+cNKkKW2mA== +nopt@3.x: + version "3.0.6" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" + integrity sha512-4GUt3kSEYmk4ITxzB/b9vaIDfUVWN/Ml1Fwl11IlnIG2iaJ9O6WXZ9SrYM9NLI8OCBieN2Y8SWC2oJV0RQ7qYg== + dependencies: + abbrev "1" + normalize-package-data@^2.3.2: version "2.5.0" resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" @@ -9172,7 +9699,7 @@ object-is@^1.0.1: call-bind "^1.0.2" define-properties "^1.1.3" -object-keys@^1.1.1: +object-keys@^1.0.11, object-keys@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== @@ -9189,6 +9716,16 @@ object-visit@^1.0.0: dependencies: isobject "^3.0.0" +object.assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + object.assign@^4.1.4: version "4.1.4" resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.4.tgz#9673c7c7c351ab8c4d0b516f4343ebf4dfb7799f" @@ -9199,7 +9736,7 @@ object.assign@^4.1.4: has-symbols "^1.0.3" object-keys "^1.1.1" -object.getownpropertydescriptors@^2.1.1: +object.getownpropertydescriptors@^2.0.3, object.getownpropertydescriptors@^2.1.1: version "2.1.5" resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.5.tgz#db5a9002489b64eef903df81d6623c07e5b4b4d3" integrity sha512-yDNzckpM6ntyQiGTik1fKV1DcVDRS+w8bvpWNCBanvH5LfRX9O8WTHqQzG4RZwRAM4I0oU7TV11Lj5v0g20ibw== @@ -9235,7 +9772,7 @@ on-finished@2.4.1: dependencies: ee-first "1.1.1" -once@^1.3.0, once@^1.3.1, once@^1.4.0: +once@1.x, once@^1.3.0, once@^1.3.1, once@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== @@ -9264,7 +9801,7 @@ open@^7.4.2: is-docker "^2.0.0" is-wsl "^2.1.1" -optionator@^0.8.2: +optionator@^0.8.1, optionator@^0.8.2: version "0.8.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== @@ -9322,7 +9859,7 @@ p-limit@^1.1.0: dependencies: p-try "^1.0.0" -p-limit@^2.2.0: +p-limit@^2.0.0, p-limit@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== @@ -9343,6 +9880,13 @@ p-locate@^2.0.0: dependencies: p-limit 
"^1.1.0" +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + p-locate@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" @@ -9392,6 +9936,11 @@ parse-asn1@^5.0.0, parse-asn1@^5.1.5: pbkdf2 "^3.0.3" safe-buffer "^5.1.1" +parse-cache-control@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parse-cache-control/-/parse-cache-control-1.0.1.tgz#8eeab3e54fa56920fe16ba38f77fa21aacc2d74e" + integrity sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg== + parse-headers@^2.0.0: version "2.0.5" resolved "https://registry.yarnpkg.com/parse-headers/-/parse-headers-2.0.5.tgz#069793f9356a54008571eb7f9761153e6c770da9" @@ -9597,6 +10146,11 @@ pify@^3.0.0: resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" integrity sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg== +pify@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" + integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== + pinkie-promise@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" @@ -9715,6 +10269,13 @@ promise-to-callback@^1.0.0: is-fn "^1.0.0" set-immediate-shim "^1.0.1" +promise@^8.0.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/promise/-/promise-8.3.0.tgz#8cb333d1edeb61ef23869fbb8a4ea0279ab60e0a" + integrity sha512-rZPNPKTOYVNEEKFaq1HqTgOwZD+4/YHS5ukLzQCypkj+OkYx7iv0mA91lJlpPPZ8vMau3IIGj5Qlwrx+8iiSmg== + dependencies: + asap "~2.0.6" + prompts@^2.0.1: version "2.4.2" resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" @@ -9849,6 +10410,13 @@ qs@6.11.0, qs@^6.7.0: dependencies: side-channel "^1.0.4" +qs@^6.4.0: + version "6.11.1" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.1.tgz#6c29dff97f0c0060765911ba65cbc9764186109f" + integrity sha512-0wsrzgTz/kAVIeuxSjnpGC56rzYtr6JT/2BwEvMaPhFIoYa1aGO8LbzuU1R0uUYQkLpWBTOj0l/CLAJB64J6nQ== + dependencies: + side-channel "^1.0.4" + qs@~6.5.2: version "6.5.3" resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" @@ -9996,6 +10564,13 @@ readable-stream@~1.0.15, readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@~3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.2.0.tgz#c30c33352b12c96dfb4b895421a49fd5a9593839" + integrity sha512-crk4Qu3pmXwgxdSgGhgA/eXiJAPQiX4GMOZZMXnqKxHX7TaoL+3gQVo/WeuAiogr07DpnfjIMpXXa+PAIvwPGQ== + dependencies: + picomatch "^2.0.4" + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" @@ -10003,6 +10578,20 @@ readdirp@~3.6.0: dependencies: picomatch "^2.2.1" +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + integrity sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw== + 
dependencies: + resolve "^1.1.6" + +recursive-readdir@^2.2.2: + version "2.2.3" + resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.3.tgz#e726f328c0d69153bcabd5c322d3195252379372" + integrity sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA== + dependencies: + minimatch "^3.0.5" + regenerate@^1.2.1: version "1.4.2" resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" @@ -10087,7 +10676,37 @@ repeating@^2.0.0: dependencies: is-finite "^1.0.0" -request@^2.79.0, request@^2.85.0: +req-cwd@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/req-cwd/-/req-cwd-2.0.0.tgz#d4082b4d44598036640fb73ddea01ed53db49ebc" + integrity sha512-ueoIoLo1OfB6b05COxAA9UpeoscNpYyM+BqYlA7H6LVF4hKGPXQQSSaD2YmvDVJMkk4UDpAHIeU1zG53IqjvlQ== + dependencies: + req-from "^2.0.0" + +req-from@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/req-from/-/req-from-2.0.0.tgz#d74188e47f93796f4aa71df6ee35ae689f3e0e70" + integrity sha512-LzTfEVDVQHBRfjOUMgNBA+V6DWsSnoeKzf42J7l0xa/B4jyPOuuF5MlNSmomLNGemWTnV2TIdjSSLnEn95fOQA== + dependencies: + resolve-from "^3.0.0" + +request-promise-core@1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.4.tgz#3eedd4223208d419867b78ce815167d10593a22f" + integrity sha512-TTbAfBBRdWD7aNNOoVOBH4pN/KigV6LyapYNNlAPA8JwbovRti1E88m3sYAwsLi5ryhPKsE9APwnjFTgdUjTpw== + dependencies: + lodash "^4.17.19" + +request-promise-native@^1.0.5: + version "1.0.9" + resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.9.tgz#e407120526a5efdc9a39b28a5679bf47b9d9dc28" + integrity sha512-wcW+sIUiWnKgNY0dqCpOZkUbF/I+YPi+f09JZIDa39Ec+q82CpSYniDp+ISgTTbKmnpJWASeJBPZmoxH84wt3g== + dependencies: + request-promise-core "1.1.4" + stealthy-require "^1.1.1" + tough-cookie "^2.3.3" + +request@^2.79.0, request@^2.85.0, request@^2.88.0: version "2.88.2" resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== @@ -10133,6 +10752,11 @@ require-main-filename@^1.0.1: resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" integrity sha512-IqSUtOVP4ksd1C/ej5zeEh/BIP2ajqpn8c5x+q99gvcIG/Qf0cud5raVnE/Dwd0ua9TXYDoDc0RE5hBSdz22Ug== +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + resolve-alpn@^1.0.0: version "1.2.1" resolved "https://registry.yarnpkg.com/resolve-alpn/-/resolve-alpn-1.2.1.tgz#b7adbdac3546aaaec20b45e7d8265927072726f9" @@ -10170,6 +10794,11 @@ resolve.exports@^1.1.0: resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-1.1.0.tgz#5ce842b94b05146c0e03076985d1d0e7e48c90c9" integrity sha512-J1l+Zxxp4XK3LUDZ9m60LRJF/mAe4z6a4xyabPHk7pvK5t35dACV32iIjJDFeWZFfZlO29w6SZ67knR0tHzJtQ== +resolve@1.1.x: + version "1.1.7" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b" + integrity sha512-9znBF0vBcaSN3W2j7wKvdERPwqTxSpCq+if5C0WoTCyV9n24rua28jeuQ2pL/HOf+yUe/Mef+H/5p60K0Id3bg== + resolve@1.17.0: version "1.17.0" resolved 
"https://registry.yarnpkg.com/resolve/-/resolve-1.17.0.tgz#b25941b54968231cc2d1bb76a79cb7f2c0bf8444" @@ -10177,6 +10806,15 @@ resolve@1.17.0: dependencies: path-parse "^1.0.6" +resolve@^1.1.6: + version "1.22.2" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.2.tgz#0ed0943d4e301867955766c9f3e1ae6d01c6845f" + integrity sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g== + dependencies: + is-core-module "^2.11.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + resolve@^1.10.0, resolve@^1.12.0, resolve@^1.20.0, resolve@^1.8.1, resolve@~1.22.1: version "1.22.1" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177" @@ -10330,6 +10968,31 @@ safe-regex@^1.1.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== +sc-istanbul@^0.4.5: + version "0.4.6" + resolved "https://registry.yarnpkg.com/sc-istanbul/-/sc-istanbul-0.4.6.tgz#cf6784355ff2076f92d70d59047d71c13703e839" + integrity sha512-qJFF/8tW/zJsbyfh/iT/ZM5QNHE3CXxtLJbZsL+CzdJLBsPD7SedJZoUA4d8iAcN2IoMp/Dx80shOOd2x96X/g== + dependencies: + abbrev "1.0.x" + async "1.x" + escodegen "1.8.x" + esprima "2.7.x" + glob "^5.0.15" + handlebars "^4.0.1" + js-yaml "3.x" + mkdirp "0.5.x" + nopt "3.x" + once "1.x" + resolve "1.1.x" + supports-color "^3.1.0" + which "^1.1.1" + wordwrap "^1.0.0" + +scrypt-js@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-2.0.4.tgz#32f8c5149f0797672e551c07e230f834b6af5f16" + integrity sha512-4KsaGcPnuhtCZQCxFxN3GVYIhKFPTdLd8PLC552XwbMndtD0cjRFAhDuuydXQ0h08ZfPgzqe6EKHozpuH74iDw== + scrypt-js@3.0.1, scrypt-js@^3.0.0, scrypt-js@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" @@ -10361,7 +11024,7 @@ semaphore@>=1.0.1, semaphore@^1.0.3, semaphore@^1.1.0: resolved "https://registry.yarnpkg.com/semaphore/-/semaphore-1.1.0.tgz#aaad8b86b20fe8e9b32b16dc2ee682a8cd26a8aa" integrity sha512-O4OZEaNtkMd/K0i6js9SL+gqy0ZCBMgUvlSqHKi4IBdjhe7wB8pwztUk1BbZ1fmrvpwFrPbHzqd2w5pTcJH6LA== -"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0: +"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0, semver@^5.7.0: version "5.7.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== @@ -10378,6 +11041,13 @@ semver@^6.0.0, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== +semver@^7.3.4: + version "7.4.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.4.0.tgz#8481c92feffc531ab1e012a8ffc15bdd3a0f4318" + integrity sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw== + dependencies: + lru-cache "^6.0.0" + semver@~5.4.1: version "5.4.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" @@ -10450,6 +11120,11 @@ set-value@^2.0.0, set-value@^2.0.1: is-plain-object "^2.0.3" split-string "^3.0.1" +setimmediate@1.0.4: + version 
"1.0.4" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.4.tgz#20e81de622d4a02588ce0c8da8973cbcf1d3138f" + integrity sha512-/TjEmXQVEzdod/FFskf3o7oOAsGhHf2j1dZqRFbDzq4F3mvvxflIIi4Hd3bLQE9y/CpwqfSQam5JakI/mi3Pog== + setimmediate@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" @@ -10468,6 +11143,14 @@ sha.js@^2.4.0, sha.js@^2.4.8: inherits "^2.0.1" safe-buffer "^5.0.1" +sha1@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/sha1/-/sha1-1.1.1.tgz#addaa7a93168f393f19eb2b15091618e2700f848" + integrity sha512-dZBS6OrMjtgVkopB1Gmo4RQCDKiZsqcpAQpkV/aaj+FCrCg8r4I4qMkDPQjBgLIxlmu9k4nUbWq6ohXahOneYA== + dependencies: + charenc ">= 0.0.1" + crypt ">= 0.0.1" + shebang-command@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" @@ -10497,6 +11180,15 @@ shell-quote@^1.6.1: resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.7.4.tgz#33fe15dee71ab2a81fcbd3a52106c5cfb9fb75d8" integrity sha512-8o/QEhSSRb1a5i7TFR0iM4G16Z0vYB2OQVs4G3aAFXjn3T6yEx8AZxy1PgDF7I00LZHYA3WxaSYIf5e5sAX8Rw== +shelljs@^0.8.3: + version "0.8.5" + resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.5.tgz#de055408d8361bed66c669d2f000538ced8ee20c" + integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow== + dependencies: + glob "^7.0.0" + interpret "^1.0.0" + rechoir "^0.6.2" + side-channel@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" @@ -10673,6 +11365,32 @@ solidity-comments-extractor@^0.0.7: resolved "https://registry.yarnpkg.com/solidity-comments-extractor/-/solidity-comments-extractor-0.0.7.tgz#99d8f1361438f84019795d928b931f4e5c39ca19" integrity sha512-wciNMLg/Irp8OKGrh3S2tfvZiZ0NEyILfcRCXCD4mp7SgK/i9gzLfhY2hY7VMCQJ3kH9UB9BzNdibIVMchzyYw== +solidity-coverage@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/solidity-coverage/-/solidity-coverage-0.8.2.tgz#bc39604ab7ce0a3fa7767b126b44191830c07813" + integrity sha512-cv2bWb7lOXPE9/SSleDO6czkFiMHgP4NXPj+iW9W7iEKLBk7Cj0AGBiNmGX3V1totl9wjPrT0gHmABZKZt65rQ== + dependencies: + "@ethersproject/abi" "^5.0.9" + "@solidity-parser/parser" "^0.14.1" + chalk "^2.4.2" + death "^1.1.0" + detect-port "^1.3.0" + difflib "^0.2.4" + fs-extra "^8.1.0" + ghost-testrpc "^0.0.2" + global-modules "^2.0.0" + globby "^10.0.1" + jsonschema "^1.2.4" + lodash "^4.17.15" + mocha "7.1.2" + node-emoji "^1.10.0" + pify "^4.0.1" + recursive-readdir "^2.2.2" + sc-istanbul "^0.4.5" + semver "^7.3.4" + shelljs "^0.8.3" + web3-utils "^1.3.6" + solpp@^0.11.5: version "0.11.5" resolved "https://registry.yarnpkg.com/solpp/-/solpp-0.11.5.tgz#e5f38b5acc952e1cc2e3871d490fdbed910938dd" @@ -10745,6 +11463,13 @@ source-map@^0.6.0, source-map@^0.6.1: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== +source-map@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d" + integrity sha512-CBdZ2oa/BHhS4xj5DlhjWNHcan57/5YuvfdLf17iVmIpd9KRm+DFLmC6nBNj+6Ua7Kt3TmOjDpQT1aTYOQtoUA== + dependencies: + amdefine ">=0.0.4" + spdx-correct@^3.0.0: version "3.1.1" resolved 
"https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" @@ -10841,6 +11566,11 @@ statuses@2.0.1: resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== +stealthy-require@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b" + integrity sha512-ZnWpYnYugiOVEY5GkcuJK1io5V8QmNYChG62gSit9pQVGErXtrKuPC55ITaVSukmMta5qpMU7vqLt2Lnni4f/g== + stream-to-pull-stream@^1.7.1: version "1.7.3" resolved "https://registry.yarnpkg.com/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz#4161aa2d2eb9964de60bfa1af7feaf917e874ece" @@ -10876,7 +11606,7 @@ string-width@^1.0.1: is-fullwidth-code-point "^1.0.0" strip-ansi "^3.0.0" -string-width@^2.1.0: +"string-width@^1.0.2 || 2", string-width@^2.1.0, string-width@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== @@ -10884,7 +11614,7 @@ string-width@^2.1.0: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^3.0.0: +string-width@^3.0.0, string-width@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== @@ -10989,7 +11719,7 @@ strip-ansi@^4.0.0: dependencies: ansi-regex "^3.0.0" -strip-ansi@^5.1.0: +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== @@ -11032,15 +11762,22 @@ strip-hex-prefix@1.0.0: dependencies: is-hex-prefixed "1.0.0" +strip-json-comments@2.0.1, strip-json-comments@^2.0.1, strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== + strip-json-comments@3.1.1, strip-json-comments@^3.1.0, strip-json-comments@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== -strip-json-comments@^2.0.1, strip-json-comments@~2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== +supports-color@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.0.0.tgz#76cfe742cf1f41bb9b1c29ad03068c05b4c0e40a" + integrity sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg== + dependencies: + has-flag "^3.0.0" supports-color@8.1.1, supports-color@^8.0.0: version "8.1.1" @@ -11054,6 +11791,13 @@ 
supports-color@^2.0.0: resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" integrity sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g== +supports-color@^3.1.0: + version "3.2.3" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6" + integrity sha512-Jds2VIYDrlp5ui7t8abHN2bjAu4LV/q4N2KivFPpGH0lrka0BMq/33AmECUXlKPcHigkNaqfXRENFju+rlcy+A== + dependencies: + has-flag "^1.0.0" + supports-color@^5.3.0: version "5.5.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" @@ -11090,6 +11834,22 @@ swarm-js@^0.1.40: tar "^4.0.2" xhr-request "^1.0.1" +sync-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/sync-request/-/sync-request-6.1.0.tgz#e96217565b5e50bbffe179868ba75532fb597e68" + integrity sha512-8fjNkrNlNCrVc/av+Jn+xxqfCjYaBoHqCsDz6mt030UMxJGr+GSfCV1dQt2gRtlL63+VPidwDVLr7V2OcTSdRw== + dependencies: + http-response-object "^3.0.1" + sync-rpc "^1.2.1" + then-request "^6.0.0" + +sync-rpc@^1.2.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/sync-rpc/-/sync-rpc-1.3.6.tgz#b2e8b2550a12ccbc71df8644810529deb68665a7" + integrity sha512-J8jTXuZzRlvU7HemDgHi3pGnh/rkoqR/OZSjhTyyZrEkkYQbk7Z33AXp37mkPfPpfdOuj7Ex3H/TJM1z48uPQw== + dependencies: + get-port "^3.1.0" + table@^5.2.3: version "5.4.6" resolved "https://registry.yarnpkg.com/table/-/table-5.4.6.tgz#1292d19500ce3f86053b05f0e8e7e4a3bb21079e" @@ -11228,6 +11988,23 @@ text-table@^0.2.0: resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== +then-request@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/then-request/-/then-request-6.0.2.tgz#ec18dd8b5ca43aaee5cb92f7e4c1630e950d4f0c" + integrity sha512-3ZBiG7JvP3wbDzA9iNY5zJQcHL4jn/0BWtXIkagfz7QgOL/LqjCEOBQuJNZfu0XYnv5JhKh+cDxCPM4ILrqruA== + dependencies: + "@types/concat-stream" "^1.6.0" + "@types/form-data" "0.0.33" + "@types/node" "^8.0.0" + "@types/qs" "^6.2.31" + caseless "~0.12.0" + concat-stream "^1.6.0" + form-data "^2.2.0" + http-basic "^8.1.1" + http-response-object "^3.0.1" + promise "^8.0.0" + qs "^6.4.0" + thenify-all@^1.0.0: version "1.6.0" resolved "https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" @@ -11336,7 +12113,7 @@ toidentifier@1.0.1: resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== -tough-cookie@~2.5.0: +tough-cookie@^2.3.3, tough-cookie@~2.5.0: version "2.5.0" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== @@ -11752,6 +12529,11 @@ utils-merge@1.0.1: resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== +uuid@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.1.tgz#c2a30dedb3e535d72ccf82e343941a50ba8533ac" + 
integrity sha512-nWg9+Oa3qD2CQzHIP4qKUqwNfzKn8P0LtFhotaCTFchsV7ZfDhAybeip/HZVeMIpZi9JgY1E3nUlwaCmZT1sEg== + uuid@3.3.2: version "3.3.2" resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" @@ -12090,6 +12872,19 @@ web3-utils@^1.0.0-beta.31, web3-utils@^1.3.4: randombytes "^2.1.0" utf8 "3.0.0" +web3-utils@^1.3.6: + version "1.9.0" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.9.0.tgz#7c5775a47586cefb4ad488831be8f6627be9283d" + integrity sha512-p++69rCNNfu2jM9n5+VD/g26l+qkEOQ1m6cfRQCbH8ZRrtquTmrirJMgTmyOoax5a5XRYOuws14aypCOs51pdQ== + dependencies: + bn.js "^5.2.1" + ethereum-bloom-filters "^1.0.6" + ethereumjs-util "^7.1.0" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + utf8 "3.0.0" + web3@1.2.11: version "1.2.11" resolved "https://registry.yarnpkg.com/web3/-/web3-1.2.11.tgz#50f458b2e8b11aa37302071c170ed61cff332975" @@ -12161,6 +12956,11 @@ which-module@^1.0.0: resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f" integrity sha512-F6+WgncZi/mJDrammbTuHe1q0R5hOXv/mBaiNA2TCNT/LTHusX0V+CJnj9XT8ki5ln2UZyyddDgHfCzyrOH7MQ== +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha512-B+enWhmw6cjfVC7kS8Pj9pCrKSc5txArRyaYGe088shv/FGWH+0Rjx/xPgtsWfsUtS27FkP697E4DDhgrgoc0Q== + which-typed-array@^1.1.9: version "1.1.9" resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.9.tgz#307cf898025848cf995e795e8423c7f337efbde6" @@ -12173,6 +12973,13 @@ which-typed-array@^1.1.9: has-tostringtag "^1.0.0" is-typed-array "^1.1.10" +which@1.3.1, which@^1.1.1, which@^1.2.9, which@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + which@2.0.2, which@^2.0.1: version "2.0.2" resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" @@ -12180,12 +12987,12 @@ which@2.0.2, which@^2.0.1: dependencies: isexe "^2.0.0" -which@^1.2.9: - version "1.3.1" - resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" - integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== +wide-align@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== dependencies: - isexe "^2.0.0" + string-width "^1.0.2 || 2" window-size@^0.2.0: version "0.2.0" @@ -12220,6 +13027,15 @@ wrap-ansi@^2.0.0: string-width "^1.0.1" strip-ansi "^3.0.1" +wrap-ansi@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== + dependencies: + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" @@ -12317,6 +13133,11 @@ xhr@^2.0.4, xhr@^2.2.0, xhr@^2.3.3: parse-headers 
"^2.0.0" xtend "^4.0.0" +xmlhttprequest@1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz#67fe075c5c24fef39f9d65f5f7b7fe75171968fc" + integrity sha512-58Im/U0mlVBLM38NdZjHyhuMtCqa61469k2YP/AaPbvCoV9aQGUpbJBj1QRm2ytRiVQBD/fsw7L2bJGDVQswBA== + xtend@^4.0.0, xtend@^4.0.1, xtend@~4.0.0, xtend@~4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" @@ -12334,6 +13155,11 @@ y18n@^3.2.1: resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696" integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== +y18n@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== + y18n@^5.0.5: version "5.0.8" resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" @@ -12359,6 +13185,14 @@ yaml@^1.10.2: resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== +yargs-parser@13.1.2, yargs-parser@^13.1.2: + version "13.1.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" + integrity sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + yargs-parser@20.2.4: version "20.2.4" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" @@ -12382,6 +13216,15 @@ yargs-parser@^21.0.0, yargs-parser@^21.0.1: resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== +yargs-unparser@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.0.tgz#ef25c2c769ff6bd09e4b0f9d7c605fb27846ea9f" + integrity sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw== + dependencies: + flat "^4.1.0" + lodash "^4.17.15" + yargs "^13.3.0" + yargs-unparser@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" @@ -12392,6 +13235,22 @@ yargs-unparser@2.0.0: flat "^5.0.2" is-plain-obj "^2.1.0" +yargs@13.3.2, yargs@^13.3.0: + version "13.3.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" + integrity sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw== + dependencies: + cliui "^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.2" + yargs@16.2.0: version "16.2.0" resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" @@ -12449,4 +13308,4 @@ yocto-queue@^0.1.0: integrity 
sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== "zksync-web3@link:sdk/zksync-web3.js": - version "0.12.5" + version "0.15.1"